diff --git a/.github/workflows/lcas-figshare-processing.yaml b/.github/workflows/lcas-figshare-processing.yaml index 8b94fe9..59e59c0 100644 --- a/.github/workflows/lcas-figshare-processing.yaml +++ b/.github/workflows/lcas-figshare-processing.yaml @@ -71,10 +71,10 @@ jobs: cd ./output if [ "${{ github.event_name }}" = "workflow_dispatch" ] && [ "${{ github.event.inputs.use_author_cache }}" = "true" ]; then echo "Running figshare_fetch.py with --use-author-cache (manually triggered)" - python ../figshare_fetch.py --use-author-cache -f ../lcas-authors.txt --max-retries 30 --rate-limit-delay 0.1 + python ../figshare_fetch.py --use-author-cache -c ../lcas-authors.yaml --max-retries 30 --rate-limit-delay 0.1 else echo "Running figshare_fetch.py without cache (default behavior)" - python ../figshare_fetch.py -f ../lcas-authors.txt --max-retries 30 --rate-limit-delay 0.1 + python ../figshare_fetch.py -c ../lcas-authors.yaml --max-retries 30 --rate-limit-delay 0.1 fi - name: Run figshare bibtex (Step 2 - Generate bibtex from CSV) diff --git a/.github/workflows/uoa11-figshare-processing.yaml b/.github/workflows/uoa11-figshare-processing.yaml index 78b8e9f..52bc834 100644 --- a/.github/workflows/uoa11-figshare-processing.yaml +++ b/.github/workflows/uoa11-figshare-processing.yaml @@ -71,10 +71,10 @@ jobs: cd ./output if [ "${{ github.event_name }}" = "workflow_dispatch" ] && [ "${{ github.event.inputs.use_author_cache }}" = "true" ]; then echo "Running figshare_fetch.py with --use-author-cache (manually triggered)" - python ../figshare_fetch.py -f ../uoa11-authors.txt --use-author-cache --max-retries 30 --rate-limit-delay 0.1 -o uoa11-figshare_articles.csv -O uoa11-figshare_articles_all.csv + python ../figshare_fetch.py -c ../uoa11-authors.yaml --use-author-cache --max-retries 30 --rate-limit-delay 0.1 -o uoa11-figshare_articles.csv -O uoa11-figshare_articles_all.csv else echo "Running figshare_fetch.py without cache (default behavior)" - python ../figshare_fetch.py -f 
../uoa11-authors.txt --rate-limit-delay 0.1 --max-retries 30 -o uoa11-figshare_articles.csv -O uoa11-figshare_articles_all.csv + python ../figshare_fetch.py -c ../uoa11-authors.yaml --rate-limit-delay 0.1 --max-retries 30 -o uoa11-figshare_articles.csv -O uoa11-figshare_articles_all.csv fi - name: Run figshare bibtex (Step 2 - Generate bibtex from CSV) diff --git a/author.py b/author.py index d4813b7..f1e9e38 100644 --- a/author.py +++ b/author.py @@ -13,17 +13,42 @@ class Author: - def __init__(self, name, debug=False, rate_limit_delay=1.0, max_retries=5): + """Represents an author and manages their Figshare article collection. + + This class handles retrieving, processing, and caching article metadata for + a specific author from the Figshare repository. + """ + + def __init__(self, name, user_id=None, institution_id=None, orcid=None, debug=False, rate_limit_delay=1.0, max_retries=5): + """Initialize an Author instance. + + Args: + name: Author's full name (required) + user_id: Figshare user ID (optional, improves search accuracy) + institution_id: Institution ID for filtering articles (optional, recommended) + orcid: Author's ORCID identifier (optional, for reference) + debug: Enable debug logging (default: False) + rate_limit_delay: Delay in seconds between API requests (default: 1.0) + max_retries: Maximum retry attempts for failed API calls (default: 5) + """ self.logger = getLogger("Author") if debug: self.logger.setLevel(DEBUG) self.name = name + self.user_id = user_id + self.institution_id = institution_id + self.orcid = orcid self.fs = FigShare(rate_limit_delay=rate_limit_delay, max_retries=max_retries) self.articles = {} self.public_html_prefix = "https://repository.lincoln.ac.uk" self.df = None def save(self, filename=None): + """Save author's articles and dataframe to a persistent cache file. 
+ + Args: + filename: Path to cache file (default: '{author_name}.db') + """ if filename is None: filename = f"{self.name}.db" with shelve.open(filename) as db: @@ -31,6 +56,11 @@ def save(self, filename=None): db['df'] = self.df def load(self, filename=None): + """Load author's articles and dataframe from a persistent cache file. + + Args: + filename: Path to cache file (default: '{author_name}.db') + """ if filename is None: filename = f"{self.name}.db" with shelve.open(filename) as db: @@ -39,12 +69,47 @@ def load(self, filename=None): def _retrieve_figshare(self, use_cache=True): + """Retrieve articles for this author from Figshare. + + Uses the most precise search method available based on the author metadata: + - If user_id and/or institution_id are available, uses articles_by_author() + with filtering for more accurate results + - Otherwise, falls back to simple name-based search + + Args: + use_cache: Whether to use cached API results (default: True) + """ self.logger.info(f"retrieving articles for {self.name}") - self.articles = self.fs.articles_by_user_name(self.name, use_cache=use_cache) + + # Use enhanced search with user_id tracking and institution filtering when available + if self.user_id or self.institution_id: + if self.user_id and self.institution_id: + self.logger.info(f"Using enhanced search for user_id {self.user_id} with institution_id {self.institution_id}") + elif self.user_id: + self.logger.info(f"Using enhanced search for user_id {self.user_id}") + else: + self.logger.info(f"Using enhanced search with institution_id {self.institution_id}") + + self.articles = self.fs.articles_by_author( + self.name, + user_id=self.user_id, + institution_id=self.institution_id, + use_cache=use_cache + ) + else: + self.logger.info(f"Using basic name search (no user_id or institution_id available)") + self.articles = self.fs.articles_by_user_name(self.name, use_cache=use_cache) self.logger.info(f"found {len(self.articles)} articles for {self.name}") def 
_retrieve_details(self, use_cache=True): + """Retrieve detailed metadata for each article. + + Fetches full article details including custom fields, tags, categories, etc. + + Args: + use_cache: Whether to use cached API results (default: True) + """ for article in self.articles: self.logger.info(f"retrieving details for article {article['id']}") article['details'] = self.fs.get_article(article['id'], use_cache=use_cache) @@ -122,9 +187,13 @@ def _retrieve_bibtex_from_dois(self): self.logger.warning(f"Failed to get bibtex for {doi}: {e}") def _flatten(self): + self.logger.info(f"flattening article dicts for {self.name}") new_articles = [] for a in self.articles: - new_articles.append(flatten(a, reducer='path')) + try: + new_articles.append(flatten(a, reducer='path')) + except Exception as e: + self.logger.warning(f"Failed to flatten article {a}: {e}") self.articles = new_articles def retrieve(self, use_cache=True): @@ -144,18 +213,27 @@ def _create_dataframe(self): self.df = pd.DataFrame.from_dict(self.articles) # add column with author name self.df['author'] = self.name + # add column with online date (as datetime object) - self.df['online_date'] = pd.to_datetime(self.df['timeline/firstOnline'], utc=True) + if 'timeline/firstOnline' in self.df.columns: + self.df['online_date'] = pd.to_datetime(self.df['timeline/firstOnline'], utc=True) + else: + self.logger.warning(f"'timeline/firstOnline' field not found, setting online_date to NaT") + self.df['online_date'] = pd.NaT + # add column with online year self.df['online_year'] = self.df['online_date'].apply( - lambda x: x.year + lambda x: x.year if pd.notna(x) else None ) + # add column with external DOI, parsed from custom_fields - self.df['External DOI'] = self.df['details/custom_fields/External DOI'].apply( - lambda x: re.sub(r'^(?:https?://doi\.org/|doi:)', '', x[0], flags=re.IGNORECASE).replace('doi:','') - if isinstance(x, list) and len(x) > 0 else None - ) - - + if 'details/custom_fields/External DOI' in 
self.df.columns: + self.df['External DOI'] = self.df['details/custom_fields/External DOI'].apply( + lambda x: re.sub(r'^(?:https?://doi\.org/|doi:)', '', x[0], flags=re.IGNORECASE).replace('doi:','') + if isinstance(x, list) and len(x) > 0 else None + ) + else: + self.logger.warning(f"'details/custom_fields/External DOI' field not found, setting External DOI to None") + self.df['External DOI'] = None return self.df diff --git a/figshare_api.py b/figshare_api.py index cedea9d..7f1f150 100644 --- a/figshare_api.py +++ b/figshare_api.py @@ -139,8 +139,80 @@ def __post(self, url, params=None, use_cache=True): self.logger.warning(f"Received empty or invalid JSON response for POST {self.base_url + url} (status: {response.status_code})") return [] + def articles_by_author(self, author_name, user_id=None, institution_id=None, use_cache=True): + """Search for articles by author name with optional institution filtering. + Uses the Figshare search API with the :author: search operator and optional + institution parameter. Note: Figshare's search API does not support searching + by author_id directly, so we use the author name for search and apply + institution filtering to narrow results. + + Args: + author_name: The author's full name to search for (required) + user_id: Figshare user ID (optional, used only for logging/reference) + institution_id: Institution ID to filter articles (optional, recommended + for more precise results when available) + use_cache: Whether to use cached results (default: True) + + Returns: + List of article dictionaries matching the search criteria. Each article + contains metadata like id, title, authors, DOI, etc. 
+ + Example: + articles = fs.articles_by_author( + "Marc Hanheide", + user_id=17159320, + institution_id=1068 + ) + """ + params = self.__init_params() + + # Use :author: search operator with author name + # This is the only reliable way to search by author in Figshare API + params["search_for"] = f':author: "{author_name}"' + + # Add institution filter as direct parameter if provided + # This significantly narrows results when multiple authors share the same name + if institution_id: + params["institution"] = institution_id + self.logger.info(f"Filtering by institution_id: {institution_id}") + + # Paginate through all results + page = 1 + articles = [] + while True: + params["page"] = page + if user_id: + self.logger.info(f"retrieving page {page} for {author_name} (user_id: {user_id})") + else: + self.logger.info(f"retrieving page {page} for {author_name}") + current_page_articles = self.__post("/articles/search", params=params, use_cache=use_cache) + page += 1 + if len(current_page_articles) == 0: + break + articles += current_page_articles + + if user_id: + self.logger.info(f"found {len(articles)} articles for {author_name} (user_id: {user_id})") + else: + self.logger.info(f"found {len(articles)} articles for {author_name}") + + return articles + def articles_by_user_name(self, user_name, use_cache=True): + """Search for articles by author name without additional filtering. + + This is a simpler version of articles_by_author() without institution + filtering or user_id tracking. Use articles_by_author() for more precise + searches when institution_id is available. 
+ + Args: + user_name: The author's full name to search for + use_cache: Whether to use cached results (default: True) + + Returns: + List of article dictionaries matching the author name + """ params = self.__init_params() params["search_for"] = f':author: \"{user_name}\"' page = 1 @@ -159,3 +231,17 @@ def articles_by_user_name(self, user_name, use_cache=True): def get_article(self, article_id, use_cache=True): return self.__get(f"/articles/{article_id}", use_cache=use_cache) + + def search_authors(self, params, use_cache=True): + """Search for authors using the Figshare account API. + + Args: + params: Dictionary with search parameters (search, orcid, is_active, + is_public, group_id, institution_id) + use_cache: Whether to use cached results + + Returns: + List of author dictionaries matching the search criteria + """ + self.logger.info(f"Searching for authors with params: {params}") + return self.__post("/account/authors/search", params=params, use_cache=use_cache) diff --git a/figshare_fetch.py b/figshare_fetch.py index 7dfa00c..78f9fd7 100755 --- a/figshare_fetch.py +++ b/figshare_fetch.py @@ -13,9 +13,11 @@ import pandas as pd import os import argparse +import yaml from logging import getLogger, basicConfig, INFO, DEBUG from author import Author +from figshare_api import FigShare basicConfig(level=INFO) logger = getLogger(__name__) @@ -24,13 +26,11 @@ def parse_args(): """Parse command-line arguments.""" parser = argparse.ArgumentParser( - description="Fetch publications from FigShare repository for specified authors and create CSV files.", + description="Fetch publications from FigShare repository for authors specified in YAML file and create CSV files.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) - parser.add_argument('-a', '--authors', nargs='+', - help='List of author names to process (uses default list if not specified)') - parser.add_argument('-f', '--authors-file', type=str, - help='Path to file containing list of authors, one per line 
(uses default list if not specified)') + parser.add_argument('-c', '--config', type=str, required=True, + help='Path to YAML configuration file containing authors and filters (required)') parser.add_argument('-o', '--output', type=str, default='figshare_articles.csv', help='Output CSV filename for publications, without duplicates') parser.add_argument('-O', '--output-all', type=str, default='figshare_articles_all.csv', @@ -47,64 +47,183 @@ def parse_args(): return parser.parse_args() -def load_authors_from_file(filename): - """Load author names from a file, one per line.""" +def load_yaml_config(filename): + """Load YAML configuration file containing authors and filters.""" try: with open(filename, 'r') as f: - return [line.strip() for line in f if line.strip()] + config = yaml.safe_load(f) + if not config: + logger.error(f"Empty YAML configuration file: {filename}") + return None + if 'authors' not in config: + logger.error(f"YAML file must contain 'authors' section") + return None + return config + except FileNotFoundError: + logger.error(f"Configuration file not found: {filename}") + return None + except yaml.YAMLError as e: + logger.error(f"Error parsing YAML file {filename}: {e}") + return None except Exception as e: - logger.error(f"Error loading authors from file {filename}: {e}") - return [] + logger.error(f"Error loading configuration from file {filename}: {e}") + return None + + +def search_author(fs, name=None, orcid=None, user_id=None, filters=None): + """Search for an author using Figshare API. 
+ + Args: + fs: FigShare API instance + name: Author name (optional if user_id or orcid provided) + orcid: Author ORCID (optional) + user_id: Author user_id (optional, takes precedence) + filters: Dictionary of filter criteria (is_active, is_public, group_id, institution_id) + + Returns: + Dictionary with 'id', 'full_name', 'orcid_id', or None if not found + """ + # If user_id is provided, we don't need to search + if user_id: + logger.info(f"Using provided user_id: {user_id} for {name}") + return {'id': user_id, 'full_name': name, 'orcid_id': orcid} + + # Build search parameters + search_params = {} + + # Add search criteria (orcid takes precedence over name) + if orcid: + search_params['orcid'] = orcid + logger.info(f"Searching for author by ORCID: {orcid}") + elif name: + search_params['search_for'] = name + logger.info(f"Searching for author by name: {name}") + else: + logger.error("At least one of name, orcid, or user_id must be provided") + return None + + # Add optional filters + if filters: + if 'is_active' in filters and filters['is_active'] is not None: + search_params['is_active'] = filters['is_active'] + if 'is_public' in filters and filters['is_public'] is not None: + search_params['is_public'] = filters['is_public'] + if 'group_id' in filters and filters['group_id'] is not None: + search_params['group_id'] = filters['group_id'] + # Handle both 'institution_id' (correct spelling) and 'instituion_id' (typo in YAML) + institution_id = filters.get('institution_id') or filters.get('instituion_id') + if institution_id is not None: + search_params['institution_id'] = institution_id + + # Search for authors + results = fs.search_authors(search_params) + + if not results: + logger.error(f"No author found for {name if name else orcid}") + return None + + if len(results) > 1: + logger.warning(f"Multiple authors found ({len(results)}) for {name if name else orcid}. 
Using first result: {results[0].get('full_name', 'Unknown')}") + else: + logger.info(f"Found author: {results[0].get('full_name', 'Unknown')} (id: {results[0].get('id', 'Unknown')})") + + return results[0] def figshare_fetch(): """ - Fetch FigShare publications for specified authors and create CSV files. + Fetch FigShare publications for authors specified in YAML configuration and create CSV files. This function: - 1. Retrieves publication data for each author from FigShare - 2. Combines all publications into a single dataset - 3. Removes duplicates while preserving author information - 4. Exports results to CSV files (without bibtex generation) + 1. Loads author configuration from YAML file + 2. Searches for authors using Figshare API with optional filters + 3. Retrieves publication data for each author from FigShare + 4. Combines all publications into a single dataset + 5. Removes duplicates while preserving author information + 6. Exports results to CSV files (without bibtex generation) """ args = parse_args() if args.debug: logger.setLevel(DEBUG) - # Get list of authors + # Load YAML configuration + config = load_yaml_config(args.config) + if not config: + logger.error("Failed to load configuration file. Exiting.") + return + + authors_config = config.get('authors', []) + filters = config.get('filters', {}) + + if not authors_config: + logger.error("No authors defined in configuration file. 
Exiting.") + return + + logger.info(f"Loaded configuration with {len(authors_config)} authors") + if filters: + logger.info(f"Applied filters: {filters}") + + # Initialize FigShare API + fs = FigShare(rate_limit_delay=args.rate_limit_delay, max_retries=args.max_retries) + + # Resolve authors using Figshare API authors_list = [] - if args.authors: - authors_list.extend(args.authors) - if args.authors_file: - authors_list.extend(load_authors_from_file(args.authors_file)) + for author_config in authors_config: + name = author_config.get('name') + if not name: + logger.warning("Author entry missing 'name' field, skipping") + continue + + orcid = author_config.get('orcid') + user_id = author_config.get('user_id') + + # Search for author + author_info = search_author(fs, name=name, orcid=orcid, user_id=user_id, filters=filters) + + if author_info: + # Use the resolved name from Figshare or fall back to config name + resolved_name = author_info.get('full_name', name) + # Store author metadata for precise article searching + author_metadata = { + 'name': resolved_name, + 'user_id': author_info.get('id'), + 'institution_id': author_info.get('institution_id'), + 'orcid': author_info.get('orcid_id') + } + authors_list.append(author_metadata) + logger.info(f"Resolved author: {resolved_name} (id: {author_info.get('id')})") + else: + logger.warning(f"Could not resolve author: {name}, skipping") - # Use default authors if none specified if not authors_list: - authors_list = [ - "Marc Hanheide", "Marcello Calisti", "Grzegorz Cielniak", - "Simon Parsons", "Elizabeth Sklar", "Paul Baxter", - "Petra Bosilj", "Heriberto Cuayahuitl", "Gautham Das", - "Francesco Del Duchetto", "Charles Fox", "Leonardo Guevara", - "Helen Harman", "Mohammed Al-Khafajiy", "Alexandr Klimchik", - "Riccardo Polvara", "Athanasios Polydoros", "Zied Tayeb", - "Sepehr Maleki", "Junfeng Gao", "Tom Duckett", "Mini Rai", - "Amir Ghalamzan Esfahani" - ] - logger.info(f"Using default list of {len(authors_list)} 
authors") - else: - logger.info(f"Processing {len(authors_list)} authors from command line/file") + logger.error("No authors could be resolved. Exiting.") + return + + logger.info(f"Processing {len(authors_list)} resolved authors") authors = {} df_all = None authors_to_process = [] # Track authors that need detail retrieval + # Get institution_id from filters for article search + institution_id = filters.get('institution_id') or filters.get('instituion_id') + # First pass: Initialize authors and retrieve basic figshare data logger.info("=== Phase 1: Retrieving basic article data from Figshare ===") - for author_name in authors_list: + for author_metadata in authors_list: + author_name = author_metadata['name'] logger.info(f"*** Processing {author_name}...") - authors[author_name] = Author(author_name, debug=args.debug, rate_limit_delay=args.rate_limit_delay, max_retries=args.max_retries) + authors[author_name] = Author( + author_metadata['name'], + user_id=author_metadata['user_id'], + institution_id=institution_id, + orcid=author_metadata['orcid'], + debug=args.debug, + rate_limit_delay=args.rate_limit_delay, + max_retries=args.max_retries + ) cache_exists = os.path.exists(f"{author_name}.db") if cache_exists and args.use_author_cache: @@ -130,7 +249,8 @@ def figshare_fetch(): # Third pass: Aggregate dataframes and save individual CSVs logger.info("=== Phase 3: Aggregating and saving results ===") - for author_name in authors_list: + for author_metadata in authors_list: + author_name = author_metadata["name"] if authors[author_name].df is not None: if df_all is None: df_all = authors[author_name].df diff --git a/lcas-authors.yaml b/lcas-authors.yaml new file mode 100644 index 0000000..376ee04 --- /dev/null +++ b/lcas-authors.yaml @@ -0,0 +1,37 @@ +# the institution ID is used to ensure only authors from a specific institution are included + +filters: + # is_active: true + # is_public: true + # filter by group ID (set to None to disable) + #group_id: 51958 + 
institution_id: 1068 + +authors: + - name: "Marc Hanheide" + orcid: "0000-0001-7728-1849" + user_id: 17159320 + - name: "Riccardo Polvara" + user_id: 17165437 + - name: "Marcello Calisti" + - name: "Grzegorz Cielniak" + - name: "Simon Parsons" + - name: "Elizabeth Sklar" + - name: "Paul Baxter" + - name: "Petra Bosilj" + - name: "Heriberto Cuayahuitl" + - name: "Gautham Das" + - name: "Francesco Del Duchetto" + - name: "Charles Fox" + - name: "Leonardo Guevara" + - name: "Helen Harman" + - name: "Mohammed Al-Khafajiy" + - name: "Alexandr Klimchik" + - name: "Athanasios Polydoros" + - name: "Zied Tayeb" + - name: "Sepehr Maleki" + - name: "Junfeng Gao" + - name: "Tom Duckett" + - name: "Mini Rai" + - name: "Amir Ghalamzan Esfahani" + diff --git a/requirements-frozen.txt b/requirements-frozen.txt index 04d1afa..78a6e9c 100644 --- a/requirements-frozen.txt +++ b/requirements-frozen.txt @@ -1,15 +1,23 @@ -aiohappyeyeballs==2.6.1 +bibtexparser==1.4.3 +doi2bib==0.4.0 +Flask==3.1.0 +flatten-dict==0.4.2 +networkx==3.4.2 +numpy==2.2.4 +pandas==2.2.3 +PyYAML==6.0.3 +requests==2.32.3 +pip==25.0.1 aiohttp==3.11.14 +yake==0.4.8 +## The following requirements were added by pip freeze: +aiohappyeyeballs==2.6.1 aiosignal==1.3.2 attrs==25.3.0 -bibtexparser==1.4.3 blinker==1.9.0 certifi==2025.1.31 charset-normalizer==3.4.1 click==8.1.8 -doi2bib==0.4.0 -Flask==3.1.0 -flatten-dict==0.4.2 frozenlist==1.5.0 future==1.0.0 idna==3.10 @@ -18,20 +26,15 @@ jellyfish==1.1.3 Jinja2==3.1.6 MarkupSafe==3.0.2 multidict==6.2.0 -networkx==3.4.2 -numpy==2.2.4 -pandas==2.2.3 propcache==0.3.0 pyparsing==3.2.1 python-dateutil==2.9.0.post0 pytz==2025.1 regex==2024.11.6 -requests==2.32.3 segtok==1.5.11 six==1.17.0 tabulate==0.9.0 tzdata==2025.1 urllib3==2.3.0 Werkzeug==3.1.3 -yake==0.4.8 yarl==1.18.3 diff --git a/requirements.txt b/requirements.txt index 4221f6f..8394f76 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 +5,7 @@ flatten-dict networkx numpy pandas +PyYAML requests pip aiohttp 
diff --git a/test-authors.yaml b/test-authors.yaml new file mode 100644 index 0000000..98d8138 --- /dev/null +++ b/test-authors.yaml @@ -0,0 +1,11 @@ +# the institution ID is used to ensure only authors from a specific institution are included + +filters: + # is_active: true + # is_public: true + # filter by group ID (set to None to disable) + institution_id: 1068 + +authors: + - name: "James Brown" + diff --git a/uoa11-authors.yaml b/uoa11-authors.yaml new file mode 100644 index 0000000..37fdf9a --- /dev/null +++ b/uoa11-authors.yaml @@ -0,0 +1,45 @@ +# the institution ID is used to ensure only authors from a specific institution are included + +filters: + # is_active: true + # is_public: true + # filter by group ID (set to None to disable) + institution_id: 1068 + +authors: + - name: "Paul Baxter" + - name: "Leonardo Guevara" + - name: "Miao Yu" + - name: "Francesco Del Duchetto" + - name: "Alexandr Klimchik" + - name: "Abimbola Sangodoyin" + - name: "Athanasios Polydoros" + - name: "Heriberto Cuayahuitl" + - name: "Fiona Strens" + - name: "Gautham Das" + - name: "Olivier Szymanezyk" + - name: "John Atanbori" + - name: "Hamna Aslam" + - name: "Themis Papaioannou" + - name: "Bashir Al-Diri" + - name: "Khaled Bachour" + - name: "Riccardo Polvara" + - name: "Ionut Moraru" + - name: "Renata Ntelia" + - name: "Charles Fox" + - name: "Simon Parsons" + - name: "Mohammed Al-Khafajiy" + - name: "James Brown" + orcid: "0000-0001-7636-4554" + - name: "Mark Doughty" + - name: "Christos Frantzidis" + - name: "Wenting Duan" + - name: "Yvonne James" + - name: "Kabiru Maiyama" + - name: "Mamatha Thota" + - name: "Patrick Dickinson" + - name: "Helen Harman" + - name: "Marc Hanheide" + orcid: "0000-0001-7728-1849" + - name: "Elizabeth Sklar" + - name: "Grzegorz Cielniak" \ No newline at end of file