diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 6124de90e4..5f2dd781d1 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -7,7 +7,7 @@ Hi there! Many thanks for taking an interest in improving the sanger-tol website
If you'd like to write some code for sanger-tol/pipelines-website, the standard workflow is as follows:
1. Check that there isn't already an issue about your idea in the
- [sanger-tol/pipelines-website issues](<[https://github.com/nf-core/nf-co.re/issues](https://github.com/sanger-tol/pipelines-website/issues)>) to avoid duplicating work.
+ [sanger-tol/pipelines-website issues](https://github.com/sanger-tol/pipelines-website/issues) to avoid duplicating work.
- If there isn't one already, please create one so that others know you're working on this
2. Fork the [sanger-tol/pipelines-website repository](https://github.com/sanger-tol/pipelines-website) to your GitHub account
3. Make the necessary changes / additions within your forked repository
@@ -23,4 +23,4 @@ For now, the only test is for Markdown syntax, using the `markdownlint` package.
## Getting help
-For further information or help, please [contact us](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana%2Cmuffato&labels=connect&projects=&template=contact_us.yaml&title=%5BContact+Us%5D%3A+).
+For further information or help, please [contact us](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=connect&projects=&template=contact_us.yaml&title=%5BContact+Us%5D%3A+).
diff --git a/.github/ISSUE_TEMPLATE/add_team.yaml b/.github/ISSUE_TEMPLATE/add_team.yaml
index f2a9640880..fca1e595cf 100644
--- a/.github/ISSUE_TEMPLATE/add_team.yaml
+++ b/.github/ISSUE_TEMPLATE/add_team.yaml
@@ -3,7 +3,7 @@ description: Add details to include your team details to the website
title: '[Team]: '
labels: ['team', 'enhancement']
assignees:
- - priyanka-surana
+ - muffato
body:
- type: markdown
attributes:
diff --git a/.github/ISSUE_TEMPLATE/add_team_members.yaml b/.github/ISSUE_TEMPLATE/add_team_members.yaml
index 10cbf3f187..2012c3b69c 100644
--- a/.github/ISSUE_TEMPLATE/add_team_members.yaml
+++ b/.github/ISSUE_TEMPLATE/add_team_members.yaml
@@ -3,7 +3,7 @@ description: Add your details to the team page on the website
title: '[Team member]: '
labels: ['team', 'enhancement']
assignees:
- - priyanka-surana
+ - muffato
body:
- type: markdown
attributes:
diff --git a/.github/ISSUE_TEMPLATE/add_team_projects.yaml b/.github/ISSUE_TEMPLATE/add_team_projects.yaml
index f4aec5d822..b231d1c838 100644
--- a/.github/ISSUE_TEMPLATE/add_team_projects.yaml
+++ b/.github/ISSUE_TEMPLATE/add_team_projects.yaml
@@ -3,7 +3,7 @@ description: Add project details to include in team page on the website
title: '[Project]: '
labels: ['team', 'enhancement']
assignees:
- - priyanka-surana
+ - muffato
body:
- type: markdown
attributes:
diff --git a/.github/ISSUE_TEMPLATE/add_tool.yaml b/.github/ISSUE_TEMPLATE/add_tool.yaml
index 101935fd6a..79d5f426ed 100644
--- a/.github/ISSUE_TEMPLATE/add_tool.yaml
+++ b/.github/ISSUE_TEMPLATE/add_tool.yaml
@@ -3,7 +3,7 @@ description: Add details to include your tool to the website
title: '[Tool]: '
labels: ['tool', 'enhancement']
assignees:
- - priyanka-surana
+ - muffato
body:
- type: markdown
attributes:
diff --git a/.github/ISSUE_TEMPLATE/contact_us.yaml b/.github/ISSUE_TEMPLATE/contact_us.yaml
index fdf851f1fa..2711ce84ed 100644
--- a/.github/ISSUE_TEMPLATE/contact_us.yaml
+++ b/.github/ISSUE_TEMPLATE/contact_us.yaml
@@ -3,7 +3,6 @@ description: Get in touch with the Tree of Life informatics teams.
title: '[Contact Us]: '
labels: ['connect']
assignees:
- - priyanka-surana
- muffato
body:
- type: markdown
diff --git a/.github/ISSUE_TEMPLATE/publications.yaml b/.github/ISSUE_TEMPLATE/publications.yaml
index a124341b7c..acedd17c8d 100644
--- a/.github/ISSUE_TEMPLATE/publications.yaml
+++ b/.github/ISSUE_TEMPLATE/publications.yaml
@@ -2,7 +2,7 @@ name: Add publications
description: Add publication to the sanger-tol website
title: '[Publication]: DOI'
assignees:
- - priyanka-surana
+ - muffato
body:
- type: markdown
attributes:
diff --git a/.github/rich-codex.yml b/.github/rich-codex.yml
deleted file mode 100644
index d429d652e9..0000000000
--- a/.github/rich-codex.yml
+++ /dev/null
@@ -1,48 +0,0 @@
-timeout: 300
-outputs:
- - command: nf-core --help
- img_paths:
- - public_html/assets/markdown_assets/developers/creating_with_nf_core/nfcore_help.svg
- - command: nf-core list
- head: 19
- img_paths:
- - public_html/assets/markdown_assets/developers/creating_with_nf_core/nfcore_list.svg
- - command: nf-core create --name demo --description "Pipeline for training" --author "Phil" --plain
- fake_command: nf-core create
- img_paths:
- - public_html/assets/markdown_assets/developers/creating_with_nf_core/nfcore_create.svg
- - command: cd nf-core-demo && git status
- fake_command: git status
- img_paths:
- - public_html/assets/markdown_assets/developers/creating_with_nf_core/git_status.svg
- - command: cd nf-core-demo && git branch
- fake_command: git branch
- img_paths:
- - public_html/assets/markdown_assets/developers/creating_with_nf_core/git_branch.svg
- - command: cd nf-core-demo && git log --oneline
- fake_command: git log --oneline
- img_paths:
- - public_html/assets/markdown_assets/developers/creating_with_nf_core/git_log.svg
- - command: nextflow run nf-core-demo/ -profile test,docker --outdir test_results
- title: nextflow run
- img_paths:
- - public_html/assets/markdown_assets/developers/creating_with_nf_core/nextflow_run_1.svg
- - command: cd nf-core-demo && nf-core lint && grep -rl TODO .
- fake_command: nf-core lint
- img_paths:
- - public_html/assets/markdown_assets/developers/creating_with_nf_core/nfcore_lint_warnings.svg
- - command: |
- cd nf-core-demo && \
- grep -rl TODO . | xargs sed -i 's/TODO//g' && \
- echo "ADDED" >> CODE_OF_CONDUCT.md && \
- nf-core lint
- fake_command: nf-core lint
- img_paths:
- - public_html/assets/markdown_assets/developers/creating_with_nf_core/nfcore_lint_failure.svg
- - command: nextflow run nf-core-demo/ --help
- img_paths:
- - public_html/assets/markdown_assets/developers/creating_with_nf_core/nextflow_run_help.svg
- - command: nextflow run nf-core-demo/ -profile test,docker
- title: nextflow run
- img_paths:
- - public_html/assets/markdown_assets/developers/creating_with_nf_core/nextflow_run_no_outdir.svg
diff --git a/.github/workflows/rich-codex.yml b/.github/workflows/rich-codex.yml
deleted file mode 100644
index f7c434ac1d..0000000000
--- a/.github/workflows/rich-codex.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-name: Generate images for docs
-on:
- workflow_dispatch:
-jobs:
- rich_codex:
- runs-on: ubuntu-latest
- steps:
- - name: Check out the repo
- uses: actions/checkout@v3
- - name: Set up Python
- uses: actions/setup-python@v3
- with:
- python-version: 3.x
-
- - name: Install Nextflow
- uses: nf-core/setup-nextflow@v1
-
- - name: Install nf-core/tools
- run: pip install nf-core
-
- - name: Generate terminal images with rich-codex
- # Change back to @v1 when v1.2.7 is released
- uses: ewels/rich-codex@main
- env:
- COLUMNS: 100
- HIDE_PROGRESS: 'true'
- NXF_ANSI_LOG: false
- with:
- commit_changes: 'true'
- terminal_width: 100
- skip_git_checks: 'true'
diff --git a/.prettierignore b/.prettierignore
index 27b26971f6..a195bd713c 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -19,7 +19,6 @@ public_html/tools/docs/
nfcore_stats.json
nfcore_issue_stats.json
markdown/pipelines
-markdown/tools
api_cache
contributor_stats
update.log
@@ -43,4 +42,4 @@ php.ini
node_modules
vendor
backstop_data
-.mysql-data/
\ No newline at end of file
+.mysql-data/
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index e44bd599b6..554d9cad52 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -50,7 +50,7 @@ Members of the Safety Team (the Safety Officers) are responsible for clarifying
The Safety Team have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this CoC, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
-Members of the Safety Team who violate the CoC will be required to recuse themselves pending investigation. They will not have access to any reports of the violations and will be subject to the same actions as others in violation of the CoC.
+Members of the Safety Team who violate the CoC will be required to recuse themselves pending investigation. They will not have access to any reports of the violations and will be subject to the same actions as others in violation of the CoC.
## When and where does this Code of Conduct apply?
@@ -122,7 +122,7 @@ After you file a report, one or more members of our Safety Team will contact you
All reports will be read and handled by the members of the Safety Team at sanger-tol.
-If members of the Safety Team are deemed to have a conflict of interest with a report, they will be required to recuse themselves as per our Code of Conduct and will not have access to any follow-ups.
+If members of the Safety Team are deemed to have a conflict of interest with a report, they will be required to recuse themselves as per our Code of Conduct and will not have access to any follow-ups.
To keep this first report confidential from any of the Safety Team members, please submit your first report by direct email to any of the safety officers you are comfortable disclosing the information to, and be explicit about which member(s) you do not consent to sharing the information with.
diff --git a/README.md b/README.md
index 080eda4573..41291315e5 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@ docker compose up
You should then be able to access the website in your browser at [http://localhost:8888/](http://localhost:8888/).
If you prefer, you can also use a tool such as [MAMP](https://www.mamp.info/) - if so,
-set the base directory to `/path/to/nf-co.re/public_html` in _Preferences > Web-Server > Document Root_ and then hit _Start Servers_.
+set the base directory to `/path/to/pipelines-website/public_html` in _Preferences > Web-Server > Document Root_ and then hit _Start Servers_.
Most of the hand-written text is in `/markdown`, to make it easier to write. The PHP files in `/public_html` then parse this into HTML dynamically, if supplied with a filename.
@@ -160,11 +160,12 @@ If you are looking forward to contribute to the website or add your institution
## Community
-If you have any questions or issues, please [let us know](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana%2Cmuffato&labels=connect&projects=&template=contact_us.yaml&title=%5BContact+Us%5D%3A+).
+If you have any questions or issues, please [let us know](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=connect&projects=&template=contact_us.yaml&title=%5BContact+Us%5D%3A+).
## Credits
-Priyanka Surana ([@priyanka-surana](http://github.com/priyanka-surana/)) manages the content and Guoying Qi ([@gq1](https://github.com/gq1)) manages the website. Many individuals, especially Matthieu Muffato ([@muffato](http://github.com/muffato)), have made various contributions.
+Matthieu Muffato ([@muffato](http://github.com/muffato)) manages the content and Guoying Qi ([@gq1](https://github.com/gq1)) manages the website.
+Many individuals, especially Priyanka Surana ([@priyanka-surana](http://github.com/priyanka-surana/)), have made various contributions.
Phil Ewels ([@ewels](http://github.com/ewels/)) built the original nf-core website.
More recently, [@mashehu](https://github.com/mashehu) has done a great deal of work with the code.
diff --git a/composer.json b/composer.json
index ff1dd17e8c..19c1aa224a 100644
--- a/composer.json
+++ b/composer.json
@@ -1,6 +1,6 @@
{
- "name": "nf-core/nf-co.re",
- "description": "PHP dependencies for the main nf-core website.",
+ "name": "sanger-tol/pipelines-website",
+ "description": "PHP dependencies for the sanger-tol pipelines website.",
"require": {
"abraham/twitteroauth": "^2.0",
"spatie/calendar-links": "1.8",
diff --git a/includes/footer.php b/includes/footer.php
index a39672c85d..3eb88fdeac 100644
--- a/includes/footer.php
+++ b/includes/footer.php
@@ -86,35 +86,27 @@
Read how to configure the Seqera Platform CLI here.
diff --git a/includes/search_results.php b/includes/search_results.php
index 220a498368..dff9035fce 100644
--- a/includes/search_results.php
+++ b/includes/search_results.php
@@ -4,7 +4,7 @@
require '../vendor/autoload.php';
use Spyc;
-// $search_term - should be availble from include
+// $search_term - should be available from include
$search_results = [
'pipelines' => [],
'documentation' => [],
diff --git a/markdown/about.md b/markdown/about.md
index d5bc95b289..eab8b111d0 100644
--- a/markdown/about.md
+++ b/markdown/about.md
@@ -4,21 +4,73 @@ The sanger-tol project is the informatics branch of the Tree of Life programme a
Here you can read how we organise ourselves, how we are funded and how the sanger-tol project was started.
We welcome external contributions and collaboration on our projects.
-If you'd like to be involved, [drop us a message](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana%2Cmuffato&labels=connect&projects=&template=contact_us.yaml&title=%5BContact+Us%5D%3A+) or comment directly on an existing GitHub issue.
+If you'd like to be involved, [drop us a message](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=connect&projects=&template=contact_us.yaml&title=%5BContact+Us%5D%3A+) or comment directly on an existing GitHub issue.
Please note that all community members are expected to adhere to our [code of conduct](/code_of_conduct), which is adopted from [nf-core](https://nf-co.re).
## Informatics Infrastructure team {#it}
-The [Informatics Infrastructure team](/it) provides support for the production of reference genome assemblies and large-scale genome analyses in the Tree of Life programme, and helps with the management and use of informatics resources and digital solutions. Current members are listed below:
+The Informatics Infrastructure team provides support for the production of reference genome assemblies and large-scale genome analyses in the Tree of Life programme, and helps with the management and use of informatics resources and digital solutions.
-- [Matthieu Muffato](/it#matthieu-muffato)
-- [Guoying Qi](/it#guoying-qi)
-- [Priyanka Surana](/it#priyanka-surana)
-- [Cibin Sadasivan Baby](/it#cibin-sadasivan-baby)
-- [Cibele Sotero-Caio](/it#cibele-sotero-caio)
-- [Paul Davis](/it#paul-davis)
-- [Beth Yates](/it#beth-yates)
+The team is organised into three poles.
+
+**Data management**: Our data curators and managers maintain the integrity, consistency, and quality of multiple databases used in production, including [Genomes on a Tree (GoaT)](/tools#genomes-on-a-tree-goat), [Collaborative Open Plant Omics (COPO)](https://copo-project.org/), the [European Nucleotide Archive](https://www.ebi.ac.uk/ena/browser/home), and our internal sample-tracking systems.
+
+**Bioinformatics**: Our bioinformaticians develop the suite of analysis pipelines that will run on every genome produced in Tree of Life, providing a central database of core results available for all.
+
+**Systems**: We develop and maintain some core systems used in production, including the execution and tracking of all bioinformatics pipelines, and the deployment of third-party web applications for internal use.
+
+### Tech stack {#it-tech-stack}
+
+
+The team uses a wide range of technologies, frameworks and programming languages, including Nextflow, Python, Conda, Jira, LSF, Singularity, and Kubernetes. The technology wheel below shows most of their logos. How many can you recognise?
+
+### Members {#it-members}
+
+Current members are listed below:
+
+- [Matthieu Muffato](#matthieu-muffato)
+- [Guoying Qi](#guoying-qi)
+- [Cibin Sadasivan Baby](#cibin-sadasivan-baby)
+- [Cibele Sotero-Caio](#cibele-sotero-caio)
+- [Paul Davis](#paul-davis)
+- [Beth Yates](#beth-yates)
+
+#### Matthieu Muffato, Team Lead {#matthieu-muffato}
+
+[](https://www.sanger.ac.uk/person/muffato-matthieu/) [](https://github.com/muffato) [](https://www.linkedin.com/in/matthieu-muffato/)
+ Matthieu leads the Informatics Infrastructure team, which guides the implementation and delivery of the genome assembly pipelines, and provides support for large-scale genome analyses for the Tree of Life faculty teams. He joined the Wellcome Sanger Institute in February 2021, to form the Informatics Infrastructure team for the Tree of Life programme. He has recruited 7 team members, with skills covering data curation & management, software development & operations, and bioinformatics.
+
+#### Guoying Qi, DevOps Software Developer {#guoying-qi}
+
+[](https://github.com/gq1) [](https://www.linkedin.com/in/guoying-qi/)
+ Guoying, a DevOps software engineer, has the responsibility of developing and deploying software and web applications for the Tree of Life project across various platforms such as computing farms, Kubernetes, OpenStack, and public clouds.
+
+#### Cibin Sadasivan Baby, Senior Software Developer {#cibin-sadasivan-baby}
+
+[](https://github.com/cibinsb) [](https://www.linkedin.com/in/cibinsb/)
+ Cibin, a Senior Software Developer, is tasked with designing and implementing the production systems for TOL-IT. Currently, Cibin is focused on building an automated platform to execute high-throughput genomic pipelines. The ultimate goal of this project is to develop a system capable of efficiently processing large amounts of genomic data.
+
+#### Cibele Sotero-Caio, Genomic Data Curator {#cibele-sotero-caio}
+
+[](https://www.sanger.ac.uk/person/sotero-caio-cibele) [](https://github.com/ccaio) [](https://www.linkedin.com/in/cibele-sotero-caio-b379071a6/) [](https://twitter.com/CibeleCaio)
+ Cibele is the data curator for Genomes on a Tree (GoaT), a platform developed to support the Tree of Life and other sequencing initiatives of the Earth BioGenome Project (EBP).
+
+#### Paul Davis, Data Manager {#paul-davis}
+
+[](https://www.sanger.ac.uk/person/davis-paul/) [](https://github.com/Paul-Davis) [](https://www.linkedin.com/in/paul-davis-uk/) [](https://twitter.com/SirPaulDavis)
+ Paul works on the main ToL Genome Engine. This system was developed by the ToL to manage and track samples from collection, onboarding, processing in the lab, and sequencing, through to the publication of the assembly and its Genome Note. As there are many steps in this process, developing methodology to identify issues as early as possible is vital to avoid wasting time and resources. Paul works at all levels of the project, fielding questions about data flow and data fixes, and helping other ToL staff and project stakeholders with data and information. Paul also interacts with external groups and stakeholders to maintain data integrity in the public domain.
+
+#### Beth Yates, Bioinformatics Engineer {#beth-yates}
+
+[](https://github.com/BethYates) [](https://www.linkedin.com/in/bethanyates/)
+ Beth is a Bioinformatics Engineer working on building a platform to automate the production of [Genome Note publications](https://wellcomeopenresearch.org/treeoflife). The Universal Genome Note platform consists of a web portal, database and Nextflow pipelines. Beth is contributing to the genomenote pipeline, which fetches assembly metadata and generates some of the figures and statistics included in each genome note.
+
+#### Alumni and Friends {#alumni}
+
+- Alexander Ramos-Díaz, Google Summer of Code contributor. Alexander kick-started the development of the [BlobToolKit](/blobtoolkit) pipeline through the [2022 edition of Google Summer of Code](https://summerofcode.withgoogle.com/archive/2022/organizations/wellcome-sanger-institute).
+- Zaynab Butt, Informatics and Digital Associate. Zaynab continued the development of the [BlobToolKit](/blobtoolkit) pipeline, and then developed the web interface of our internal tool for tracking workflow execution.
+- Priyanka Surana, Senior Bioinformatician. Priyanka oversaw the development of Nextflow pipelines for genome assembly, curation and downstream analyses. She is passionate about building networks that support peer learning and facilitated the workflows community on campus.
## Safety team {#safety}
@@ -49,6 +101,6 @@ Almost half a decade and one pandemic later, Tree of Life has grown solid roots
Other projects have popped up alongside Darwin Tree of Life. For example, the Aquatic Symbiosis Genomics project which explores the genomics of symbiotic organisms in the ocean. Or BIOSCAN in the UK, which will study one million flying insects over five years to help better understand and monitor the health of their ecosystems. Tree of Life now has four full-time faculty, plus associate faculty members, exploring a multitude of genomic questions from rapid speciation to strange reproduction.
-Key to being able to produce top-quality genomes at scale is building top-quality informatics systems. To support genome production, Tree of Life teams have either adopted existing softwares and tailored them to the programmeโs needs, or developed bespoke platforms from scratch such as the Samples Tracking System. Within the bioinformatics teams, who piece together the DNA data into whole genome assemblies, new tools are being developed all the time to automate and increase efficiency, and to tackle specific issues, for example mitochondrial genome assemblies or contamination by DNA from other species. The breadth of talent in these teams ranges from early-career researchers to experienced hands whose time at Sanger can be traced back to the original Human Genome Project.
+Key to being able to produce top-quality genomes at scale is building top-quality informatics systems. To support genome production, Tree of Life teams have either adopted existing software and tailored it to the programme's needs, or developed bespoke platforms from scratch such as the Samples Tracking System. Within the bioinformatics teams, who piece together the DNA data into whole genome assemblies, new tools are being developed all the time to automate and increase efficiency, and to tackle specific issues, for example mitochondrial genome assemblies or contamination by DNA from other species. The breadth of talent in these teams ranges from early-career researchers to experienced hands whose time at Sanger can be traced back to the original Human Genome Project.
Together, these scientific endeavours, using the latest DNA sequencing technology, will enable us to tackle the extinction crisis, discover new biomedicines and biotechnologies, and better understand 3.5 billion years of life on Earth.
diff --git a/markdown/contributing b/markdown/contributing
deleted file mode 120000
index 1fa9382266..0000000000
--- a/markdown/contributing
+++ /dev/null
@@ -1 +0,0 @@
-developers/
\ No newline at end of file
diff --git a/markdown/developers/credit-policy.md b/markdown/developers/credit-policy.md
new file mode 100644
index 0000000000..2366212cdc
--- /dev/null
+++ b/markdown/developers/credit-policy.md
@@ -0,0 +1,37 @@
+---
+title: Credit Policy
+subtitle: Guidelines for crediting people's contributions
+---
+
+Here we outline how to credit the work of others as it is incorporated into your pipeline. It is important that we acknowledge those who support our work.
+
+## In scripts
+
+If the original concept of your script or parts of the code are borrowed, please acknowledge with a comment at the top.
+
+```
+# This script is based on https://github.com/nf-core/rnaseq/blob/master/bin/check_samplesheet.py
+# or
+# This script was originally conceived by @github-username
+```
+
+## In `README.md`
+
+Under `Credits` in `README.md`
+
+- Acknowledge those who wrote the scripts and local modules
+- Acknowledge those who provide concepts and ideas
+- Acknowledge the code reviewers and those who contributed through discussions
+- Acknowledge nf-core and Nextflow Slack community members for answering queries
+
+## In `CITATIONS.md`
+
+List all pipeline tools with links and references.
+
+```
+- [STAR](https://pubmed.ncbi.nlm.nih.gov/23104886/)
+
+ > Dobin A, Davis CA, Schlesinger F, Drenkow J, Zaleski C, Jha S, Batut P, Chaisson M, Gingeras TR. STAR: ultrafast universal RNA-seq aligner Bioinformatics. 2013 Jan 1;29(1):15-21. doi: 10.1093/bioinformatics/bts635. Epub 2012 Oct 25. PubMed PMID: 23104886; PubMed Central PMCID: PMC3530905.
+
+- [BBMap](https://sourceforge.net/projects/bbmap/)
+```
diff --git a/markdown/developers/env.md b/markdown/developers/env.md
new file mode 100644
index 0000000000..6c97b8afc5
--- /dev/null
+++ b/markdown/developers/env.md
@@ -0,0 +1,91 @@
+---
+title: Pipeline Development Environment
+subtitle: Where and how we develop our pipelines
+---
+
+## GitHub usage
+
+### Repository organisation
+
+The default branch of our repositories is called `main`, following the new GitHub convention, as opposed to `master` in [nf-core/modules](https://github.com/nf-core/modules).
+
+The `main` and `dev` branches are "protected" and can only be modified with pull-requests.
+
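+A minimal sketch of the resulting workflow (the branch name and commit message are placeholders, and the `gh` step is optional):
+
+```bash
+# All changes to dev/main go through a feature branch and a pull-request
+git switch dev && git pull                # start from the up-to-date dev branch
+git switch -c fix/usage-docs-typo         # placeholder branch name
+# ... edit files ...
+git commit -am "Fix typo in usage docs"   # placeholder commit message
+git push -u origin fix/usage-docs-typo
+gh pr create --base dev --fill            # or open the pull-request on github.com
+```
+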
+### Access
+
+- All our pipeline repositories are public: everyone has read access.
+- All sanger-tol Nextflow developers have write access to all Nextflow repositories.
+ - Direct push to `main` and `dev` is not allowed, since the two branches are protected.
+- Within each team (Tree of Life Assembly and Informatics Infrastructure), everyone has ["maintain" access](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/managing-repository-roles/repository-roles-for-an-organization#permissions-for-each-role) to the team repositories.
+- A selection of people from the Informatics Infrastructure team have ["admin" access](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/managing-repository-roles/repository-roles-for-an-organization#permissions-for-each-role) to all Nextflow repositories.
+- @muffato and @mcshane are ["owners"](https://docs.github.com/en/organizations/managing-peoples-access-to-your-organization-with-roles/roles-in-an-organization#permissions-for-organization-roles).
+- Additional contributors can be added, if sponsored by a repository admin.
+
+## Farm environment
+
+To develop and run pipelines on our LSF compute farms, first make sure you have set up the "module" and "Conda" environments as per https://ssg-confluence.internal.sanger.ac.uk/display/TOL/Farm+environment
+
+Modules are used to expose Nextflow on its own:
+
+```
+$ module load nextflow/23.10.0-5889
+$ nextflow -version
+
+ N E X T F L O W
+ version 23.10.0 build 5889
+ created 15-10-2023 15:07 UTC (16:07 BST)
+ cite doi:10.1038/nbt.3820
+ http://nextflow.io
+```
+
+Conda environments are used to expose the nf-core command-line tool, together with Nextflow and some development helpers like `prettier` or `nf-test`:
+
+```
+$ conda activate nf-core_2.11
+$ nextflow -version
+
+ N E X T F L O W
+ version 23.04.1 build 5866
+ created 15-04-2023 06:51 UTC (07:51 BST)
+ cite doi:10.1038/nbt.3820
+ http://nextflow.io
+
+$ nf-core --version
+
+ ,--./,-.
+ ___ __ __ __ ___ /,-._.--~\
+ |\ | |__ __ / ` / \ |__) |__ } {
+ | \| | \__, \__/ | \ |___ \`-._,-`-,
+ `._,._,'
+
+ nf-core/tools version 2.11.1 - https://nf-co.re
+ There is a new version of nf-core/tools available! (2.13.1)
+
+
+nf-core, version 2.11.1
+
+$ nf-test version
+nf-test 0.8.2
+https://code.askimed.com/nf-test
+(c) 2021 - 2023 Lukas Forer and Sebastian Schoenherr
+
+Nextflow Runtime:
+ >
+ N E X T F L O W
+ version 23.04.1 build 5866
+ created 15-04-2023 06:51 UTC (07:51 BST)
+ cite doi:10.1038/nbt.3820
+ http://nextflow.io
+
+$ prettier --version
+3.1.1
+```
+
+Java programs are not allowed to run on the head nodes, so you will have to submit all your Nextflow commands to LSF.
+When developing a pipeline, you may want to do all your development from an interactive LSF job.
+
+In our experience, `nextflow run` commands need 1 CPU and 6 GB RAM for themselves, but of course more if you want to use the "local" executor.
+For all sanger-tol and nf-core pipelines, we recommend using the ["sanger" profile](https://github.com/nf-core/configs/blob/master/conf/sanger.config)
+which automatically activates LSF job submission using the appropriate queues based on each job's parameters.
+When asking Nextflow to submit jobs to LSF, please submit the `nextflow run` command itself to the `oversubscribed` queue,
+which is designed for workflow managers so that they do not take up unnecessary compute resources.
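+
+As an illustration, a `nextflow run` command could be submitted to LSF like this (the resource figures follow the guidance above; the pipeline name and profiles are only examples):
+
+```bash
+# Submit the Nextflow driver itself to the oversubscribed queue (1 CPU, 6 GB),
+# and let the "sanger" profile dispatch the actual pipeline jobs to LSF queues.
+bsub -q oversubscribed -n 1 -M 6000 -R "select[mem>6000] rusage[mem=6000]" \
+    -o pipeline.%J.log \
+    "nextflow run sanger-tol/genomenote -profile sanger,singularity --outdir results"
+```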
diff --git a/markdown/developers/modules.md b/markdown/developers/modules.md
index 13da2cda61..d35ac77da0 100644
--- a/markdown/developers/modules.md
+++ b/markdown/developers/modules.md
@@ -3,12 +3,51 @@ title: Module Writing Guidelines
subtitle: Guidelines on writing a sanger-tol compliant module
---
-## NF-Core Module Guidelines
+## nf-core Module Guidelines
-Generally, modules should conform to the [NF-Core Standards](https://nf-co.re/docs/contributing/modules#new-module-guidelines-and-pr-review-checklist). However, there are a small number of cases where this is not possible.
+Generally, modules should conform to the [nf-core standards](https://nf-co.re/docs/contributing/modules#new-module-guidelines-and-pr-review-checklist). However, there are a small number of cases where this is not possible.
## Sanger-tol additions
+### Pipeline dependencies
+
+To ensure the portability of our pipelines, all pipeline dependencies have to be wrapped into containers.
+Here is the decision tree you can use to decide how software is packaged and used in pipelines.
+
+
+
+Reference URLs:
+
+- Conda search:
+- BioContainers search:
+- Docker hub search:
+- quay.io (another registry of Docker containers, esp. all BioContainers) search:
+ - Our own container registry on quay.io:
+- Our own container registry on GitHub:
+- Our internal container registry on GitLab:
+- Repository of Singularity images by the Galaxy project (should contain all BioContainers):
+- Minimal Dockerfile for creating a container around a Conda package:
+- Instructions to create a new Docker image (when the tool's usage or scope is limited to sanger-tol):
+
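+A common outcome of that decision tree is to reuse an existing BioContainers image. As a small illustrative sketch (the tool and tag below are examples, not a recommendation):
+
+```bash
+# Pull a ready-made BioContainers image as a Docker container...
+docker pull quay.io/biocontainers/samtools:1.17--h00cdaf9_0
+
+# ...or as a Singularity image from the Galaxy project's depot
+singularity pull https://depot.galaxyproject.org/singularity/samtools:1.17--h00cdaf9_0
+```
+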
+### Nextflow wrapping
+
+Here is the decision tree you can use to decide whether to make a nf-core module or a local one, and how to ship a tool or script with the pipeline.
+
+
+
+### Versioning
+
+Assign a version to each script independently; the simplest scheme is to start at 1.0 and increment it each time you change the script.
+To simplify maintenance of the script, the module, and the pipeline, we recommend implementing in each script a way of printing a short usage message and a version number. This will also help you remember what the script is for and what arguments to pass!
+
+Here is an example one-liner you can plug at the top of bash scripts:
+
+```bash
+if [ $# -ne 2 ]; then echo -e "Script to extract a sequence from a Fasta file.\nUsage: $0 <fasta> <sequence_name>\nVersion 1.0"; exit 1; fi
+```
+
+In Python, you can do the same with the `argparse` package, as in [create_table.py#L23](https://github.com/sanger-tol/genomenote/blob/1.1.0/bin/create_table.py#L23).
+
### Pipeline Comments (Optional)
Reading the code of a pipeline can be daunting, especially if picking up the development for said pipeline. And so we propose that there should be structured comments to explain the context of implementation at each module, subworkflow and workflow.
@@ -161,7 +200,7 @@ The original implementation of this module took a cram file and whilst reading,
Nextflow cannot manipulate a data stream passing between two modules. This required us to create a module to pre compute the 10,000 container regions of interest in the cram file (in the form of a csv) and pass these as arguments to the cram*et al* module. Whilst not as performant as the original implementation (due to the small overhead created by Nextflow and the csv generation between parameter generation and cram*et al*), this is much more performant (in terms of compute resources and IO impact) than splitting the cram file into n (( total number of container / 10,000 ) \* no. of cram files) number of files before further manipulation with the next 4 commands. This means that the TreeVal implementation is the best case scenario, as shown below.
-
+
#### Reasons for:
diff --git a/markdown/developers/review_checklist.md b/markdown/developers/review_checklist.md
index ab9f418b26..11ba369de7 100644
--- a/markdown/developers/review_checklist.md
+++ b/markdown/developers/review_checklist.md
@@ -11,7 +11,7 @@ Pipeline developers are recommended to create **modular and small pull requests
Think about that _before_ writing the code and opening the pull-request, as breaking down a PR into multiple ones can be tricky.
As a rule of thumb, a PR should not add more than one sub-workflow, a sub-workflow should not contain more than ten steps. A PR can modify multiple sub-workflows, as long as the changes are related.
-The role of the reviewer is to check for adherence to the central principles of nf-core and sanger-tol (reproducibility, execellent reporting, documented, keeping to the template etc.,). Here we provide a general set of suggestions when doing pipeline reviews:
+The role of the reviewer is to check for adherence to the central principles of nf-core and sanger-tol (reproducibility, excellent reporting, documentation, keeping to the template, etc.). Here we provide a general set of suggestions when doing pipeline reviews:
The instructions below are subject to interpretation and specific scenarios. If in doubt, please ask for feedback.
@@ -59,7 +59,7 @@ All sanger-tol pipelines _should_ follow the following guidelines, if possible /
## Do: Documentation {#documentation}
-- Documention is only on the pipelines website (not pointed to other places, e.g. not readthedocs )
+- Documentation is only on the pipelines website (not pointed to other places, e.g. not readthedocs)
- Is documentation sufficiently described (`usage.md`, `output.md`, `nextflow_schema.json`)?
- nextflow_schema.json: check if types are correct and that `default` and `enum` are used where applicable
- Are there any typos in the documentation (`usage.md`, `output.md`, `nextflow_schema.json`)
@@ -96,7 +96,7 @@ We hope that the nf-core best practices, tooling and community are helpful for a
If a pipeline is found to be violating the standards and guidelines, you should try to address the problems with the pipeline maintainers through discussion. Hopefully the pipeline can then be updated so that it adheres to the guidelines.
-All members of the sanger-tol community must adhere to the [sanger-tol code of conduct](https://pipelines.tol.sanger.ac.uk/code_of_conduct).
+All members of the sanger-tol community must adhere to the [sanger-tol code of conduct](/code_of_conduct).
The guidelines and actions within the code of conduct take precedence over the development guidelines described in this page.
## Guidelines
@@ -115,7 +115,7 @@ Do _not_ fork sanger-tol repositories.
When new pipelines are added to sanger-tol, please transfer ownership to sanger-tol instead of forking it.
-If you have already forked your pipeline to sanger-tol, you can [email GitHub support](https://support.github.com/contact?subject=Reroute%20a%20Fork&tags=rr-forks) and request that they reroute the fork. Alternatively, [contact the IT team](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana%2Cmuffato&labels=connect&projects=&template=contact_us.yaml&title=%5BContact+Us%5D%3A+) and we may be able to help.
+If you have already forked your pipeline to sanger-tol, you can [email GitHub support](https://support.github.com/contact?subject=Reroute%20a%20Fork&tags=rr-forks) and request that they reroute the fork. Alternatively, [contact the IT team](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=connect&projects=&template=contact_us.yaml&title=%5BContact+Us%5D%3A+) and we may be able to help.
**Disable GitHub features for forks**
@@ -171,11 +171,11 @@ Software versions must be static and stable. Labels such as `latest`, `dev`, `ma
### Continuous integration testing
-Pipelines must have automated continuous integration testing, running using GitHub Actions. There must be a small dataset that can be tested on GitHub directly, and a larger one that can be tested on the Sanger farm using Nextflow Tower.
+Pipelines must have automated continuous integration testing, running using GitHub Actions. There must be a small dataset that can be tested on GitHub directly, and a larger one that can be tested on the Sanger farm using Seqera Platform.
There must be a config `profile` called `test` that should be as comprehensive as possible - that is, it should run as much of the pipeline as possible. It should use as tiny test data set as possible (even if the output that it creates is meaningless).
-Then, we configure the integration with Nextflow Tower to allow testing the larger dataset (`test_full`) on the Sanger LSF farm. To set up that up, first add the profile `cleanup { cleanup = true }` to your `nextflow.config` (right at the beginning of the `profiles` section). This is to control the amount of space taken on Lustre. Then, copy the two files [`sanger_test.yml`](https://github.com/sanger-tol/insdcdownload/blob/dev/.github/workflows/sanger_test.yml) and [`sanger_test_full.yml`](https://github.com/sanger-tol/insdcdownload/blob/dev/.github/workflows/sanger_test_full.yml) to your `.github/workflows/`. Ask @muffato to enable the Tower integration for your repository.
+Then, we configure the integration with Seqera Platform to allow testing the larger dataset (`test_full`) on the Sanger LSF farm. To set that up, first add the profile `cleanup { cleanup = true }` to your `nextflow.config` (right at the beginning of the `profiles` section). This is to control the amount of space taken on Lustre. Then, copy the two files [`sanger_test.yml`](https://github.com/sanger-tol/insdcdownload/blob/dev/.github/workflows/sanger_test.yml) and [`sanger_test_full.yml`](https://github.com/sanger-tol/insdcdownload/blob/dev/.github/workflows/sanger_test_full.yml) to your `.github/workflows/`. Ask @muffato to enable the Seqera Platform integration for your repository.
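+
+For reference, the small `test` profile can be exercised locally in the same way the CI does (the pipeline name and container engine below are only examples):
+
+```bash
+# Run the pipeline's bundled tiny test dataset; swap docker for singularity/conda as needed
+nextflow run sanger-tol/insdcdownload -profile test,docker --outdir test-results
+```
+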
### Semantic versioning
diff --git a/markdown/events/2022/hackathon-march-2022.md b/markdown/events/2022/hackathon-march-2022.md
new file mode 100644
index 0000000000..77a01d3994
--- /dev/null
+++ b/markdown/events/2022/hackathon-march-2022.md
@@ -0,0 +1,351 @@
+---
+title: Hackathon - March 2022
+subtitle: A virtual online hackathon to develop nf-core together
+type: hackathon
+start_date: '2022-03-16'
+start_time: '10:00+01:00'
+end_date: '2022-03-18'
+end_time: '18:00+01:00'
+youtube_embed: https://www.youtube.com/watch?v=yZ8xX4Jk4zU
+location_name: Gather town and Slack.
+location_url: https://gather.town/
+---
+
+> Organised by [nf-core](https://nf-co.re/events/2022/hackathon-march-2022/)
+
+# Welcome
+
+Please join us for the March 2022 nf-core hackathon!
+The theme for this hackathon will be _Documentation_.
+See [below](#topic-documentation) for details.
+
+This event is entirely virtual (we will be back in [gather.town](https://gather.town/)!) and we are attempting to support people in all timezones :earth_americas: :earth_asia: :earth_africa:
+
+
+
+# Topic: Documentation
+
+The past twelve months have brought some major changes to the nf-core community, with the migration to Nextflow DSL2.
+The nf-core website and documentation is a crucial part of the community.
+In this hackathon, the focus will be _all things documentation_.
+
+Generally speaking, tasks during the hackathon will typically involve going through tutorials,
+testing each command to make sure it works, and checking phrasing and descriptions to ensure they are clear.
+
+> Most nf-core documentation is written in _Markdown_.
+> If you're not yet familiar with Markdown, please take a few minutes to work through this tutorial:
+
+To keep things manageable, we will organise ourselves into groups.
+You are free to change groups as you wish during the hackathon.
+
+We will also have additional events during the hackathon.
+These will be a presentation on making pipeline-related graphics and a drop-in help desk for customising MultiQC reports. Please see [below](#talks) for more information.
+
+We will coordinate our work and the issues we are working on using a single GitHub [ Project Board](https://github.com/orgs/nf-core/projects/27). There will be a separate tab for each Group where we have collated a list of relevant issues.
+
+## Group 1 - Pipeline documentation
+
+Slack group: [`hackathon-march2022-pipelines`](https://nfcore.slack.com/channels/hackathon-march2022-pipelines)
+Group Leads: [James Fellows Yates](https://github.com/jfy133), [Maxime Garcia](https://github.com/maxulysse)
+[HackMD](https://hackmd.io/mnAm84mOQLKO0BqGBVuvUg)
+
+This group will focus on nf-core pipelines themselves.
+This is perfect for anyone already familiar with specific pipelines.
+
+- Writing, updating and reviewing documentation for specific pipelines
+- Improving descriptions of the output files generated and how to use them
+- Adding more detailed parameter help
+- Writing usage tutorials
+- Constructing figures and diagrams (eg. pipeline overviews, output documentation)
+
+## Group 2 - DSL2 / modules documentation
+
+Slack group: [`hackathon-march2022-modules`](https://nfcore.slack.com/channels/hackathon-march2022-modules)
+Group Leads: [Harshil Patel](https://github.com/drpatelh), [Edmund Miller](https://github.com/edmundmiller), [Gregor Sturm](https://github.com/grst), [Friederike Hanssen](https://github.com/FriederikeHanssen)
+[HackMD](https://hackmd.io/bdEPR9COS72hj74EmUMyJQ)
+
+The pace of development with DSL2 and nf-core/modules has been a little crazy over the past months.
+Now that things are settling down, we need to bring the website documentation up to date with the current state of the tools.
+
+- Reviewing and updating all general nf-core documentation to DSL2
+- Writing new tutorials on how to write DSL2 modules and pipelines
+
+## Group 3 - Training documentation: carpentries, tutorials
+
+Slack group: [`hackathon-march2022-training`](https://nfcore.slack.com/channels/hackathon-march2022-training)
+Group Leads: [Gisela Gabernet](https://github.com/ggabernet), [Mahesh Binzer-Panchal](https://github.com/mahesh-panchal)
+[HackMD](https://hackmd.io/qLpnWT1OTYWiqSMcQ3gSZA)
+
+There is an increasing amount of Nextflow and nf-core training material available.
+Tutorials and walkthroughs are perhaps some of the most important docs to keep up to date,
+as they are often the first thing that newcomers to the community are exposed to.
+
+This is a good topic for newcomers to the community - you can run through tutorials and learn about Nextflow and nf-core,
+whilst keeping an eye out for things that are not clear or can be improved.
+
+- Reviewing and updating existing Nextflow and nf-core training documentation
+- Search for and review the latest Nextflow tutorials available online
+
+## Group 4 - Website tasks, nf-core/tools package documentation
+
+Slack group: [`hackathon-march2022-tools`](https://nfcore.slack.com/channels/hackathon-march2022-tools)
+Group Leads: [Matthias Hörtenhuber](https://github.com/mashehu/), [Phil Ewels](https://github.com/ewels/)
+[HackMD](https://hackmd.io/9aqPL29_R1ScXO3EvJJvrQ)
+
+All that documentation has to go somewhere!
+This group will focus on restructuring and organising the documentation on the website to improve navigation and user experience.
+
+- Review and update documentation for nf-core/tools, configs and the website itself.
+- Reviewing and updating how the nf-core/tools documentation is built and rendered.
+- Build a better search mechanism for the website.
+- Restructure website navigation and improve the user experience.
+- Act in a support role for technical help to others in the hackathon.
+
+## Talks
+
+We will also be offering two additional events during the hackathon, at 14:30 each day.
+These focus on useful 'soft skills' for developers that can enhance their documentation.
+
+- "Draw stuff good: how to use Inkscape & creating pipeline diagrams in Inkscape (like that [fancy metro-map](https://raw.githubusercontent.com/nf-core/eager/master/docs/images/usage/eager2_metromap_complex.png) one)" presentation by [James Fellows Yates](https://github.com/jfy133)
+- "Pimp your MultiQC report" drop-in help desk hosted by [Phil Ewels](https://github.com/ewels)
+
+# Schedule
+
+We expect people to come and go during the hackathon due to diverse time zones.
+Please just do whatever works best for you!
+
+All locations described in the table below refer to places in the Hackathon [gather.town](https://gather.town/)
+space. EMEA stands for the time zones in Europe, Middle East, and Africa.
+
+The following schedule should display times in your local time zone:
+
+The hackathon opens with a Welcome in the Lecture Theatre at 10:00 on Wednesday. Each day then runs from the Asia-Pacific morning through to the Americas evening, with check-out/check-in sessions in the Lecture Theatre at the hand-over between regions (02:00 Americas/Asia-Pacific, 10:00 Asia-Pacific/EMEA, 17:00 EMEA/Americas) and a group-distribution session in the Lecture Hall at the start of each region's day (02:30 Asia-Pacific, 10:30 EMEA, 17:30 Americas), followed by "Hack!" blocks. Breaks are at 12:00 and 15:30 in the Cafeteria, regional social events take place in the Lounge (08:00 Asia-Pacific, 13:00 EMEA, 21:00 Americas), the optional talks listed above run at 14:30 in the Lecture Theatre, and the hackathon closes with a wrap-up in the Lecture Theatre at 16:00 on Friday.
+
+# Social Activities
+
+During the hackathon, we will have some light-hearted fun and games!
+
+- Once again we are running our nf-core bingo throughout the three days! See the instructions on how to play [here](https://nfcore-bingo.web.app/).
+- Breaks will happen in the dedicated Cafeteria room, for informal chatting and getting to know each other.
+- Every day, at 3am and 3pm CET, there will be a new pipeline name to guess in our wordle-clone [nf-cordle](https://nf-co.re/nf-cordle).
+- Finally, during Thursday's social event (see schedule above), we will be running a short quiz!
+
+All social activities are of course optional, but we hope to see as many people joining in as possible :tada:
diff --git a/markdown/events/2022/hackathon-october-2022.md b/markdown/events/2022/hackathon-october-2022.md
new file mode 100644
index 0000000000..a27da9d70e
--- /dev/null
+++ b/markdown/events/2022/hackathon-october-2022.md
@@ -0,0 +1,350 @@
+---
+title: Hackathon - October 2022 (Barcelona)
+subtitle: A hybrid hackathon held in Barcelona and online
+type: hackathon
+announcement.start: 2022-08-02T12:00:00+02:00
+start_date: '2022-10-10'
+start_time: '11:00+02:00'
+end_date: '2022-10-12'
+end_time: '13:00+02:00'
+location_name: Barcelona, Spain
+---
+
+> Organised by [nf-core](https://nf-co.re/events/2022/hackathon-october-2022/)
+
+# Welcome
+
+Please join us for the October 2022 nf-core hackathon!
+
+This hackathon will be held in advance of the _**Nextflow Summit 2022**_ in Barcelona, Spain. You can find out more information about the summit at .
+
+In addition to the in-person event, we will endeavour to run a hybrid option online in [gather.town](https://gather.town/).
+
+This hackathon is not meant to be a training event but aimed at people that already have a basic understanding of nextflow and/or nf-core. If you're interested in learning Nextflow and nf-core, there will be free online workshops spanning all global timezones in the week before the hackathon. For more information and the training registration link, see the [Nextflow and nf-core training event page](https://nf-co.re/events/2022/training-october-2022).
+
+# Registration
+
+Registration for the in-person hackathon event is now closed. You can still register for the on-line event and the Nextflow Summit:
+
+You can register for either the hackathon or the summit, or both. You're welcome to attend in-person or online.
+Attendance online is free, attending in person costs โฌ49 (โฌ59 incl. VAT).
+
+- In-person registration is now closed.
+- Virtual registration will close October 7, 2022.
+
+ Register now
+
+# Prerequisites
+
+Prior to the hackathon, make sure you have signed up for, joined, and installed the following resources necessary for participating in the event:
+
+- Check you agree with the [Code of Conduct](https://nf-co.re/code_of_conduct) of the event.
+- If you haven't already, set up a GitHub account and join the nf-core GitHub organisation.
+- Join the [nf-core slack](https://nf-co.re/join) and the [`#hackathon-oct-2022` channel](https://nfcore.slack.com/archives/C03V1KD22DT)
+- Have installed on your computer:
+ - [Nextflow](https://nextflow.io/)
+ - [nf-core/tools](https://nf-co.re/tools)
+ - Docker/Singularity/Conda: [Google is your friend]
+- Familiarise yourself with the documentation on the nf-core website for nf-core modules:
+ -
+ -
+ - Relevant nf-core/bytesize talks are also listed below
+
+
+
+If you have any problems with any of these just ask on the slack channel or email [outreach@nf-co.re](mailto:outreach@nf-co.re)
+
+# Joining Gather
+
+Only those who have registered will be able to join the hackathon in our Gather space. To join the hackathon in Gather you will need to follow these steps:
+
+1. Follow the link below to find the nf-core hackathon Gather space
+ - Please note that the hackathon space will not open until October 7
+2. Enter the email address you used to register for the hackathon
+3. Check your emails for a one-time code and enter it into the Gather window
+4. Enter the space and enjoy the hackathon!
+
+ Launch Gather
+
+# Interesting Bytesize talks
+
+We have recorded bytesize talks in the past few months going over some of the details of tasks we will be tackling during the hackathon. Take a look if you would like to learn more:
+
+- [GitHub contribution basics](https://www.youtube.com/watch?v=gTEXDXWf4hE&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=4)
+- [DSL module development](https://www.youtube.com/watch?v=ggGGhTMgyHI&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=5)
+- [DSL2 Coding style recommendations (Part 1)](https://www.youtube.com/watch?v=KnYPzZ0Dd-Y)
+- [Updates on the new DSL2 syntax](https://youtu.be/17NqUsh73BU)
+- [Adding modules to nf-core/modules](https://www.youtube.com/watch?v=Wc4A2tQ6WWY&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=7)
+- [How to use modules in a pipeline](https://www.youtube.com/watch?v=tWvou0xj9wA&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=6)
+- [Modules test data](https://www.youtube.com/watch?v=QXfAerydAT0&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=17)
+- [Test modules](https://www.youtube.com/watch?v=pjhscKyWH74&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=18)
+- [Where do I start writing my own DSL2 pipeline?!](https://youtu.be/Z_uPj7fAes8)
+- [Troubleshooting a failed pipeline](https://youtu.be/z9n2F4ByIkY)
+- [Development environments & workflows (Phil)](https://www.youtube.com/watch?v=XB96efweCLI&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=12)
+- [Development environments & workflows (Maxime)](https://www.youtube.com/watch?v=OF55x-FT5WE&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=19)
+- [gitpod.io](https://www.youtube.com/watch?v=kBoC6QBU-M0)
+- [Nextflow Tower CLI](https://youtu.be/MggFf15vGCw)
+
+# Groups
+
+To keep work and discussion during the hackathon manageable, we will organise everyone into groups.
+You are free to change groups as you wish during the hackathon.
+
+Since the Nextflow Summit is going to be held just after the hackathon, we decided to keep presentations to a minimum. There will however be some workshops. More details about those will be given closer to the event.
+
+We will coordinate our work and the issues we are working on using a single GitHub _project board_ (we'll post a link here closer to the time). There will be a separate tab for each group where we will have collated a list of relevant issues.
+
+## Group 1 - Documentation
+
+This group will focus on all kind of documentation, from pipelines themselves to modules and general documentation to aid in getting started with nf-core and Nextflow. We are also planning on making introduction videos for each pipeline.
+
+[ `#hackathon-oct-2022-documentation`](https://nfcore.slack.com/archives/C041QU9SX09)
+
+### Group leaders
+
+- Abhinav Sharma (in person),
+- Marcel Ribeiro-Dantas (in person),
+- Franziska Bonath (online)
+
+## Group 2 - Subworkflows
+
+Why stop at sharing modules? We want to share subworkflows between pipelines too! Join this group to work on the cutting edge of nf-core development, fleshing out details about how this functionality will work and building proof-of-concept code to use as a model for the rest of nf-core. Recommended for relatively experienced Nextflow / nf-core contributors only.
+
+[ `#hackathon-oct-2022-subworkflows`](https://nfcore.slack.com/archives/C0419AX2RD3)
+
+### Group leaders
+
+- Edmund Miller (in person),
+- Harshil Patel (in person),
+- Maxime Garcia (in person),
+- Yuk Kei Wan (online)
+
+## Group 3 - Pipelines
+
+This group will focus on any pipeline related work, this could be:
+
+- DSL2 conversion of a pipeline
+- New / improved features for a pipeline
+- Entirely new pipelines
+
+[ `#hackathon-oct-2022-pipelines`](https://nfcore.slack.com/archives/C0415MM9FHC)
+
+### Group leaders
+
+- Maxime Garcia (in person),
+- James Fellows Yates (in person),
+- Friederike Hanssen (in person),
+- Luis Kuhn (online)
+
+## Group 4 - Modules
+
+This group will welcome anyone who wants to work on new modules or improve existing ones.
+
+[ `#hackathon-oct-2022-modules`](https://nfcore.slack.com/archives/C0415MQNW14)
+
+### Group leaders
+
+- Jose Espinosa-Carrasco (in person),
+- Gisela Gabernet (in person),
+- Louisa Perelo (online)
+
+## Group 5 - Infrastructure
+
+The nf-core website is in desperate need of work, and central tooling always needs to be improved and maintained. If this sounds like a fun task, join group 5!
+
+[ `#hackathon-oct-2022-infrastructure`](https://nfcore.slack.com/archives/C040XNUL7DM)
+
+### Group leaders
+
+- Matthias Hörtenhuber (in person),
+- Júlia Mir Pedrol (in person)
+
+## How we will work
+
+There will be a lot of people working in parallel during this hackathon, so to stay organised we have a recommended workflow:
+
+1. :speech_balloon: Chat with your group to get an overview of what is going on
+2. Join the relevant Slack channel to stay up to date and discuss with your project members
+3. Find a task to work on using the GitHub Project Board
+ - If you have something you want to do that's not there, please make an issue and add it to the board
+4. :raising_hand: Assign yourself to the issue that you're currently working on (preferably one issue at a time)
+ - This is so that multiple people don't accidentally work on the same task
+5. :fast_forward: When you're done, make a pull-request with your changes. Link it to the issue so that the issue closes when merged.
+6. :page_facing_up: Describe your work on the HackMD document for the project and tell the group! :tada:
+7. :recycle: Repeat!
+
+> The HackMD document is the easiest to forget, but please add something even if you think what you did was small -
+> we will use it in the group check-outs for each day and also in the reporting after the event so it's important for us :bow:
+
+# Schedule
+
+
+
+# Venue: Torre Glòries
+
+The in-person hackathon will be taking place in Torre Glòries, Barcelona.
+
+[Google Maps](https://goo.gl/maps/f437pZnbeZpyYSdHA)
+
+Instructions for finding us:
+
+1. Use the entrance with the green carpet and a Summit poster
+2. Find either Lift 7 or Lift 8 _(only these lifts!)_
+3. Floor 25
+4. Turn left and follow the corridor around past the coffee machine
+5. We are in rooms:
+ - Eixample (41 people)
+ - Tibidabo (14 people)
+ - La Barceloneta (6 people)
+6. Hack!
+
+| Room name | Door | Inside |
+| -------------- | ------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------- |
+| Eixample | ![Eixample](/assets/markdown_assets/events/2022/hackathon-october-2022/eixample.jpeg) | ![Eixample inside](/assets/markdown_assets/events/2022/hackathon-october-2022/eixample_inside.jpeg) |
+| Tibidabo | ![Tibidabo](/assets/markdown_assets/events/2022/hackathon-october-2022/tibidabo.jpeg) | ![Tibidabo inside](/assets/markdown_assets/events/2022/hackathon-october-2022/tibidabo_inside.jpeg) |
+| La Barceloneta | ![La Barceloneta](/assets/markdown_assets/events/2022/hackathon-october-2022/la_barceloneta.jpeg) | ![La Barceloneta inside](/assets/markdown_assets/events/2022/hackathon-october-2022/la_barceloneta_inside.jpeg) |
+
+Video of walking to the hackathon rooms:
+
+
+
+# Community Streaming Events
+
+Community streaming events are a great way to engage with your local communities while participating in events remotely. By simply booking a room and gathering your colleagues you can create discussion and build your community while enjoying the training, nf-core Hackathon, and/or Nextflow Summit together.
+
+If this is something you are interested in you can tell us about your plans using the link below. We will send you extra material to help you prepare and promote your local event. To get started we have a [poster](https://docs.google.com/document/d/1wQDtrVUv1Lro5lZFmeyfaPgJUI2pmGojtL9tWlQiVyQ/edit) that you can download, edit, and share.
+
+Keep an eye out for others hosting community events in your area. We will be sharing information about community events and how to get in contact with hosts on our website.
+
+You can add your community event using [this link](https://seqera.typeform.com/streaming-event).
+
+## Social Activities
+
+During the hackathon, we will have some light-hearted fun and games!
+
+### Onsite
+
+- Monday evening: Beers and pizza (non-alcoholic options available) at the Seqera Office!
+- Tuesday evening: Dinner!
+- The traditional Gather.town sock hunt will also be held in person! Take pictures of as many socks as you can find.
+- Finally, on Wednesday, we will be running a short quiz at 10am with Kahoot!
+
+### Online
+
+- Throughout the three days, we will once again be running an nf-core hackathon **bingo**! To join the game, go to the following [link](https://nfcore-bingo.web.app/?game=nf-core-hackathon). Check the instructions at the bottom of the page.
+
+ > Bingo!
+
+- In addition, we will be running a sock hunt once more! There are 11 socks distributed around the gather.town world. Take screenshots of as many as you can find!
+- Finally, on Wednesday, we will be running a short quiz!
+
+All social activities are of course optional, but we hope to see as many people joining in as possible :tada:
+
+# COVID control
+
+Please remember that the COVID-19 pandemic is not over.
+If you intend to join the hackathon in person, we request that you get vaccinated and tested, if possible. We ask anyone who displays symptoms not to mingle with other participants. Whilst it is not feasible for us to enforce any rules, we request that attendees behave responsibly.
+
+We will be monitoring the status of the pandemic as the event draws closer and reserve the right to make changes in our policies if we deem that to be necessary.
+
+# Safety Information at the Hackathon
+
+Please note that our [Code of Conduct](https://nf-co.re/code_of_conduct) applies to the Hackathon, and all participants need to abide by our guidelines to participate. We should all feel responsible for making nf-core events safe and fun for everyone.
+
+In addition, please respect the following at all times during the hackathon:
+
+- Do not take screenshots of groups you are working in without asking for the consent of all individuals in the group. Remember, not everyone wants their photograph shared on social media.
+
+## Reporting CoC Violations during the hackathon
+
+In case of an immediate perceived threat at the hackathon, please reach out to any of the following individuals on Slack. Their names and locations (online/onsite) are indicated below.
+
+### Safety officers
+
+- Cris Tuñí (onsite)
+- Michael Heuer (online)
+- Saba Nafees (onsite)
+
+You can also report any CoC violations directly to safety [at] nf-co [dot] re. Our safety officers will contact you to follow up on your report.
diff --git a/markdown/events/2022/summit_2022.md b/markdown/events/2022/summit_2022.md
new file mode 100644
index 0000000000..d7cf026b44
--- /dev/null
+++ b/markdown/events/2022/summit_2022.md
@@ -0,0 +1,37 @@
+---
+title: Nextflow Summit 2022
+subtitle: Join us for the latest developments and innovations from the Nextflow world.
+type: talk
+start_date: '2022-10-12'
+start_time: '09:00+00:00'
+end_date: '2022-10-14'
+end_time: '18:00+00:00'
+location_name: Torre Glòries, Avinguda Diagonal, 211, 08018 Barcelona, Spain
+location_latlng: [41.403408, 2.1895932]
+youtube_embed: https://www.youtube.com/watch?v=Zort8tv7iRI
+---
+
+> Organised by [Seqera](https://summit.nextflow.io/2022/)
+
+# Welcome
+
+A showcase of the latest developments and innovations from the Nextflow world, held in Barcelona and virtually:
+
+- Nextflow - central tool, language, and plugins
+- Community - pipelines, applications, and use cases
+- Ecosystem - infrastructure and environments
+- Software - containers and tool packaging
+
+# Key dates
+
+- July 7: Registration for the Nextflow Summit opens
+- July 22: Call for talk abstracts closes
+- July 29: Accepted speakers notified
+- September 16: Registration for the Nextflow Summit closes
+- October 10-12: nf-core Hackathon
+- October 12-14: Nextflow Summit
+
+---
+
+All recordings are on [YouTube](https://www.youtube.com/playlist?list=PLPZ8WHdZGxmUdAJlHowo7zL2pN3x97d32).
+We (sanger-tol) gave a presentation on using Nextflow in production in Tree of Life:
diff --git a/markdown/events/2022/training-october-2022.md b/markdown/events/2022/training-october-2022.md
new file mode 100644
index 0000000000..94191405f4
--- /dev/null
+++ b/markdown/events/2022/training-october-2022.md
@@ -0,0 +1,233 @@
+---
+title: nf-core Training - October 2022
+subtitle: A set of global online Nextflow and nf-core training events
+type: training
+start_date: '2022-10-03'
+start_time: '05:00+02:00'
+end_date: '2022-10-05'
+end_time: '21:30+02:00'
+location_name: YouTube
+location_url:
+ - https://youtube.com/playlist?list=PL3xpfTVZLcNiqYQ41g0fvyTQazpafFvOn
+---
+
+> Organised by [Seqera and nf-core](https://nf-co.re/events/2022/training-october-2022/)
+
+# Welcome
+
+Join us from October 3-5, 2022 for the Nextflow and nf-core training event!
+
+The training is entirely virtual and free and will cover the fundamentals of using Nextflow and nf-core. It will offer something for all skill levels but will be especially useful for those who are new to Nextflow and the nf-core community, or who are thinking about joining the [nf-core Hackathon](https://nf-co.re/events/2022/hackathon-october-2022) (October 10-12, 2022) for the first time.
+
+# Schedule
+
+The training will be held across three consecutive days in 2.5-hour sessions (including time for a small break and questions).
+
+Because nf-core is a global community, we are making the training geographically accessible and will be holding separate sessions for different timezones:
+
+| Region                                                     | Session 1 | Session 2 | Session 3 |
+| ---------------------------------------------------------- | --------- | --------- | --------- |
+| :earth_asia: APAC (Asia-Pacific)                            |           |           |           |
+| :earth_africa: EMEA (Europe, the Middle East, and Africa)   |           |           |           |
+| :earth_americas: AMER (North, Central, and South America)   |           |           |           |
+
+_(All times shown relative to your web browser's timezone)_
+
+You are welcome to attend whichever sessions work best for you.
+
+# Training links
+
+- Nextflow & Nextflow Tower training:
+ - [Training material](https://training.seqera.io/)
+ - [GitPod environment](https://gitpod.io/#https://github.com/seqeralabs/nf-training-public)
+- nf-core training:
+ - [Written tutorial](https://nf-co.re/docs/contributing/tutorials/creating_with_nf_core)
+ - [GitPod environment](https://www.gitpod.io/#https://github.com/nf-core/tools)
+
+# Streaming links
+
+The training will be live-streamed on [YouTube](https://youtube.com/playlist?list=PL3xpfTVZLcNiqYQ41g0fvyTQazpafFvOn). Streaming links can be accessed below.
+
+
+
+# Prerequisites
+
+Prior experience with Nextflow and nf-core is not required to attend. However, to get the most out of the training, we recommend that you are comfortable using the command line and have a basic understanding of scripting language concepts.
+
+The training material will be available in a preconfigured development environment in [Gitpod](https://www.gitpod.io/). This environment will contain all of the data and tools required to run Nextflow and participate in the training fully.
+
+To be able to use [Gitpod](https://www.gitpod.io/) for the training you will need:
+
+- An internet connection
+- An internet browser (e.g. Google Chrome or Firefox)
+- A GitHub account (sign up for free: https://github.com/signup)
+- A Slack account (join the nf-core Slack for free: https://nf-co.re/join/slack)
+
+# Asking questions
+
+We anticipate that many of you will have questions throughout the training. To help manage these questions we have created dedicated channels on the nf-core Slack that our team members will monitor. The benefit of this is that we will be able to reply in individual threads rather than one big chain where some questions and answers may get lost.
+
+If you are not already part of the nf-core Slack organization, you can join using [this link](https://nf-co.re/join/slack). The dedicated channels for questions can be found by searching All Channels at the top left corner of your Slack window. To keep noise down at different times of the day, we have created a separate channel for each time zone group; they are linked below.
+
+- [`#training-oct22-apac`](https://nfcore.slack.com/archives/C043PN0BE1L)
+- [`#training-oct22-emea`](https://nfcore.slack.com/archives/C0448VBG30R)
+- [`#training-oct22-amer`](https://nfcore.slack.com/archives/C0449003V9P)
+
+We will do our best to answer all questions at the time they are posted. However, because of the large number of attendees, we anticipate it may take some time to answer everyone. Our priority will be to keep everyone moving forward during the training, but we will come back to any questions that are missed during the session.
+
+# Registration
+
+Click on the link below and fill out the registration form to secure your place!
+
+Register now
+
+# Community Streaming Events
+
+Community streaming events are a great way to engage with your local communities while participating in events remotely. By simply booking a room and gathering your colleagues you can create discussion and build your community while enjoying the training, nf-core Hackathon, and/or Nextflow Summit together.
+
+If this is something you are interested in you can tell us about your plans using the link below. We will send you extra material to help you prepare and promote your local event. To get started we have a [poster](https://docs.google.com/document/d/1wQDtrVUv1Lro5lZFmeyfaPgJUI2pmGojtL9tWlQiVyQ/edit) that you can download, edit, and share. We have also created a dedicated [`#community-stream-events`](https://nfcore.slack.com/archives/C03Q3RE0RJQ) Slack channel where you can ask questions and share information about your event.
+
+Keep an eye out for others hosting community events in your area. We will be sharing information about community events and how to get in contact with hosts on our website.
+
+You can add your community event using [this link](https://seqera.typeform.com/streaming-event).
+
+# Code of conduct
+
+Please note that by attending the training event you are agreeing to abide by our [Code of Conduct](https://nf-co.re/code_of_conduct).
diff --git a/markdown/events/2023/bga-treeval-2023.md b/markdown/events/2023/bga-treeval-2023.md
new file mode 100644
index 0000000000..cf281fc26c
--- /dev/null
+++ b/markdown/events/2023/bga-treeval-2023.md
@@ -0,0 +1,32 @@
+---
+title: 'The Treeval pipeline: Generating evidence for manual curation'
+subtitle: Damon-Lee Pointon, Ying Sims, Will Eagles
+type: tutorial
+start_date: '2023-09-08'
+start_time: '14:00+01:00'
+end_date: '2023-09-08'
+end_time: '15:00+01:00'
+location_name: Biodiversity Genomics Academy 2023
+location: https://bga23.org/
+youtube_embed: https://youtu.be/gAzpMN87adk
+---
+
+> Organised by [Biodiversity Genomics Academy](https://bga23.org/)
+
+# Introduction
+
+Biodiversity Genomics Academy 2023 (BGA23) is a series of free, open to all,
+online-only, short, interactive sessions on how to use the bioinformatics
+tools and approaches that underpin the [Earth Biogenome Project (EBP)](https://earthbiogenome.org/).
+
+# Description
+
+TreeVal is a pipeline created to generate supplemental information which will aid in the curation of high-quality genomes.
+
+By the end of this session you will be able to:
+
+1. Install the TreeVal pipeline on your own system
+2. Understand the data currently required for TreeVal
+3. Start a TreeVal and CurationPretext run (a sketch of a typical launch command is shown after this list)
+4. Understand what the different components of the pipeline do
+5. Upload results to JBrowse and look at our data
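+
+The block below is a minimal sketch of how an nf-core-style pipeline such as TreeVal is typically launched with Nextflow. Treat the parameter names, the input YAML, and the profile as assumptions rather than the authoritative usage: the exact options are described in the TreeVal documentation and will be covered during the session.
+
+```bash
+# Sketch only: launching the sanger-tol/treeval pipeline with Nextflow.
+# --input and --outdir follow the usual nf-core convention and are assumptions
+# here; check the pipeline documentation for the exact required parameters.
+nextflow run sanger-tol/treeval \
+    -profile singularity \
+    --input treeval_input.yaml \
+    --outdir treeval_results
+```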
diff --git a/markdown/events/2023/bytesize_crisprseq.md b/markdown/events/2023/bytesize_crisprseq.md
deleted file mode 100644
index 7e72c6c537..0000000000
--- a/markdown/events/2023/bytesize_crisprseq.md
+++ /dev/null
@@ -1,89 +0,0 @@
----
-title: 'Bytesize: nf-core/crisprseq'
-subtitle: Jรบlia Mir (QBiC, Tรผbingen University) and Marta Sanvincente (Pompeu Fabra University, Barcelona)
-type: talk
-start_date: '2023-02-14'
-start_time: '13:00 CET'
-end_date: '2023-02-14'
-end_time: '13:30 CET'
-embed_at: 'crisprseq'
-youtube_embed: https://www.youtube.com/watch?v=x_eFQW0nNvo
-location_url:
- - https://doi.org/10.6084/m9.figshare.22100045.v1
- - https://www.youtube.com/watch?v=x_eFQW0nNvo
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **โnf-core/bytesizeโ**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: nf-core/crisprseq
-
-This week Jรบlia Mir ([@mirpedrol](https://github.com/mirpedrol)) and Marta Sanvincente ([@msanvicente](https://github.com/msanvicente)) present the newly released nf-core pipeline nf-core/crisprseq.
-Nf-core/crisprseq is a bioinformatics best-practice analysis pipeline for the analysis of CRISPR edited next generation sequencing (NGS) data. It allows the evaluation of the quality of gene editing experiments using targeted NGS data.
-
-Video transcription
-**Note: The content has been edited for reader-friendliness**
-
-[0:01](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=1)
-Hello everyone to this week's bytesize talk. I'm very happy to welcome today Julia from QBiC in Tรผbingen and Marta from UPF in Barcelona. They're going to talk about another new pipeline that was released just a week ago called crisprseq. Off to you.
-
-[0:22](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=22)
-Thank you. Thanks for the introduction. We'll present nf-core/crisprseq, which is a pipeline for the analysis of CRISPR experiments. I would like to start by an introduction to what CRISPR is, because I'm sure you've heard that word before, but maybe you don't remember exactly what it is. CRISPR comes from bacteria and the system is repurposed to do gene editing. It consists of a protein that we call Cas, and this protein can cut DNA, creating double strand breaks. It's coupled to a single guide RNA, which is a short sequence of RNA, which is complementary to the DNA region that you want to cut. This way we can have directed cuts.
-
-[1:18](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=78)
-When we have this double strand break in a cell, there are usually two mechanisms of repair. The most common one is this one that we call non-homologous end joining. That's the cell that goes and tries to repair this double strand break, and this can produce some insertions or deletions, which can result in the disruption of the gene, and then this can cause a gene knockout. Then there's a different way, which is called homology-directed repair, which consists on having a template that we can provide and the repair is made based on that template. Like this we can introduce new fragments of DNA and possible gene knock-ins. Apart from these two mechanisms, there's also this microhomology-mediated end joining, which is very similar to the non-homologous, but it happens when there are two small regions of homology surrounding the cut, and these can recombine, so we can get a bigger deletion. More recently, there are these other two technologies called base editing and prime editing, which are done not by a double strand break, but only with a nick. Those are more precise because they can produce base substitutions of only one base.
-
-[3:04](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=184)
-That's the overview of all these CRISPR-Cas experiments that we can have. Apart from that, we can also have CRISPR screens, which consist of a library of different gRNAs targeting lots of different genes, and then we can perform a screening. Finally, if we couple with a CAS protein that's inactive and doesn't cut the DNA, it only affects the expression of the gene, we call this CRISPR activation or CRISPR interference. Our pipeline, crisprseq, can analyze gene knockouts, knock-ins, and also base editing or prime editing experiments. This pipeline is based on a pipeline called CRISPR-Analyzer, which Marta developed, so she'll explain more about it.
-
-[4:04](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=244)
-As Julia has already said, this first release of nf-core/crisprseq pipeline is based on CRISPR-Analytics. Currently, we just have the core of CRISPR-Analytics in crisprseq, which I will show you here. These are the core steps of that pipeline. The first steps are quality pre-processing of the sequencing reads, where different steps are done to remove low-quality reads, and also in the case that we have paired-end sequencing reads, the reads are merged. Then the alignment against the amplicon reference is done, and after that, there is a process where each indel and substitution that could be caused by these genome editing tools are quantified. Finally, some plots and tables are done to allow us to visualize the results. In the next slide, what I want to show you is other optional steps that CRISPR analytics have that are not currently in crisprseq, but we hope that we will be able to add it in the following versions.
-
-[5:33](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=333)
-Just briefly, the first optional step that we have is the ability of using unimolecular identifiers to cluster the sequences, and through that clustering processes we can remove sequencing and amplification biases, as well as correct sequencing errors. We also have implemented a step that allows us to identify the amplicon reference, looking for it in a genome of reference. Then in the bottom part, you have two other steps that has allowed us to increase the precision of our pipeline. The first one is the size bias correction, in which we have implemented a simple model where we used spike-in controls of different sizes and known abundance that were used to model biases related to the amplification, with the sequence size, since longer deletions will lead to shorter sequences that will be amplified more times than longer ones. Then if we also sequence mock samples or a negative control, we can use this sample to subtract errors that can be also represented in our treated samples.
-
-[7:17](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=437)
-You can choose the alignment that you want to use in the alignment step, but we have been exploring with simulated data sets the performance of different alignments together with the following part of quantifying the different edits. What we have done is to optimize the parameters of minimap to achieve better results related to the identification of the indels produced by the double strand break repair mechanism. In the following slide, we have just some examples of CRISPR-Analytics being used to analyze a bunch of samples. We have analyzed samples from three different cell lines that were edited with CRISPR-Cas9. In the first plot we see that the main pattern observed among all the insertions that have been found are homology insertions, which means that the same insertion that is in the cleavage site has been also added in this repair process. This happens with higher frequency when the nucleotide that we have free is an adenine or a thymine. As in the other two plots, what we have been exploring is the precise outcomes, which are those outcomes that are shown in a higher frequency. In that case, we also observe that among these precise outcomes, we have these homology insertions, and also we have some deletions of a cytosine when this cleavage site is surrounded by cytosines, and also we can see some micro-homology patterns that have lead to longer deletions that have also a higher representation in these samples.
-
-[9:43](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=583)
-CRISPR-Analytics has been benchmarked using several datasets. We have used real data as well as simulated data, and we also created a ground truth dataset to be able to also have this dataset for the benchmarking. This ground truth dataset was generated by several collaborators, which had different subsets of reads, and they were classifying the indels that were found in the reads as indels produced by errors or indels produced by genome editing tools. Finally, these subsets have been used to calculate the percentage of addition of those samples, and we have extrapolated this percentage to calculate the distance between the percentages reported by different tools and the real distance or the established percentage of addition with this ground truth dataset. From this, we just want to highlight that our tool has good precision without relying on the addition windows. Most of the tools use a window where the edited indels have to take place to avoid reporting false positive events.
-
-[11:38](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=698)
-How you can use nf-core/crisprseq? Basically, you can use the typical nextflow command, where you provide an input sample sheet, the output directory of the profile that you want to run the pipeline with, and then we also have this one single parameter to provide the aligner, by default we're using minimap but you can also choose between bwa or bowtie2, and the reason why we don't have more parameters is because most of them are provided with the sample sheet because they are dependent on the sample.
-
-[12:23](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=743)
-That's how our sample sheet looks. You have the sample name, fastq_1 and fastq_2. If you have only single-end sequencing data you can only provide fastq_1. Then you provide the reference sequence, here it has been shortened for space issues. This reference is the reference the reads will be aligned to, so it's the region where you directed your cut. You also provide the proto-spacer which is the guide RNA that you used in your experiments to direct the cut. Finally, in case that you performed a homology directed repair experiment, you can also provide the template.
-
-[13:11](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=791)
-That's the structure of the output folder, I won't go to all the directories in detail, but you will find all the outputs of all the tools used for pre-processing like to join paired-end reads. Also the quality filtering steps because we remove sequencing adapters, we remove low quality reads and mask low quality bases also, and then you also have the output of the alignment and finally the most important folder, which is this one called `cigar`. It's called like that because we parse the edits using the cigar field from the mapping. In these directories you will find some tables and summary tables of the edits and also plots.
-
-[14:15](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=855)
-This is an example of the output plots. We report data quality, meaning that you will have a percentage of reads that have good quality, also the ones that were aligned against the reference. We also report the number of reads that were wild type or the ones that contained indels and from these indels we also classify by filter of quality and if they are located in the expected pick on the cut site and if they are above the sequencing error rate or not. Finally there's also classification between insertions and deletions, if there are insertions produced by our template and also if these indels are in frame or out of frame because the ones that will be out of frame are the more probable to disrupt a gene function and produce a gene knockout.
-
-[15:33](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=933)
-Finally these further steps as Marta already commented that they are already implemented in CRISPR-Analytics and we will add them to crisprseq. This unimolecular clustering step to reduce PCR duplicates or sequencing biases, because usually in the sequencing methodology, shorter reads are sequenced more often but this doesn't mean that you have this particular long deletion more represented in your sample so we can correct with UMIs. Then also the automatic identification of a reference and some noise handling and finally also thinking already about version 2 of crisprseq about the idea that we will be able to analyze other kinds of CRISPR experiments such as CRISPR screening. If you have any doubt or want to work with us, Laurence is currently implementing this part of the analysis so you can join the Slack channel and there ask and that's it. Feel free to join this channel, test out the pipeline and see if there's something that you would like to also include. Also check the repository. Thank you.
-
-[17:08](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=1028)
-(host) Thank you very much, that was a very nice talk. Are there any questions in the audience to either Julia or Marta? You can unmute yourself now if you want to, or you can write a question in a chat and I will read it out. There currently seems to be no questions but may I ask one?
-
-(question) So one of the biggest issues that I know of CRISPR is off-target effects, but as I understand you're mapping to fairly short references, just a target. Is there any way how we could figure out if there are off-target effects with this pipeline or is there anything planned in the future?
-
-(answer) This pipeline it's not really thought to be able to detect off-target effects. The experimental steps are based on amplification of your expected target and then you sequence with Illumina or other next-generation sequencing platforms. What you can do is, for example, if you use some prediction of which are the the targets that are more susceptible to be off-targets you can also amplify these off-targets and make the same analysis and see if there are indels in that regions.
-
-(question cont.) But you would need to know what to look for then obviously.
-
-(answer cont.) Yeah, we would have to add guide-seq or other analysis pipelines that you use for another experimental protocol to do the computational analysis. It's something that can be implemented in further steps.
-
-(host) Thank you.
-
-[19:11](https://www.youtube.com/watch?v=x_eFQW0nNvo&t=1151)
-(host) Are there any other questions in the audience? If not I would like to thank you two for this great talk. I also would like to thank the Chan Zuckerberg Initiative who is funding our bytesize talks. If anyone has more questions to both of you, you can always go to slack and check either in the channel for crisprseq or you can also ask in the bytesize channel and I'm pretty sure the two will have a look at your question. Thank you very much.
-
-(speaker) Thank you.
-
-
diff --git a/markdown/events/2023/bytesize_funcscan.md b/markdown/events/2023/bytesize_funcscan.md
deleted file mode 100644
index 3b48bd485a..0000000000
--- a/markdown/events/2023/bytesize_funcscan.md
+++ /dev/null
@@ -1,85 +0,0 @@
----
-title: 'Bytesize: nf-core/funcscan'
-subtitle: Jasmin Frangenberg - Dept. Palaeobiotechnology, Leibniz Institute for Natural Product Research and Infection Biology Hans Knรถll Institute
-type: talk
-start_date: '2023-01-24'
-start_time: '13:00 CET'
-end_date: '2023-01-24'
-end_time: '13:30 CET'
-embed_at: 'funcscan'
-youtube_embed: https://www.youtube.com/watch?v=c1CnE6jPhpg
-location_url:
- - https://www.youtube.com/watch?v=c1CnE6jPhpg
- - https://doi.org/10.6084/m9.figshare.21953978.v1
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **โnf-core/bytesizeโ**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nf-core.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: nf-core/funcscan
-
-This week, Jasmin Frangenberg ([@jasmezz](https://github.com/jasmezz)) is going to introduce nf-core/funcscan. nf-core/funcscan is a bioinformatics best-practice analysis pipeline for the screening of functional components of nucleotide sequences such as assembled contigs. This includes mining for antimicrobial peptides, antibiotic resistance genes and biosynthetic gene clusters.
-
-Video transcription
-**Note: The content has been edited for reader-friendliness**
-
-[0:01](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=1)
-(host) Hello everyone and welcome to this week's bytesize talk. With us is Jasmin Frangenberg. I'm very happy that you're here. Thank you very much. She's going to talk about yet another new pipeline that is going to be released very soon, which is nf-core/funcscan. Off to you, Jasmin.
-
-[0:22](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=22)
-Yes, thank you very much. I will introduce this pipeline to you now, which is an nf-core pipeline to screen for functional components of nucleotide sequences from prokaryotic genomes or metagenomes. What are these functional components that we are interested in or that we screen for? The pipeline screens on the one hand for antimicrobial peptides. These are important in innate immunity and they are very short sequences, peptides out of about 20 amino acids, so you can find them even in small or fragmented DNA and metagenomes. The same applies to antibiotic resistance genes. On the other hand, biosynthetic gene clusters, here at the bottom. They are quite big, because they consist of a whole gene cassette, which codes for a whole metabolic function, secondary metabolites or natural products.
-
-[1:24](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=84)
-Who would be interested in such a pipeline, which identifies these compounds? In natural product discovery, where you can identify these compounds to develop therapeutics, in antibiotic research, in environmental metagenomics, or simply to have functional and genomic annotations. In these research fields, the detection of these compounds is already being done with a couple of tools, however, there are certain issues. One of them would be the efficiency, because mostly you apply the tools manually and then you only have a very specific purpose of the tool. You can identify a single compound, but it's not very broad and you have only a single algorithm that identifies the output. It could be more feasible to have this whole process streamlined in a pipeline. Also the output of these tools is not standardized. Another issue would be the reproducibility, because throughout the years, the tools develop new functions, bugs are fixed. It's very important for researchers to record which versions of which tools they are using, which is hard if you execute them manually on your samples. Also data privacy, there are a bunch of tools that offer web services where you can upload your data where they are analyzed for you. However, this requires that you give your data to a third party, which is not always intended or even possible. Another issue is that bioinformatics skills are often needed. Sometimes you even have to write small bash scripts to execute the tools on your data, which is not possible for all people. For example if they are biochemists who just want to know what is in the data, they don't want to be trained bioinformaticians.
-
-[3:30](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=210)
-These are the many problems that our pipeline tackles, namely that it is very scalable since it's a Nextflow pipeline, all nf-core pipelines are Nextflow pipelines. They are very efficient and scalable. You can execute them on your local computer, laptop, up to the institute's HPC. They are reproducible since they record all the tools and versions of the tools. Of course, you can decide where you want to have your data, you are not forced to put them on any web server. Also, it is very easy to execute the pipeline, which you will see later when we come to the tutorial part.
-
-[4:11](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=251)
-I emphasized how easy the pipeline is to use, but it didn't start very easily. I go back to October 2021, when we assembled the ideas to develop a pipeline of many tools, we brainstormed what would be needed for obtaining the resistance genes, the biosynthetic gene clusters and the AMPs. Not all tools were yet on Conda or had nf-core modules. We had to do a lot of work there. Then throughout the next year, we streamlined the process a bit and the ideas got clearer. We even made the first sketch of the famous tube map sketch. Finally in 2023, the pipeline is ready to use. This is the current version.
-
-[5:05](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=305)
-I will walk you through it. In the first step, we have the input which is being annotated. As I said, input can be any genome sequence, could be metagenome, contigs, could also be complete bacterial genomes. This data is then analyzed by one of the three tools, the annotation tools. After this, this data goes into one or all of the three workflows. The antibiotic resistance genes in the yellow workflow, the BGCs in purple and the antimicrobial peptides in red. Not all of the downstream tools need the annotated data. For some, we also use the direct input data.
-
-[5:50](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=350)
-Then as I said, each of the workflows has a bunch of tools. For example, the AMP workflow has four tools. As I mentioned before, they follow different strategies. Some of them use, for example, deep neural networks and machine learning to identify compounds of AMPs, which would be, for example, ampir or here DeepBGC for the BGC workflow. Other tools have rule-based strategies. There are a lot of algorithms predicting the compounds and the results are then very diverse, as you can imagine. It is important to aggregate these outputs and summarize them into a nicely readable format which is the third step. For this, we use one tool per workflow, two of them are developed by ourselves โ AMPcombi and comBGC โ and hAMRonization was already a tool available.
-
-[6:51](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=411)
-This was the overall workflow and now I would like to show you how to apply the pipeline and you will see that is really very easy. We start with the input, which is a sample sheet, basically a table with two columns. The first one is your sample name, the second one is the path to your FASTA input file. Of course your FASTA file includes the ID of your sequence and the sequence itself. This is what you need to actually run the pipeline and it is as easy as running `nextflow run nf-core/funcscan`. You give your input sample sheet, give your output directory. This is a minimal example of a pipeline run. Of course, it is recommended to use more parameters. One of them would be in the annotation step, the flag `--annotation_tool`, where you can decide which tool you want to use. They have different properties. For example, prodigal is very fast, however, we noticed that with prokka we get better downstream results. It depends on your needs and ideas, which tool you would like to choose. The default is prokka.
-
-[8:00](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=480)
-After the annotation step, we come to the actual identification of the compounds. You can activate each workflow with this flag `--run_amp_screening`, for example, for the AMPs. And by activating this, all the AMP tools are run on your data. You can also choose, for any reason, to deactivate any of the tools. You can switch them off with the flag `--amp_skip` and then the name of the tool. This might be because some tools might be very slow or you think they are so specific that you are not interested in the output. As I said, for whichever reason, you can switch them off. This is the same for the antibiotic resistance workflow. You can apply this flag, it runs all the four or five tools on your data and you can skip any tool with the `--arg_skip` flag. Same applies for BGC identification. You have the flag, all the tools are run, you can skip whichever you might want to skip. Of course, you can use not only one of the flags per run, but all three flags at the same time. Your data is investigated simultaneously and parallelized as much as possible with Nextflow. Okay, so these are the identification steps.
-
-[9:21](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=561)
-Now we come to the summary steps for each workflow. Let's start with the antibiotic resistance, which is done by hAMRonization, which is a tool that is already out there. Here you can see the GitHub link. This tool can actually summarize a bunch of outputs of resistance identification tools. Our pipeline currently includes the orange tagged ones. The output of those tools is then summarized into a standardized gene report. This is how it looks. It's a table with a lot of columns. You have here the sample IDs, then the genes that have been identified, some information about the databases, which tools were run, and so on. These are actually all the column headers that are very conclusive and you can use this output table for downstream analysis in R or any statistics program.
-
-[10:17](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=617)
-This is very similar to AMPcombi, which we developed ourselves, Anan and Louisa developed this, where you also have your sample IDs and then some information about probability of AMPs. Additional feature is that it not only identifies your antimicrobial peptides, but it also does some back aligning to a reference database to identify taxonomic classification. It also infers some chemical properties like stereochemistry and provides the publication so you can go back and read more about the compound identified. The last tool for the BGC workflow is comBGC. Similar fashion, we have the sample IDs, the tools which have been applied, and then more information about your candidate biosynthetic gene clusters. With this, you see that we have a scalable workflow now to identify these compounds, which are important for a couple of research fields for, as I said, drug development, antibiotic research and so on.
-
-[11:28](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=688)
-Since the pipeline is almost ready, it's probably going to be released next week. Let's see about it. We have at least added all the modules and subworkflows. We do some more testing and then the pull request will go out. I can already advertise if there is someone here in the chat, who would like to review, please feel free to reach out to us on Slack. In the future, we would like to include more screening modules and to also have a visual summary of the output, which would be a graphical dashboard, probably with a Shiny app. Let's see about that.
-
-[12:11](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=731)
-With that, I would like to introduce the development team, which is James, Louisa, Anan, Moritz and me. Of course, we got a lot of help from the nf-core community, which was always assisting, very nice community. Also I would like to emphasize some colleagues here at my institute, which helped with biological and biochemistry knowledge. My supervisor, Pierre Stallforth from the Leibniz HKI. With this, I would like to close and lead you to our repository and the documentation of the pipeline. If you want to interact with us, feel free to join us on Slack and otherwise I'm open for questions either now or later on Slack. Back to you, Franziska.
-
-[13:03](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=783)
-(host) Thank you very much. Very interesting. Anyone can now unmute themselves if they have any questions, they can also post questions in the chat and then I will read them out. Are there any questions from the audience? Otherwise I actually have a question.
-
-(question) You have shown a minimal command that you can run, that doesn't actually specify the workflow that it's using. Is that going to use all three workflows or a specific one, a default?
-
-(answer) This one you mean? Exactly. In the default we have specified none. This would actually run only the annotation, which is probably not very useful for you. This is the current state of the settings. Maybe we will change this later. I don't know.
-
-[14:05](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=845)
-(question) Right. Would it make sense to run all three workflows at the same time or is that different kinds of samples?
-
-(answer) No, no, that's what it's designed for, to run efficiently on all three workflows. It depends on your interest: If you are not interested in the resistance genes, then of course you don't need to run it, but it's very efficient to use this also.
-
-[14:26](https://www.youtube.com/watch?v=c1CnE6jPhpg&t=866)
-(host) Thank you. Are there any more questions at this moment in time? Otherwise, I thank you again. It was a very nice talk. Of course I would also like to thank the Chan Zuckerberg Initiative for funding our bytesize talks and our audience for listening to the talk. I hope to see everyone next week. Thank you very much. Bye.
-
-
diff --git a/markdown/events/2023/bytesize_hackMD_reveal_js.md b/markdown/events/2023/bytesize_hackMD_reveal_js.md
deleted file mode 100644
index 642d1e88f1..0000000000
--- a/markdown/events/2023/bytesize_hackMD_reveal_js.md
+++ /dev/null
@@ -1,102 +0,0 @@
----
-title: 'Bytesize: HackMD and reveal.js'
-subtitle: Maxime Garcia, Seqera Labs
-type: talk
-start_date: '2023-04-18'
-start_time: '13:00 CEST'
-end_date: '2023-04-18'
-end_time: '13:30 CEST'
-youtube_embed: https://www.youtube.com/watch?v=OqAKuwwNFf0
-location_url:
- - https://doi.org/10.6084/m9.figshare.22656934.v1
- - https://www.youtube.com/watch?v=OqAKuwwNFf0
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **โnf-core/bytesizeโ**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: nf-core/HackMD and reveal.js
-
-HackMD.io is an open source, collaborative Markdown editor, which allows people to share, comment, and collaborate on documents. At nf-core we use HackMD extensively during hackathons and for notekeeping.
-This week, Maxime Garcia ([@maxulysse](https://github.com/maxulysse)) will show us how to create Presentation Slides on HackMD using the `reveal.js` integration.
-
-Video transcription
-**Note: The content has been edited for reader-friendliness**
-
-[0:01](https://kth-se.zoom.us/j/68390542812&t=1)
-(host) Hello, everyone, and welcome to being back at the bytesize talks. Since we had a bit of a break over Easter. Today we start the series with Maxime from Seqera Labs, and he is going to talk about how to use HackMD to present slides. Off to you.
-
-[0:24](https://kth-se.zoom.us/j/68390542812&t=24)
-(speaker) Thank you, Fran, for the introduction. Hello, everyone. I'm Maxime Garcia working at Seqera Labs, and I'm going to present one of the tools that we use quite often during the hackathon and everything. Which is HackMD and reveal.js. Let's start with the presentation mode, because I think this is the most common stuff. The usual disclaimer, I'm mainly covering my own usage within nf-core or what I usually do on the side, but you can do much more. Don't hesitate to investigate and explore, I think it's fun. Yes, this is messy, but it's fine.
-
-[1:09](https://kth-se.zoom.us/j/68390542812&t=69)
-First, Markdown. Because all of the stuff that we're doing is in Markdown. What is Markdown? It's a lightweight markup language. It means that it's using small tags to do stuff, but most of the time, it uses just tiny symbols as the tags. The key point is readability. Because if you can read it... this is markdown... it's understandable, at the same time, what you're seeing here is what you see there, and you can understand what you see and what you read. For me, that's one of the key points in Markdown, it's readability. If you compare that to LaTeX or some other language, which are less readable, Markdown is super high in readability. It's super easy to convert into HTML, and then PDF, and it's widely used in all of the nf-core documentation and the nf-core website. You've noticed some tiny issues around the site, but I will come to that later. Don't worry about it.
-
-[2:18](https://kth-se.zoom.us/j/68390542812&t=138)
-Quick links for Markdown, I will of course share the slides later. It will be super easy to follow the slide to follow the links and everything. Quick links, I think I have one first quick reference to the Markdown syntax, which is fairly simple, a more complete Markdown cheat sheet, which goes way more into detail. Then some more documentation for the GitHub-flavored Markdown, which is an extension of the regular Markdown.
-
-[2:58](https://kth-se.zoom.us/j/68390542812&t=178)
-What is HackMD? HackMD is a tool for real-time collaboration of Markdown documents. It's widely used during hackathons and nf-core bytesize presentations. It means I can edit a document. Just like that. Real-time edit. If someone else has the same URL as I, we can all edit the same document, exactly what we do with Google docs. It's all in Markdown, so it's super easy to do that. HackMD has the possibility to use reveal.js, which is an HTML presentation framework, which we also use widely during the hackathon and that I use a lot for the bytesize presentation. Reveal.js is another tool and HackMD made it possible to use reveal.js, in it's presentation mode.
-
-[4:00](https://kth-se.zoom.us/j/68390542812&t=240)
-What you do when you have your presentation, what you can do, you can share in slide mode. If you share in slide mode, then you can have your presentation directly as a slides, and that's all. To pass from one slide to another, once you're in slide mode, you either use the arrow on your keyboard - I'm pretty sure it works also with arrows here, or yes, it does - or you can also use the space bar. Reveal.js allows you to do sections and subsections. You can do a lot of stuff within reveal.js. You can use fragments, so if you want to have a multiple steps in the slide that get revealed one after the other, or stuff like that. But usually what I do is super simple slides.
-
-[4:53](https://kth-se.zoom.us/j/68390542812&t=293)
-How to actually use reveal.js to present within Markdown. First it's super simple. I linked how to create a slide deck, which is the HackMD documentation for it. The most important part is to follow syntax, and the most important part in following syntax is to separate the slides. You need to have one empty line, three dashes, and another empty line. That's all. Then I usually use one or two hashes, so h1 or h2 or h3 for details, subtitles. That's all, you do as you want. Sometimes you might need small text, I usually use HTML tags. Small text, or big if you need big, but usually I just need to make things smaller. What I do also, sometimes if I need some really small text, I use also font. I think there is a font tag that you can specify the size of the font. That's super simple.
-
-[6:06](https://kth-se.zoom.us/j/68390542812&t=366)
-If you want to include a picture, if it's available online, you can link it as you usually would link any picture in Markdown. You just need to know the syntax for it. You can use HackMD directly to upload your picture. If I click there, I go to "insert image", go into my download, I can insert one document, and here it is, my document should be inserted, and here it is, I have it in my image line. As you can see, it's super easy to upload a file from your computer directly to HackMD, and this is the syntax to have a picture in Markdown.
-
-[6:55](https://kth-se.zoom.us/j/68390542812&t=415)
-In HackMD, we can also use fontawesome, which is what we use a lot in nf-core to have some... I know what is causing that, so I will explain that later, sorry about that... fontawesome is resources that allows you to use simple icons for different stuff. You can have, here it's for server, but you can have a thing that was also, GitHub should be something well known, Facebook should work as well, and so you have a lot of different things: cloud, cloud should work. If you know it, it's super easy to use. What we can do, but that is regular, so yes, simple. Let me put back what I did before, okay.
-
-[7:57](https://kth-se.zoom.us/j/68390542812&t=477)
-Background, this is super useful whenever you're doing a presentation, you might want to change the background of your presentation. What I usually do there is that I use this dot slide command, and I specify which is my data background. We can add some opacity if we want by using a data background opacity. I use here 0.5, but you can have more or less, so 0.8, 0.1, so you can really play with that. Let's put it back to 5.
-
-[8:34](https://kth-se.zoom.us/j/68390542812&t=514)
-An important thing also we can do with reveal.js is export as PDF, so this we can do only when we're in slide mode. Let me show that to you.. When we're in slide mode, if you scroll down, we can see some links. We can see that I made some changes a few seconds ago, I can see that nf-core is owning this note, because of course I made this note in the nf-core organization. I can edit back the note or I can also print the note. If I print it, it's going to try to print everything as it is, and what we want actually is to print everything as a PDF, so I'm printing to file as a PDF, and okay, and it's printing. I should have my file saved on my side, and it's efficient because you really get one page per slide. I've noticed from time to time some issues. Mainly if you have pictures, like here, that are way too big. In that case I would recommend to go back and forth in between from what you have on your screen and what you have in your PDF, and try to scale down your picture or have your picture on the next page. But apart from that it should work fairly well. Most of the issues that can happen in this case is because you're not following the proper syntax.
-
-[10:16](https://kth-se.zoom.us/j/68390542812&t=616)
-What we can do more with HackMD, you can include your own CSS slide, so your own CSS style, this is what I did here, and I'm guessing this is what is causing me the issues that I have there. I just have been trying that out yesterday, and yesterday I didn't have this kind of issue, so I don't know where it's coming from. I will have to look more into that. What you're doing is just saying that I want HackMD to include this file from HackMD, and this part is exactly correspond to a document there, so this is fairly good. You can also use that to include a simple slide deck. For example what I'm doing here in this presentation, I'm including the last slide deck, and what I'm doing if I go on the page for this slide, I can see directly what is linked here. I can see here this is my own reveal.js style, and I have all my style into HTML tags, and all of the CSS correspond to this slide, which could help you. What I really like about doing that is that you can add your own custom stuff, and what I like about doing that is adding a multi-column possibility, and that helped me using "div". I'm using divs within a div multi-column to present a different... here I'm just presenting three lists into three columns, and that's all. You could really do whatever you want.
-
-[12:08](https://kth-se.zoom.us/j/68390542812&t=728)
-Then I have some more tips. When in doubt, if you have any issue, don't hesitate to add more break lines. I've noticed that it's something that works a lot in my case, because I noticed that reveal.js and HackMD, the combination between the two is very, very dependent on the break line and the syntax. It expects you to follow the proper syntax, and sometimes it's not working very well. If something is not working reload or relaunch. If you can, you can also lint markdown if you're thinking that something is not working. Printing works much better when you're using Chrome, and also following the proper syntax. The less HTML, the better as well. I think the div works well, but apart from that, I'm not sure. I think I noticed some issue with tables and stuff, it's something that you really need to look for. What you can do in reveal.js, you can set up your username. Here in my case, my username is @maxulysse, and that links me, directs me, to myself. Otherwise, if you haven't set up your own username, you will have a something which is more or less un-understandable. As I explained, you can also link directly another slide, which is what I did just here. I think I'm good for questions, so I can open everything.
-
-[13:53](https://kth-se.zoom.us/j/68390542812&t=833)
-(host) Thank you. Now anyone who wants to ask a question can do so. I enabled you to unmute yourself. Are there any questions from the audience?
-
-(question) I had a question. Is it possible to add video files to reveal.js syntax?
-
-(answer) Yes, definitely. reveal.js is just HTML that is presented within JS, so you can link YouTube stuff. I don't do that often because most of the time I'm assuming a presentation should be shared easily in other settings, so I usually don't include videos, but you can do that. I think I did that in a previous presentation. I think other people have done that. Definitely, I think if you have a look at all of our presentation on nf-core, you will find some that link videos.
-
-(question cont.) Okay, thanks.
-
-[14:59](https://kth-se.zoom.us/j/68390542812&t=899)
-(host) Are there any more questions?
-
-(speaker) All right, this is annoying. I will not include the CSS anymore in the stuff. That was super convenient because you don't need to have your own style, but yes it's... or maybe I will try to figure out what is unreliable there.
-
-[15:19](https://kth-se.zoom.us/j/68390542812&t=919)
-(question) Maybe a question from my side or clarification. This is a free software, right?
-
-(answer) Yes, definitely this is a free tool. I think we have something special, because we have an organization, so we might have something there. Oh yes, we have a team plan. I have no idea what it means for nf-core, but we have a team plan there.
-
-(question continued) But if you would want to use it privately or...?
-
-(answer continued) Oh yes, you can use it privately. I use it privately as well, for example to keep my own list of books to read or lists that I share with friends. You can make your own stuff, exactly; I have a couple of presentations as well, and you can have your own workspace and everything. You can have private notes and public notes. Yes, you can do quite a lot.
-
-[16:19](https://kth-se.zoom.us/j/68390542812&t=979)
-(host) Perfect, thank you. Ah, and there is also a link from James which explains the difference, I guess, between the free and paid versions of HackMD.
-
-(speaker) That's good, because I definitely had no idea what the difference between the two of them is.
-
-(host) Okay cool, thank you very much. Are there any more questions? It doesn't seem so. Then I would like to thank Maxime again for the nice talk today and all of you for listening and of course as usual the Chan Zuckerberg Initiative for funding our bytesize talks. Thank you very much.
-
-(speaker) Thank you.
-
-
diff --git a/markdown/events/2023/bytesize_hgtseq.md b/markdown/events/2023/bytesize_hgtseq.md
deleted file mode 100644
index 5b1892e7e5..0000000000
--- a/markdown/events/2023/bytesize_hgtseq.md
+++ /dev/null
@@ -1,80 +0,0 @@
----
-title: 'Bytesize: nf-core/hgtseq'
-subtitle: Francesco Lescai - Department of Biology and Biotechnology, University of Pavia
-type: talk
-start_date: '2023-03-21'
-start_time: '13:30 CET'
-end_date: '2023-03-21'
-end_time: '14:00 CET'
-youtube_embed: https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69
-embed_at: 'hgtseq'
-location_url:
- - https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69
- - https://doi.org/10.6084/m9.figshare.22317877.v1
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **"nf-core/bytesize"**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: nf-core/hgtseq
-
-This week, Francesco Lescai ([@lescai](https://github.com/lescai)) is presenting the nf-core pipeline hgtseq. nf-core/hgtseq is a bioinformatics best-practice analysis pipeline built to investigate horizontal gene transfer from NGS data.
-It uses metagenomic classification of paired-read alignments against a reference genome to identify the presence of non-host microbial sequences within read pairs, and to infer potential integration sites into the host genome.
-
-Video transcription
-**Note: The content has been edited for reader-friendliness**
-
-[0:01](https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69&t=1)
-(host) Hello, everyone, and welcome to this week's bytesize talk. I'm very happy to have with me today Francesco Lescai from the University of Pavia at the Department of Biology and Biotechnology. He is very, very busy in nf-core, and among other things, he also worked with Sarek, but today he's going to talk about another pipeline, which is nf-core/hgtseq, and off to you.
-
-[0:27](https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69&t=27)
-Thank you, Franziska. Today I'm going to give you a bit of a background for this pipeline and the motivation that inspired us to initiate this project. I'm going to describe the pipeline components. I'll give you some usage indications on the performance of the pipeline, and then I'll describe a bit of future perspectives, which is our homework, basically. I'm going to start with the acknowledgments here, first to Simone Carpanzano, who's the lead author of this pipeline, but as you might imagine, he's heavily engaged in preparing the defense of his Bachelor of Science now, so he couldn't present today. Mariangela Santorsola, who's a key person in my lab, and she's also contributed to the publication that describes this pipeline. Then this is very important, I think, because the value of the nf-core community is the availability of all the modules that we also have used in our pipeline, so a very important acknowledgement here is to all the authors of the different modules that we have used and which actually make the added value of nf-core so important.
-
-[1:43](https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69&t=103)
-Starting from the background of this pipeline: horizontal gene transfer. This is a well-known and well-studied process in biological organisms, and it refers to the transfer of genetic material between two different species when they are in close proximity. This has been very important in evolution because it has contributed new traits, adaptation to new environments, and also the capability to use new sources in different organisms. It's been crucial in evolution, as I mentioned, particularly in archaea and bacteria, but not very much has been known about this phenomenon happening in higher organisms like mammals, for example.
-
-[2:36](https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69&t=156)
-Our motivation was mostly inspired by a paper from several years ago that described the detection of microbial reads in exome sequencing data in human projects. That paper was really inspiring for us in the sense that it highlighted that microbial sequences had been found in exome sequencing data, which means the coding part of our genome. It opened up a huge number of questions about this phenomenon in higher organisms, and it definitely called for end-to-end tools to investigate what is happening there. Of course, I put a funny picture of the microbiome here because, if you remember the definition I just gave, which is the transfer of genetic material between species that are in close proximity, then we and many other mammals are the leading example of this close proximity between different species, and we have a whole set of microorganisms that live with us and contribute to our own biology. Clearly, there's a lot to investigate here.
-
-[3:53](https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69&t=233)
-A couple of definitions for the pipeline that we have developed. First of all, when you map next-generation sequencing reads to a host genome, in our example a human genome, you could have several scenarios. The first scenario, which is the most common, is that if you do paired-end sequencing, both mates in the pair map correctly to the host genome. But you can also have a couple of additional scenarios: one where only one of the two members of the pair maps and the other is unmapped, and one where both reads in the pair are not mapped to the host genome. We needed a definition for the pipeline, so we have labelled the pairs where only one read is mapped to the genome and one is not as "single unmapped", and we have defined those where both members of the pair are unmapped as "both unmapped". You will find these short definitions recurring later on in the pictures and the slides that we present in a moment.
-
-[5:05](https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69&t=305)
-Of course, the importance of the pair where one mate is mapped and the other is unmapped is that it allows us to make assumptions about a potential integration site. Because of course, we can measure and evaluate the abundance of taxonomic IDs from every read that is not mapped to the host genome. But for those that are members of a pair where one of the two is actually mapped, we can additionally try to infer where that potential integration has happened thanks to the coordinates that we have from the mapping of the mapped member of the pair.
-
-[5:48](https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69&t=348)
-This is the pipeline overview. The pipeline, I think, is relatively straightforward and includes a part dedicated to alignment and quality control, then the conversion and parsing of the reads that I just illustrated, classification using Kraken at the moment, and then a last phase of reporting. We're going to see each of these steps in the pipeline in a moment. The pre-processing is very important because the pipeline has been designed to be plugged in downstream of other studies. I gave the example of the initial paper that inspired us to develop this pipeline, which was the discovery of microbial reads within human exome studies. Our own idea, particularly because we have also contributed to Sarek, was to plug this type of pipeline downstream of those kinds of pipelines, like Sarek: accepting the BAM files of the alignments that have been produced by human exome or whole genome sequencing studies, and then using the pipeline to process all those reads that have not been mapped. But the pipeline can also start from FASTQ, so using raw reads, and then it does a standard alignment to the host genome using BWA.
-
-[7:10](https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69&t=430)
-[interrupted stream]
-
-[7:39](https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69&t=459)
-We do this using samtools and the bitwise flags 13 and 5. Then we further parse the potential integration for the single unmapped reads using the information from the mapping coordinates of the mapped member of the pair. At the moment we are using Kraken 2 to taxonomically classify the reads, and in particular we have chosen this tool because we use the k-mer classification that it reports as a sliding window along the NGS read we are analyzing, also as a way of interpreting the results and doing further QC on the outcome of the taxonomic classification.
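-
-To make the idea concrete, here is a minimal sketch of how such a read-pair extraction could look with samtools bitwise flags; the file names are placeholders and the exact commands inside nf-core/hgtseq may differ.
-
-```bash
-# Flag bits: 1 = read paired, 4 = read unmapped, 8 = mate unmapped
-
-# "both unmapped": the read and its mate are both unmapped (1 + 4 + 8 = 13)
-samtools view -b -f 13 sample.bam > both_unmapped.bam
-
-# "single unmapped": this read is unmapped but its mate is mapped (1 + 4 = 5, excluding bit 8)
-samtools view -b -f 5 -F 8 sample.bam > single_unmapped.bam
-```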
-
-[8:26](https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69&t=506)
-This all goes into the reporting phase of the pipeline. We generate traditional Krona plots per group: if your analysis has one, two, or three different groups, we group the Krona plots per category of your samples. We obviously use MultiQC for the reporting, which also includes a classification overview of all the reads, thanks to the parsing of the Kraken 2 outputs. Then we perform a preliminary analysis using a parameterized R Markdown file, which adds a couple of important pieces of information to the preliminary analysis. One is a classification score: we try to use the information that Kraken 2 gives us in its output to assign a classification score to each of the reads, which further allows us to filter based on the quality of the taxonomic classification. The important information here is how much of the read has been classified and assigned to the taxonomic organism that appears in the result. We have also curated, from a number of publications, a list of contaminants that are known to affect DNA extraction and DNA extraction kits, and we have further classified the contaminants depending on their potential role in human diseases, because we are particularly interested in analyzing these phenomena in humans.
-
-[10:05](https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69&t=605)
-A couple of indications about the usage: this is a typical command line to start the pipeline. We use an input sample sheet in comma-separated values format, as in most nf-core pipelines. Then we use the iGenomes genome indication, where we specify the host genome, so this is the first part that performs the host genome alignment. Then we pass the host taxonomic ID, which is used to filter the results in the R Markdown report. Two very important parts of this command line are the path to the Kraken database and the path to the Krona database, which can either be given as a local path if you have it locally, or as a tar.gz file, which can also be online or in a repository that you might have in a cloud resource. The inputs, as I mentioned in the beginning, can be either raw reads with a FASTQ input, as you can see in the first example, or already pre-aligned BAM files coming from another pipeline, as in the second example of input. Here I also have to say that the Kraken database is obviously crucial for the classification, because the whole point of this pipeline is to assign a taxonomic classification to the unmapped reads. The way the Kraken database has been built will obviously have a huge effect on the results that you're able to report and on the taxonomic IDs that you're able to detect in your reads.
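-
-For orientation, a command along these lines reflects the options described above; the parameter names here are assumptions for illustration only, so please check the nf-core/hgtseq documentation for the exact flags.
-
-```bash
-# Hypothetical invocation assembled from the options mentioned in the talk.
-# --input       : CSV sample sheet (FASTQ or pre-aligned BAM inputs)
-# --genome      : iGenomes key for the host genome
-# --taxonomy_id : host taxonomic ID, used to filter the R Markdown report
-# --krakendb / --kronadb : local paths or tar.gz archives (may also be remote)
-nextflow run nf-core/hgtseq \
-    -profile docker \
-    --input samplesheet.csv \
-    --genome GRCh38 \
-    --taxonomy_id 9606 \
-    --krakendb /path/to/kraken2_db \
-    --kronadb /path/to/krona_db \
-    --outdir results
-```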
-
-[11:56](https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69&t=716)
-A couple of words about the performance. We have tested this pipeline on different species, to demonstrate the existence of the phenomenon in not only humans, but also in other mammals. This is an overview of the execution of the pipeline on 10 exomes from humans. You can see that they are executed in our local cluster in about three hours. This is quite good. The pipeline is very smooth in its run. Then we have also reported CPU and memory usage for the most intensive tasks. There's nothing major to discover here, I mean, in particular, in terms of memory. Kraken and QualiMap are also quite intense. Again, the amount of memory that is used by Kraken definitely depends on the database that is used for the classification. QualiMap is known to be quite greedy with the memory. In Sarek, it has been swapped with mosdepth. We might do the same in a future version of the pipeline for the same reasons.
-
-[13:10](https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69&t=790)
-Homework, mostly. As I mentioned, Kraken has a very useful type of output where you can appreciate the assignments to taxonomic IDs by a sliding window over the k-mers the read has been split into. This will allow us to draw much more information in terms of classification filters or heat maps that will let us investigate the biology regulating this type of event better. We will probably dedicate some work to optimizing the computing part of the pipeline; I just mentioned the issues with QualiMap. There will certainly be improvements to the preliminary analysis report, which currently runs only on humans. We will also consider the introduction of alternative taxonomic classifiers; here we have a number of examples in other nf-core pipelines. I hope this is enough of an overview. We have very recently published a paper in the International Journal of Molecular Sciences, where the nf-core community is a collective author on the publication as well. There you can find more details, particularly about the scientific findings that we have collected by analyzing the different species we used for testing the pipeline. I'm open to any questions.
-
-[14:47](https://www.youtube.com/watch?v=nDaRt2L-tRw&list=PL3xpfTVZLcNiSvvPWORbO32S1WDJqKp1e&index=69&t=887)
-(host) Thank you very much. I have enabled now for anyone to unmute themselves if they want to ask any questions. Or you can also write questions in the chat. I will then read them out. It seems that it was very clear to everyone. If there are any further questions, you can always ask them in the nf-core Slack. Or you can ask directly, Francesco, I assume.
-
-(speaker) Yes, definitely. Both on Slack and via email.
-
-(host) Then I would like to thank you again, Francesco, and also the Chan Zuckerberg Initiative for funding our bytesize talks, and all of the audience for listening. Thank you very much.
-
-(speaker) Thank you.
-
-
diff --git a/markdown/events/2023/bytesize_mag.md b/markdown/events/2023/bytesize_mag.md
deleted file mode 100644
index 20213691b8..0000000000
--- a/markdown/events/2023/bytesize_mag.md
+++ /dev/null
@@ -1,112 +0,0 @@
----
-title: 'Bytesize: nf-core/mag'
-subtitle: Sabrina Krakau - University Tübingen, QBiC
-type: talk
-start_date: '2023-02-28'
-start_time: '13:00 CET'
-end_date: '2023-02-28'
-end_time: '13:30 CET'
-youtube_embed: https://www.youtube.com/watch?v=IiorfDHeoLo
-embed_at: 'mag'
-location_url:
- - https://doi.org/10.6084/m9.figshare.22210879.v1
- - https://www.youtube.com/watch?v=IiorfDHeoLo
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **"nf-core/bytesize"**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: nf-core/mag
-
-This week, Sabrina Krakau ([@skrakau](https://github.com/skrakau)) is going to introduce nf-core/mag. nf-core/mag is a bioinformatics best-practice analysis pipeline for assembly, binning and annotation of metagenomes.
-
-Video transcription
-**Note: The content has been edited for reader-friendliness**
-
-[0:01](https://www.youtube.com/watch?v=IiorfDHeoLo&t=1)
-Hello, everyone, and welcome to this week's bytesize talk. I'm happy to present to you today Sabrina Krakau. She is situated at QBiC at the University of Tübingen. She is talking today about the nf-core pipeline mag and off to you.
-
-[0:21](https://www.youtube.com/watch?v=IiorfDHeoLo&t=21)
-Thanks Franziska for this kind introduction. I'm very happy that it finally worked out to also present the nf-core/mag pipeline to all of you. You can use this pipeline for metagenome hybrid assembly and binning. The goal of this pipeline is to analyze microbial communities by recovering individual genomes. This might be particularly useful, for example, if you do not have a complete set of high-quality reference genomes available. Such microbial communities could be anything, for example environmental samples, but also host-associated communities such as the gut microbiome.
-
-[1:02](https://www.youtube.com/watch?v=IiorfDHeoLo&t=62)
-The microbiome samples can be processed with metagenome shotgun sequencing, which generates short reads. The nf-core/mag pipeline then essentially combines these reads and assembles them into larger contigs. In a downstream genome binning step, it bins these contigs into so-called metagenome-assembled genomes, also called MAGs. These MAGs can then be further annotated and also taxonomically classified. That's the concept of the nf-core/mag pipeline. As for many nf-core pipelines, the development of this was quite a large community effort with many different contributors, so I'll just mention the most important ones. It was started by Hadrien Gourlé, then Daniel Straub contributed a lot since early on, then I joined, and since last year James Fellows Yates has been a main contributor to this pipeline.
-
-[2:04](https://www.youtube.com/watch?v=IiorfDHeoLo&t=124)
-Now I would like to mention the key features of this pipeline. It can perform a hybrid assembly using both short Illumina and long Nanopore reads. This is useful because assemblies generated only from short reads are often highly fragmented; additionally using longer reads can improve the contiguity of the resulting assemblies. The pipeline also performs a genome binning step and, optionally, a binning refinement step, can then taxonomically classify the resulting bins, and also provides comprehensive QC statistics. Furthermore, it can utilize sample-wise group information. This can be used for co-assembly, which is important if you have data sets where you know that certain strains are present across multiple samples, such as within longitudinal data sets. Because co-assembly can increase the sequencing depth, it also makes it possible to recover more low-abundance genomes. Additionally, the group information is used for the computation of co-abundances, which is used in the genome binning step. Furthermore, the pipeline also allows the handling of ancient DNA, because it contains an ancient DNA validation sub-workflow, which is rather specific to this pipeline. A previous version of this pipeline was already published at the beginning of this year in NAR Genomics and Bioinformatics, so if someone's interested in more details, you can also have a look at this application note.
-
-[3:44](https://www.youtube.com/watch?v=IiorfDHeoLo&t=224)
-Here you can see an overview of the pipeline. The pipeline starts with different pre-processing and QC steps, then the actual assembly is performed, followed by a final genome binning step. In green you can see the processes or different tools that are run by default by this pipeline. In the following I would like to guide you through the different steps of this pipeline in more detail. But first, how can we actually run it? Here you can see an example of the Nextflow command that is typically used; in order to run it with default settings, just provide a sample sheet as the input file.
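-
-As a rough illustration of such a default invocation (the container profile and paths are placeholders, not taken from the talk):
-
-```bash
-# Minimal sketch of running nf-core/mag with default settings
-nextflow run nf-core/mag \
-    -profile docker \
-    --input samplesheet.csv \
-    --outdir results
-```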
-
-[4:26](https://www.youtube.com/watch?v=IiorfDHeoLo&t=266)
-Here you can see an example of what the sample sheet looks like for this pipeline: it contains five columns. The first column contains a sample name and the second column a group name; in this case all samples belong to the same group. Then you have to provide the paths to the input read files, either only the short reads or the short and long reads, so the long reads are optional. Starting from this sample sheet file, or from a fastq file provided directly if you only have short reads, the pipeline then pre-processes the short and long reads separately from each other with different pre-processing steps. I do not want to discuss them in detail; maybe I'll just mention that host reads can also be removed by mapping the reads to given reference sequences. This information is also used indirectly for the long reads, since the long reads are filtered based on the already filtered short reads. The short reads can then also already be taxonomically classified, which can serve, for example, as a quality control to check for potential contamination.
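-
-A sketch of such a five-column sample sheet is shown below; the exact column names are an assumption here, so check the nf-core/mag documentation before relying on them.
-
-```bash
-# Hypothetical nf-core/mag sample sheet; long reads are optional and the column may be left empty
-cat > samplesheet.csv <<'EOF'
-sample,group,short_reads_1,short_reads_2,long_reads
-sample1,0,data/sample1_R1.fastq.gz,data/sample1_R2.fastq.gz,data/sample1_ont.fastq.gz
-sample2,0,data/sample2_R1.fastq.gz,data/sample2_R2.fastq.gz,
-EOF
-```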
-
-[5:41](https://www.youtube.com/watch?v=IiorfDHeoLo&t=341)
-After these pre-processing steps, the actual assembly is done. This can be done sample-wise, or the group information can be used to run a co-assembly; by default, however, this is done for each sample individually. By default, both SPAdes and MEGAHIT are run. However, you should keep in mind that if you have long reads and are interested in a hybrid assembly, then only SPAdes can be used for this. The tool QUAST is then used to assess the quality of the resulting assemblies, and the assemblies are also further processed with the tool PRODIGAL, which predicts protein-coding genes.
-
-[6:26](https://www.youtube.com/watch?v=IiorfDHeoLo&t=386)
-That's the assembly part. The contigs of these assemblies are then further processed in the genome binning step, where the tools MetaBAT2 and MaxBin2 are used to bin the contigs and retrieve the actual genomes. The results of these tools can additionally be combined in a binning refinement step, which makes use of DAS Tool. The quality of these bins is also assessed with the tool QUAST, and in addition the tool BUSCO is used, which makes use of single-copy orthologs to estimate the contamination and completeness of the retrieved genomes. The pipeline also uses a custom script which estimates the abundance of the individual bins, because this is also a relatively important output of this pipeline. In further downstream processes the bins are then taxonomically classified, by default using the tool GTDB-Tk, and also annotated with the tool PROKKA. Finally, a MultiQC report is generated, as well as a relatively comprehensive MAG summary report.
-
-[7:41](https://www.youtube.com/watch?v=IiorfDHeoLo&t=461)
-What does the output of the pipeline look like? Besides all the individual results of the individual tools, the pipeline generates a clustered heat map showing the MAG abundances across different samples. Here you can see an example of how this looks. If you would see here, for example, that certain samples cluster together which you know originate from different groups, this might indicate that something has gone wrong. The pipeline also outputs the MAG summary, which I already mentioned. This contains, for each bin or MAG, the abundance information across different samples, the QC metrics from the BUSCO and QUAST results, and also the taxonomic classifications from the tool GTDB-Tk.
-
-[8:33](https://www.youtube.com/watch?v=IiorfDHeoLo&t=513)
-And with this, I've shown you the rough overview of the pipeline and next I would like to show you the impact different assembly settings can have. For this I simulated some mouse gut data set in the past with the tool CAMISIM. I generated hybrid data containing Illumina data and Nanopore reads and generated two groups, each with a time series of four samples. This might be the ideal case where a co-assembly might be useful. Now I would like to show you some of the resulting assembly metrics that are commonly used.
-
-[9:14](https://www.youtube.com/watch?v=IiorfDHeoLo&t=554)
-Here you can see, for example, the total length of the resulting assemblies compared for different pipeline runs for which different assembly settings were used. The lower two pipeline settings correspond to a sample-wise assembly using either only short reads or short and long reads, so hybrid data, and the upper two settings correspond to a co-assembly, again with short or short and long reads. What we can see is that the total length of the resulting assemblies increased significantly both by using the hybrid setting and by applying the co-assembly setting. We see similar results when looking at the number of MAGs, so the number of genomes that could be retrieved from this data, and also when looking at the N50 values. This indicates that the actual setting used for the assembly within this pipeline can have a relatively large impact on the results. It's definitely good that the pipeline provides different settings, so that you can really choose the right setting for your input data, and it might also be worth comparing different settings.
-
-[10:32](https://www.youtube.com/watch?v=IiorfDHeoLo&t=632)
-Another topic I would like to mention briefly is the resource requirements, because this came up quite often in the Slack channel, and it's also somewhat difficult to estimate in advance, because it really differs depending on the input data. The main requirements, both for memory and time, come from the assembly step. As I already mentioned, it really differs for different input data sets, and I collected some numbers from different pipeline runs that Daniel Straub ran on our compute cluster just to give you a rough idea. For one rather small sample, which was a culture sample, both MEGAHIT and SPAdes required less than 25 GB and finished in a couple of hours. However, for a larger river sample data set, MEGAHIT already took more than 100 GB of RAM and more than one day to finish, and SPAdes even took more than 900 GB of memory and required more than nine days. There was another very large data set containing 15 soil samples for which a co-assembly was also performed; for this, MEGAHIT required one TB and more than 17 days, and SPAdes could not even be run because it would have required more than two TB of memory.
-
-[11:55](https://www.youtube.com/watch?v=IiorfDHeoLo&t=715)
-This just shows that even for smaller data sets, you cannot run this on your laptop. In general, one can say that it depends on the sequencing depth, the number of samples, the complexity of the underlying metagenome, and also on the applied tool and setting. Here it might be worth noting that both assembly tools are run by default, but MEGAHIT requires far fewer resources than SPAdes, so if you do not want to compute a hybrid assembly it might make sense to consider the `--skip_spades` parameter. Additionally, co-assembly also increases the required resources because it pools samples; at least for one individual task, the required memory and time are much higher. This is important to keep in mind, because if you want to run the pipeline on larger data sets, you might want to provide a custom config file to adjust the resources for your particular data set, as sketched below.
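-
-In practice that might look something like the following: skip SPAdes to save memory and point the run at a custom config that raises the resources for the assembly processes. The config file name here is made up, and the exact syntax for process resource overrides is described in the nf-core documentation on custom configuration.
-
-```bash
-# Sketch: reduce the footprint and supply a custom resource config
-nextflow run nf-core/mag \
-    -profile docker \
-    --input samplesheet.csv \
-    --skip_spades \
-    -c big_assembly.config \
-    --outdir results
-```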
-
-[12:53](https://www.youtube.com/watch?v=IiorfDHeoLo&t=773)
-With this we have seen how we can run the nf-core/mag pipeline on modern metagenomic data sets. As I already mentioned at the beginning, it can also handle ancient DNA. For this, James and Maxime added an ancient DNA validation sub-workflow. This is particularly interesting because, as far as we know at least, there is no other such pipeline that can handle ancient DNA. What it essentially does is identify possible ancient contigs by modeling ancient DNA damage patterns, and then polish the contigs to remove the errors caused by the presence of such ancient DNA damage, allowing more unimpaired downstream analyses. It might be interesting for some of you to know that this pipeline can also handle ancient metagenomic data analysis.
-
-[13:52](https://www.youtube.com/watch?v=IiorfDHeoLo&t=832)
-With this, I'm already at the end of my presentation; just a few words on the outlook. James has already prepared the next release, which just requires one more review. It contains another optional binning tool, namely CONCOCT, and it will also optionally allow bin QC with CheckM and GUNC. For the medium term, it would also be very nice if a functional annotation step could be added, depending on the strategy, for example using HUMAnN 3 or eggNOG, and also a standalone long-read assembly option, for example using the tool metaFlye, such that the pipeline could also be run without short-read data.
-
-[14:40](https://www.youtube.com/watch?v=IiorfDHeoLo&t=880)
-In general, if you are interested in contributing, or if you have any questions or problems you would like to discuss, you can join us in the nf-core Slack channel dedicated to the MAG pipeline, or have a look at our GitHub repository. We're always happy about feedback or particular bug reports and issues. With this, I would like to thank you for your attention. Then, in particular, my colleagues from QBiC, importantly Daniel Straub for many contributions, James and Maxime from the MPI for Evolutionary Anthropology, Hadrien, of course, and importantly, the whole nf-core core team and community for helping with the development, for reviewing, testing and creating issues. With this, I'm happy to take any questions.
-
-[15:30](https://www.youtube.com/watch?v=IiorfDHeoLo&t=930)
-(host) Thank you very much. There is indeed one question already in the chat.
-
-(question) It was at the very beginning when you were talking about examples, and you mentioned CAMISIM. Could you explain more in detail what this is?
-
-(answer) This is a tool which was also used in the CAMI challenge to simulate metagenomics data. It uses different genome sources as input; in this case I used a set of mouse genome sources, which came from some mouse gut data sets. It can then simulate Illumina and Nanopore data and also simulate different taxonomic profiles. But I would have to look up the details, it was quite a while ago. Was there any particular question about this?
-
-(question cont.) No, it was just a question, "what is CAMISIM?", but I think James has now added some links to articles. If anyone is interested, they can have a look at that.
-
-[16:43](https://www.youtube.com/watch?v=IiorfDHeoLo&t=1003)
-(host) For anyone else, if there are more questions, you can now unmute yourself and just ask them straight away. Or you can put them in the chat and I will read them out for you.
-
-(question) I would actually have a question. What happens to multi-mappers? I can imagine that if you have related bacteria that it would also map to different ones. How does the pipeline deal with that?
-
-(speaker) I mean, this is handled by the assembly tools then somehow.
-
-(question cont.) But are they removed or added to all of them? Any idea?
-
-(speaker) Some of the others are more into the details of the algorithmic parts of the assemblers.
-
-(audience) Do you mean when you're mapping back to the contigs or during the assembly itself?
-
-(question cont.) During the assembly. I mean, you map to the genomes, I guess?
-
-(audience cont.) No. We need to explain the main concept there. But there's some fancy maths magic that goes on which estimates which reads most likely go with each other based on the number of mutations they have with each other. There's some weird maths stuff which works out which is the best grouping.
-
-(question cont.) Okay, then I misunderstood that part. Thank you.
-
-[18:15](https://www.youtube.com/watch?v=IiorfDHeoLo&t=1095)
-(host) Are there any more questions from the audience? It doesn't seem so. If you have any more questions later on, as you mentioned, you can always go to nf-core Slack and ask questions there. If this is now all the questions answered so far, I would like to thank Sabrina again for this very nice talk. Of course, as usual, I would also like to thank the Chan Zuckerberg Initiative for funding the bytesize talks and of course everyone in the audience for listening. Thank you very much.
-
-(speaker) Thanks.
-
-
diff --git a/markdown/events/2023/bytesize_mentoring.md b/markdown/events/2023/bytesize_mentoring.md
deleted file mode 100644
index 72029995e4..0000000000
--- a/markdown/events/2023/bytesize_mentoring.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: 'Bytesize: Experiences in the nf-core Mentoring Program'
-subtitle: Mariana Guilardi (Federal University of Sao Paulo, UNIFESP) and Alyssa Briggs (The University of Texas at Dallas)
-type: talk
-start_date: '2023-05-09'
-start_time: '14:00 CEST'
-end_date: '2023-05-09'
-end_time: '14:30 CEST'
-youtube_embed: https://www.youtube.com/watch?v=K8gvK1drt0w
-location_url:
- - https://www.youtube.com/watch?v=K8gvK1drt0w
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **"nf-core/bytesize"**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: Experiences in the nf-core Mentoring Program
-
-Mariana Guilardi ([@mariguilardi](https://github.com/mariguilardi)) and Alyssa Briggs ([@alyssa-ab](https://github.com/alyssa-ab)) were part of the nf-core mentoring program that concluded at the end of March 2023. In this week's bytesize talk they will share their experiences as mentee and mentor, respectively, during the course of the program.
diff --git a/markdown/events/2023/bytesize_modules_patch.md b/markdown/events/2023/bytesize_modules_patch.md
deleted file mode 100644
index 716961fc0b..0000000000
--- a/markdown/events/2023/bytesize_modules_patch.md
+++ /dev/null
@@ -1,83 +0,0 @@
----
-title: 'Bytesize: nf-core modules patch'
-subtitle: Phil Ewels - Seqera Labs
-type: talk
-start_date: '2023-03-07'
-start_time: '13:00 CET'
-end_date: '2023-03-07'
-end_time: '13:30 CET'
-youtube_embed: https://www.youtube.com/watch?v=7pu6Ikhi1eU
-location_url:
- - https://doi.org/10.6084/m9.figshare.22231987.v1
- - https://www.youtube.com/watch?v=7pu6Ikhi1eU
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **"nf-core/bytesize"**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: nf-core modules patch
-
-Have you ever wanted to tweak an nf-core module for your own use? No need to copy / paste and lose the benefits of linting and updates, instead try using the magic `nf-core modules patch` command to keep track of your modifications.
-
-Sounds too good to be true? Join us to hear more at this week's bytesize talk with Phil Ewels ([@ewels](https://github.com/ewels))!
-
-Video transcription
-**Note: The content has been edited for reader-friendliness**
-
-[0:01](https://www.youtube.com/watch?v=7pu6Ikhi1eU&t=1)
-Hi everyone, welcome to today's nf-core bytesize talk. My name is Phil Ewels and today I will be talking to you about the `nf-core modules patch` functionality. This is a very simple functionality, so I'm thinking today's bytesize talk will be fairly short. Many people don't know that it exists and I think it could be quite useful, especially for people using nf-core tooling and the nf-core templates for pipelines, either private or custom, which are not going to be part of the main nf-core organization. This is where this tooling really, really shines. If you want to use the nf-core templates for stuff you're doing in-house, this talk is for you. I don't have any slides or anything, it's just going to be a live demo, I'm going to walk through how I use it and try and describe what it's doing in the back end and hopefully that will make sense to you.
-
-[1:01](https://www.youtube.com/watch?v=7pu6Ikhi1eU&t=61)
-Those of you who've seen me talk before will know that I love a good live demo, things usually go wrong, but that's part of the fun of it, so let's join me on this rollercoaster. Just before I kick off, a little bit of background information, what we're talking about here. For those of you familiar with this, apologies, but just to get everyone up to the same level: with nf-core we have a pipeline template for the whole pipeline and then in the last year or two with DSL2 we've been working with modules. These are wrappers around specific tools, so this is on pipeline level and is one workflow all the way through from start to end analysis. A module is just a single tool and we have shared modules which people can collaborate on, which you can install into a pipeline. When you make changes to a module, which is a centralised module, those changes can be easily integrated into every pipeline that is using that module.
-
-[2:03](https://www.youtube.com/watch?v=7pu6Ikhi1eU&t=123)
-The classic example, and the one I'm going to be working with today, would be FastQC, a QC tool for DNA sequencing data used by, I don't know how many pipelines within nf-core, but very many. We've been chatting on Slack yesterday and today about some updates. There's a new version of FastQC that's come out with some new options like `--memory`, `--svg` and so on, and we've been talking about those updates. We can just do that in one pull request, one discussion on one module, and then all the pipelines can pull in those changes across the board and get that new functionality, which is fantastic. So: pipelines, modules.
-
-[2:39](https://www.youtube.com/watch?v=7pu6Ikhi1eU&t=159)
-In order to make this whole system work, it's really important that the code of the modules within the pipeline is the same as the code in the central modules repository. That makes sense: if you want to synchronise the two, you need to keep them tightly together. What that means, though, is you can't change that code in your pipeline. If you do, the nf-core code linter will start complaining and tell you that you're not allowed to do that. What people have done before is take the centralised nf-core module and just copy it as a local module, and then they can do whatever they want with it; they can change it and the linter won't complain. The downside of doing that is you're no longer in sync, so if updates come into the centralised module, you won't see them, you won't be able to pull them in, and you're effectively losing that collaborative aspect which is so powerful. This is where `nf-core modules patch` comes in as a stopgap, if you like: a way for you to make changes to central modules in your pipeline - and your pipeline alone - whilst keeping the linter happy and keeping all the functionality around updating modules and so on. Hopefully that makes sense; if you want to ask me any questions at this point, shout, otherwise we can take questions at the end.
-
-[4:02](https://www.youtube.com/watch?v=7pu6Ikhi1eU&t=242)
-Now I will dive into a screenshare and show you how this thing works. Hopefully you can now all see my setup, I'll make the zoom toolbar as small as possible. My pet pipeline is the nf-core/methylseq pipeline, it's one of the first ones I ever wrote and it's one I'm still fairly involved with the maintenance for. Hopefully everyone is familiar with the idea of the nf-core lint command which runs all the code tests on every single module in all parts of the pipeline. Today for live demo purposes I'm going to do `modules lint` which just only lints the modules and not the entire pipeline and I'm actually going to make it just the fastQC module so things work nice and quickly.
-
-[4:54](https://www.youtube.com/watch?v=7pu6Ikhi1eU&t=294)
-If I run linting, make it a bit bigger, you can see that everything's fine, my pipeline's up to date with the central dev version of methylseq and I've got a couple of warnings about this module: there's a new version of software available and there's a new version of the central module available. But they're both warnings, they're not failures, so that's my starting point. We were talking about new fastQC functionality. This is VS-Code, I'm looking at the methylseq pipeline source code now. This is not the central modules repo, this is my pipeline. If I go into modules I've got the local ones and I've got nf-core, scroll down you can see I've got the fastQC one and this is the shared fastQC module. Now I could make changes and drop into local but I'm not going to do that today. Let's say that things are moving too slowly, I want to do something here myself. What I can do is drop something custom in here, let's say I'm going to have a new input channel to handle SVGs and I want to do it just on my pipeline. I'm going to hit save. You know, assume that I'm doing some valid change here and I've tested locally, the Nextflow side of things is working fine and it's doing what I want. Now if I rerun this linting test it's going to be unhappy with me because this lint test checks the version on the web on the nf-core modules repository, looks at the code there and checks the code that I have locally and in this case it says this code does not match the version of the module that you say you have and so that's a hard failure. All continuous integration tests on GitHub will start giving a red cross and failing and this is not a good situation. This is normally where you freak out and copy it to local or something.
-
-[6:43](https://www.youtube.com/watch?v=7pu6Ikhi1eU&t=403)
-But now I'm going to do some magic. Now I do `nf-core modules patch`. I run this command, it asks me which module I want to do it for - FastQC, that's where I've made my changes - and it very quickly spits out some content. First things first, this is a diff, so this is where it's compared the remote file and the local file for any changes. You can see it's picked up here that there's some code in my local copy which has changed. This looks right, this is what I just added, so it says there's an extra line here. These diff files are really cool because with diff you can generate these diff or patch files and apply them on top of other files, so we can reapply this change at any time. That's what we do: we save this diff, and if I go to `git status` you can see I've got changes to my modules.json file, which is used by nf-core to track the synchronisation between my local pipeline and the shared modules repository. I've got the changes in the FastQC file, which is the thing I just edited and saved, and I've got a new file here called fastqc.diff. If I go back into VS Code we can see that this diff file is just what was printed to the console here, and it's just saying, alongside the FastQC module, hey, I've got some local changes here. Then if I go into modules.json, you can see, if I find FastQC, that a new line has been added in the JSON file here, and it's just telling nf-core that there is a patch file that exists in this location.
-
-[8:19](https://www.youtube.com/watch?v=7pu6Ikhi1eU&t=499)
-Okay, so great, what does that do? I can add all of this, let's make a new branch. Now if I go back to the lint command, `nf-core modules lint`, which was failing, we're back to our starting position. Everything's fine, everything's happy. Just to explain what's going on here: I've still got those local changes, but in the background, when I do `nf-core lint`, the nf-core code is fetching the remote version from the nf-core modules repository, but then it knows I have a patch file, that diff file. It takes the copy it's got from the web, applies the patch file on top and then compares. That's why there are no changes. If I make some more changes in here again, say `val foo`, then that's not going to be in the patch file and it's going to fail. In fact, it did a hard failure where it couldn't even figure out what was going on when I did it in a different place here. Then it will just fail again and say that something has changed. Then I can run `nf-core modules patch` again, it will update that diff file, but now the new changes are covered by the diff and everything will work again. Hopefully that makes sense.
-
-[9:50](https://www.youtube.com/watch?v=7pu6Ikhi1eU&t=590)
-What's cool is that this doesn't just apply to linting. The same process works when I update modules. We've got this lint warning that there's a new version of this module available. I can also do `nf-core modules update`, let's just do FastQC and hopefully, yes, there we go. It has updated the module for me, so it's gone to the shared nf-core remote, updated my local copy and then still managed to reapply the patch file on top of the new updated version and save that there. `nf-core modules update` still works, which is like magic, I think. We can see these are all the new changes that have come in. There's a new git_sha for this module for the latest version, and you can see the changes which happened when I updated the module.
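-
-For reference, the workflow demonstrated above condenses to roughly the following sequence of commands, using fastqc as the example module:
-
-```bash
-nf-core modules lint fastqc    # fails after a local edit to the shared module
-nf-core modules patch fastqc   # records the local change as a .diff file and notes it in modules.json
-nf-core modules lint fastqc    # passes again: remote code plus patch now matches the local code
-nf-core modules update fastqc  # pulls the new central version and re-applies the patch on top
-```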
-
-[10:49](https://www.youtube.com/watch?v=7pu6Ikhi1eU&t=649)
-Right, hopefully that's all you need to know and everything works beautifully, but I thought for completeness I would also show you one small complication where things could go wrong. We got a hint of it a second ago, actually. Something that could happen is if I do a reset, so just go back to when we first made the patch, before we updated. Now I can add a different change here: I'm going to add `--svg` onto the FastQC command itself. I execute `git diff` and you can see it's the same, `nf-core modules lint fastqc`, you can see it's the same, this is all the same, `nf-core modules patch fastqc`, yes, regenerate the patch, okay, so now our patch file has got two changes here, that's good, and `nf-core modules lint fastqc`. It's the second change, but everything is exactly the same so far. The tricky bit comes now: if I do `nf-core modules update fastqc`, just like I did before, it will fail.
-
-[12:14](https://www.youtube.com/watch?v=7pu6Ikhi1eU&t=734)
-What's happened here is I've got a couple of warning messages saying it's failed to apply the patch and you will have to apply the patch manually. This is a little bit like when you're working on code, you put in a pull request and you get a merge conflict: there have been changes on the central nf-core modules repository, and there have been my local changes recorded in those patch files, and the tool can't automatically figure out how to reconcile the two. What it's done is just clobbered my local changes. If I go in here, you can see it's made the updates, but I've lost all my custom changes and it's just overwritten them with what was on the remote copy. All I have to do is go back in and recreate this patch: I can go back in, add `--svg` and `val svg`, and then rerun `nf-core modules patch`. That's fine, so just bear that in mind. Sometimes when you do updates and you have local patches, you might need to do a little bit of fiddling. Just be careful to always run `git commit` before you do stuff, because then you don't lose anything and you can easily see which changes are happening.
-
-[13:28](https://www.youtube.com/watch?v=7pu6Ikhi1eU&t=808)
-Right, that's my live demo. Hopefully everybody followed along with that and it made sense. Yes, sorry, Fran says in the comments that when I said earlier about shouting, you can shout literally if you want to, but you can also just ask. Happy to take any questions, and hopefully this will be useful for some of you.
-
-(host) You can unmute yourself now if you want to ask questions. Lots of happy people, very few questions.
-
-(speaker) That's good. It's quite a nice, neat little small topic to discuss, so I had plenty of time for it.
-
-[14:20](https://www.youtube.com/watch?v=7pu6Ikhi1eU&t=860)
-(question) Sure, but what happens if you realize that you actually have more changes than you want to include, like if there's something that makes the whole pipeline fail or whatever? Can you undo the patch?
-
-(speaker) When you do the update, you mean?
-
-(question cont.) No, when you have written something, you get errors and it's like, ah, but this is because I did something manual that I want to be different. You do a patch and then you realize, oops, actually that was something completely different and messed everything up.
-
-(answer) Then it's no different from having made changes in any other way. You just go back, look at your git changes and revert in git or whatever. This part of the workflow is specific to just the nf-core tooling, just the linting and the updating. It comes in at the end, once you've already fiddled around and made the Nextflow in your pipeline work properly in the way you want it to.
-
-(host) Cool. Then thank you very much. Also thank you everyone for listening and as usual, I would like to thank the Chan Zuckerberg Initiative for funding our bytesize talks and "Hello" also to Maxime.
-
-
diff --git a/markdown/events/2023/bytesize_nf-core-taxprofiler.md b/markdown/events/2023/bytesize_nf-core-taxprofiler.md
deleted file mode 100644
index 37c9621f1d..0000000000
--- a/markdown/events/2023/bytesize_nf-core-taxprofiler.md
+++ /dev/null
@@ -1,85 +0,0 @@
----
-title: 'Bytesize: nf-core/taxprofiler'
-subtitle: Sofia Stamouli, Karolinska Institutet
-type: talk
-start_date: '2023-01-17'
-start_time: '13:00 CET'
-end_date: '2023-01-17'
-end_time: '13:30 CET'
-embed_at: 'taxprofiler'
-youtube_embed: https://www.youtube.com/watch?v=p1EQtidJiUY
-location_url:
- - https://www.youtube.com/watch?v=p1EQtidJiUY
- - https://doi.org/10.6084/m9.figshare.21916416.v1 (video)
- - https://doi.org/10.6084/m9.figshare.21916386.v1 (slides)
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **"nf-core/bytesize"**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: nf-core/taxprofiler
-
-This week Sofia Stamouli ([@sofstam](https://github.com/sofstam)) will talk about nf-core/taxprofiler, a bioinformatics best-practice analysis pipeline for taxonomic profiling of shotgun metagenomic data. It allows for in-parallel profiling with multiple profiling tools against multiple databases, and produces standardised output tables.
-
-Video transcription
-**Note: The content has been edited for reader-friendliness**
-
-[0:01](https://www.youtube.com/watch?v=p1EQtidJiUY&t=1)
-(host) Hello, everyone, and welcome to the first bytesize talk of 2023, and I'm very, very happy to have Sofia Stamouli present today a new pipeline called nf-core/taxprofiler, which is soon to be released, I've heard. Off to you, Sofia.
-
-[0:23](https://www.youtube.com/watch?v=p1EQtidJiUY&t=23)
-Hello, everyone. I'm going to talk about nf-core/taxprofiler, which is, to use the GitHub description, "a bioinformatics best practice analysis pipeline for taxonomic classification and profiling of shotgun metagenomic data". In the talk today, I will briefly introduce what shotgun metagenomics is and how the development of taxprofiler started. I will give an overview of the nf-core/taxprofiler pipeline and how you can use and run the pipeline, as well as our upcoming development plans.
-
-[1:07](https://www.youtube.com/watch?v=p1EQtidJiUY&t=67)
-To start with, what is shotgun metagenomics sequencing? I borrowed the description from Quince's paper from 2017 that describes shotgun metagenomic sequencing as the untargeted sequencing of all microbial genomes present in a sample. It allows for the determination of the taxonomic diversity in a sample. We may be looking at bacteria, viruses, fungi, archaea, or a combination of those, that are present in a sample. The development started in February 2022 by James Fellows Yates and Moritz Beber. We at Karolinska Institute joined during the online hackathon in March.
-
-[2:00](https://www.youtube.com/watch?v=p1EQtidJiUY&t=120)
-With that, I would like to mention that this is really a community-based development. There are a few nf-core pipelines, like eager and mag, that support some sort of taxonomic classification, but they only support one classifier. Each classifier is tailored to specific purposes, and each one has its own custom output format. There was really a need for a pipeline that would support taxonomic classification and profiling of metagenomic reads using both multiple tools and multiple databases. There are at the moment a few examples of how you can use nf-core/taxprofiler. One of those different contexts is pathogen detection in clinical metagenomics. One can also use it for comparative microbiome diversity analysis, as well as detection of food DNA from ancient microbiome samples. But of course, its uses are not limited to those.
-
-[3:15](https://www.youtube.com/watch?v=p1EQtidJiUY&t=195)
-This is an overview of what the pipeline looks like. I will go into more detail in the next slides. To start with, it supports both short reads and long reads. The first step is the sequencing quality control. Right now, FastQC is used as the default, but during the hackathon in October in Barcelona, falco was added as a drop-in replacement, which supposedly is an improvement especially for long reads. The user can choose between FastQC and falco. Next we have the pre-processing steps. All of those are optional and up to the needs of the user, and we have dedicated tools for each sequencing technology. The first step is adapter removal, where fastp and AdapterRemoval are supported for short reads and Porechop for long reads. Then taxprofiler allows for removal of low-complexity reads, with BBDuk and PRINSEQ++ for short reads and Filtlong for long reads. The user can also choose to remove host reads, using the Bowtie2 aligner for short reads and minimap2 for long reads. As the last of these pre-processing steps, taxprofiler allows for concatenation of multiple FastQ runs or libraries of a sample.
-
-[5:08](https://www.youtube.com/watch?v=p1EQtidJiUY&t=308)
-The last step of taxprofiler is, of course, taxonomic classification. Right now we support nine classifiers/profilers: kraken2, which can be paired with Bracken, KrakenUniq, MetaPhlAn3, MALT, DIAMOND, Centrifuge, Kaiju, and mOTUs. Each profiler can be executed with multiple databases, each with their own settings. Each profiler has its own output. Because each classifier has its own output format, taxprofiler supports standardized and aggregated taxon count tables with the help of taxpasta, which is a Python package whose development is led by Moritz Beber. It stands for taxonomic profile aggregation and standardization. I added the link to the GitHub repository.
-
-[6:23](https://www.youtube.com/watch?v=p1EQtidJiUY&t=383)
-In this slide, I'm going to talk about how taxpasta works. Here you can see an example of what the output of the kraken2 classifier looks like. It has six columns: the percentage of reads covered, the number of reads covered, the number of reads assigned; this column here describes the taxonomic level, this one describes the NCBI taxonomy ID, and this is the scientific name of each taxon. This is what the output from the Kaiju classifier looks like. It has five columns, it also has a header, and it is very different from kraken2. This is the case for all the different classifiers. With taxpasta, we are really able to have a standardized output format for each classifier. The output format looks like this. It has two columns: the first one describes the taxonomy ID, and this column describes the read counts.
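-
-**Note:** The slides with the example outputs are not reproduced in this transcript, so here is a small, made-up illustration of the standardized two-column format described above (the taxon IDs and counts are invented purely for illustration, and the exact column names may differ between taxpasta versions):
-
-```text
-taxonomy_id	count
-562	1250
-1280	873
-4932	56
-```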
-
-[7:38](https://www.youtube.com/watch?v=p1EQtidJiUY&t=458)
-About how to run the pipeline: one would need two input sample sheets, one describing the FASTQ files and one describing the databases. This is what the format of the sample sheet that describes the FASTQ files should look like. The first column should describe a unique sample name. The user can add a run accession, and should describe the name of the sequencing platform, as well as the path to the FASTQ files. Regarding the sample sheet describing the databases, this is what it looks like. It has four columns. In the first column one should give the name of the classification tool. Here is a unique name for the database. In this column, the user can specify the parameters that they would like to use. The fourth column describes the path to each database. About `TOOL1` and `TOOL2` (the arguments here): those can be replaced by whichever classifier or profiler is desired by the user. The last argument, `perform_step`, can be replaced by pre-processing or post-processing steps.
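-
-**Note:** The sample sheets themselves are only shown on the slides, so here is an illustrative sketch of the two files described above. The exact column names may differ between taxprofiler releases, so check the pipeline documentation rather than copying these verbatim.
-
-```csv
-sample,run_accession,instrument_platform,fastq_1,fastq_2,fasta
-sample1,run1,ILLUMINA,/path/to/sample1_R1.fastq.gz,/path/to/sample1_R2.fastq.gz,
-sample2,run1,OXFORD_NANOPORE,/path/to/sample2.fastq.gz,,
-```
-
-```csv
-tool,db_name,db_params,db_path
-kraken2,k2_db1,--quick,/path/to/kraken2/db1
-centrifuge,cf_db1,,/path/to/centrifuge/db1
-```
-
-A run command along the lines of the `TOOL1`/`TOOL2` and `perform_step` placeholders mentioned above might then look something like this (the parameter names follow the pattern described in the talk and may not match the released pipeline exactly):
-
-```bash
-nextflow run nf-core/taxprofiler -profile docker \
-    --input samplesheet.csv --databases databases.csv \
-    --run_kraken2 --run_centrifuge \
-    --perform_shortread_qc \
-    --outdir results
-```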
-
-[9:26](https://www.youtube.com/watch?v=p1EQtidJiUY&t=566)
-About our future plans: we would like to support more taxonomic classifiers, particularly for long reads. We would like to add an assignment validation step by aligning matched reads to the identified genomes, and we would like to add a workflow for database construction. But before we go on with the implementation of those plans, please stay tuned for the first release in January. With that, I would like to thank James Fellows Yates in Germany and Moritz Beber in Denmark, as well as my colleagues here in Sweden: Tanja Normark, Mahwash Jamy, Lauri Mesilaakso, and of course all the collaborators that contributed different classifiers and issues to taxprofiler. If you have any questions, please reach out to our Slack channel with the hashtag taxprofiler, and that's it. I'm happy to answer any questions.
-
-[10:43](https://www.youtube.com/watch?v=p1EQtidJiUY&t=643)
-(host) Thank you very much, Sofia. Are there any questions from the audience? You can either write your questions in the chat, or you can unmute yourself. I have allowed that now for anyone. If there are no questions at the moment, I actually have a question.
-
-(question) I was wondering why there are so many of these profilers, because, I mean, if there was one that actually worked properly, then you would only need that one.
-
-(answer) The metagenomics field is very broad, and those classifiers are based on different algorithms and cover different needs.
-
-(question cont.) The final output that you have now, is that an average of what the different ones detect, or?
-
-(answer cont.) We have a different output for each classifier, and with the help of taxpasta we are able to have a standardized output for each of those classifiers.
-
-(question cont.) Okay, but you will get a separate output for each classifier?
-
-(answer cont.) Yes. At the moment, yeah.
-
-[12:08](https://www.youtube.com/watch?v=p1EQtidJiUY&t=728)
-(question) Then we have here questions in the chat. One is from Juan. Do you have to download the databases manually?
-
-(answer) Yes. We do not support automatic database download right now. It's in our future plans, maybe, to add a workflow for database construction, but the user has to do it themselves right now.
-
-[12:29](https://www.youtube.com/watch?v=p1EQtidJiUY&t=749)
-(comment) Then a comment from James. I guess it refers to the profiler question I had. He says it's also a fun problem for computer scientists. Thank you.
-
-[12:42](https://www.youtube.com/watch?v=p1EQtidJiUY&t=762)
-(host) Are there any more questions? It doesn't seem like it. If there are questions later on, you can always reach out, as mentioned, in the Slack channel for taxprofiler, or also in the bytesize channel. Otherwise, I would like to thank Sofia again for this great talk, and of course, also, the Chan Zuckerberg Initiative for funding these talks. Thank you very much, everyone, and I hope to see you next week.
-
-
diff --git a/markdown/events/2023/bytesize_nf_validation.md b/markdown/events/2023/bytesize_nf_validation.md
deleted file mode 100644
index 11fd493ad2..0000000000
--- a/markdown/events/2023/bytesize_nf_validation.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: 'Bytesize: nf-validation'
-subtitle: Júlia Mir Pedrol, QBIC and Nicolas Vannieuwkerke, Center for Medical Genetics Ghent
-type: talk
-start_date: '2023-06-06'
-start_time: '13:00 CEST'
-end_date: '2023-06-06'
-end_time: '13:30 CEST'
-youtube_embed: https://www.youtube.com/watch?v=rr9FTlQayIE
-location_url:
- - https://www.youtube.com/watch?v=rr9FTlQayIE
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **“nf-core/bytesize”**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: nf-validation
-
-This week Júlia Mir Pedrol ([@mirpedrol](https://github.com/mirpedrol)) and Nicolas Vannieuwkerke ([@nvnieuwk](https://github.com/nvnieuwk)) are presenting nf-validation, the soon to be released plugin to validate the input parameters and sample sheets for Nextflow pipelines.
diff --git a/markdown/events/2023/bytesize_precommit.md b/markdown/events/2023/bytesize_precommit.md
deleted file mode 100644
index 3fb2defd60..0000000000
--- a/markdown/events/2023/bytesize_precommit.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-title: 'Bytesize: nf-core/pre-commit'
-subtitle: Matthias Hörtenhuber, Scilifelab Data Centre, Sweden
-type: talk
-start_date: '2023-02-07'
-start_time: '13:00 CET'
-end_date: '2023-02-07'
-end_time: '13:30 CET'
-youtube_embed: https://www.youtube.com/watch?v=08d6zv6zvdM
-location_url:
- - https://doi.org/10.6084/m9.figshare.22047485.v1
- - https://www.youtube.com/watch?v=08d6zv6zvdM
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **“nf-core/bytesize”**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: nf-core/pre-commit
-
-This week, Matthias Hörtenhuber ([@mashehu](https://github.com/mashehu)) is going to explain the use of the newly added pre-commit tool in nf-core/tools. Pre-commit hooks were developed to inspect the snapshot that is about to be committed and help to check formatting etc. before adding the code to the repository.
-
-Video transcription
-**Note: The content has been edited for reader-friendliness**
-
-[0:01](https://www.youtube.com/watch?v=08d6zv6zvdM&t=1)
-(host) Hello everyone, and welcome to today's bytesize talk. I'm Franziska Bonath, I'm the host today. With me is Matthias Hörtenhuber. He is going to talk about pre-commit.
-
-[0:11](https://www.youtube.com/watch?v=08d6zv6zvdM&t=11)
-Yes. Hi. It will be a short one. Let's get going with it. The title is "pre-commit, hooked on a commit". It's about how we're trying to solve this problem a lot of us run into: you commit your code, then GitHub CI runs prettier, it finds a mistake and asks, very passive-aggressively, if you maybe forgot to run prettier. The first solution is, of course, to ask nf-core-bot to please fix the linting. I did a quick search: actually, 260 PRs have at least one comment where somebody asked the bot to fix the linting. It's nice that people are using the bot. But actually, maybe we should try to save the polar bears and give nf-core-bot a break sometimes, because these things we can do differently.
-What if you don't need to run the bot, because you already run prettier when you commit your code, or actually before you commit your code? We use this tool called pre-commit, which runs prettier when you hit `git commit`. It runs prettier, checks if the code is fine and makes changes to it.
-
-[1:43](https://www.youtube.com/watch?v=08d6zv6zvdM&t=103)
-How do you set pre-commit up? Well, the good news is that it already comes installed with nf-core/tools as a dependency. Then you need to have a `.pre-commit-config.yaml` file, for example, this one here for prettier. Also, more good news: in the next tools release, this will be part of every pipeline template. We also have that set up in the modules repository. It doesn't change anything for you that we have it there. But if you then also run `pre-commit install` in your repository, it actually sets up this git hook. Whenever you hit commit, prettier is run beforehand and doesn't allow you to commit until you have fixed these changes.
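-
-**Note:** The config file shown on screen isn't reproduced in the transcript. A minimal `.pre-commit-config.yaml` for prettier, along the lines of what is described, might look like this (the repository is the official prettier mirror; the `rev` pin is only an example):
-
-```yaml
-repos:
-  - repo: https://github.com/pre-commit/mirrors-prettier
-    rev: 'v2.7.1' # example pin, use whichever release you want
-    hooks:
-      - id: prettier
-```
-
-With that file in place, running `pre-commit install` sets up the git hook so that prettier runs on every `git commit`.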
-
-[2:39](https://www.youtube.com/watch?v=08d6zv6zvdM&t=159)
-What does it look like? I made a short example here where I just added a line in the README file. As you can see, it's just below a heading, so prettier will not like it. I run commit. Prettier was indeed run and fixed it. But, and that's the important thing, it didn't commit it yet. The file is changed but not added. I actually need to run commit a second time. That's something I sometimes forget: you always need to run git commit twice if there is something wrong. If nothing is wrong and your prettier check passes, then the commit runs through. This is prettier, which we use for markdown files and similar files.
-
-[3:35](https://www.youtube.com/watch?v=08d6zv6zvdM&t=215)
-One of the nice things with pre-commit is that you can use every code linting tool you want. It also doesn't matter if it's in a different language, as you see with prettier, which is actually an npm tool. We don't need to have node installed to run this version of prettier with pre-commit, it just comes through the mirror there. But other tools, like Python-based tools, come directly from the tools themselves. If we set up this config, like we have for the tools repository, for example, it automatically checks the Python files with Black and isort. In this example, I added again to the README file, but I also switched the import statements in our main.py. If I then run commit, it not only runs prettier, but it also runs Black and isort. Black was satisfied, isort found that there was an error there and fixed it, and prettier fixed the README as well. The only changes then were in the README, because we already had it nicely sorted before. Hit the second commit. Now the code is nice again. That was pretty much it.
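-
-**Note:** The tools-repository config isn't shown in the transcript; a sketch of how Black and isort hooks can be added alongside prettier in the same `.pre-commit-config.yaml` might look like this (the `rev` pins are only examples):
-
-```yaml
-repos:
-  - repo: https://github.com/psf/black
-    rev: 22.12.0 # example pin
-    hooks:
-      - id: black
-  - repo: https://github.com/PyCQA/isort
-    rev: 5.12.0 # example pin
-    hooks:
-      - id: isort
-```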
-
-[5:05](https://www.youtube.com/watch?v=08d6zv6zvdM&t=305)
-Just a quick shout-out to the person who actually brought this tool to us, which was Fabian. It started as an idea of how to always have prettier available in tools without requiring people to install node, and he found this tool. It's actually very nice to have it also run with pipelines and everything. Praise Fabian. With that, I'm open for any questions if there are any.
-
-[5:38](https://www.youtube.com/watch?v=08d6zv6zvdM&t=338)
-(host) Thank you very much. If you have any questions, then you can either write them in the chat, or you can just ask them straight away. Everyone should now be able to unmute themselves. Do we have any questions? It doesn't seem so. Maxime. Maxime says hello, or has a question?
-
-(question) Yes, I have a question. You said that it's in tools. Is it already released, or is it in the coming release?
-
-(answer) In tools, it's already in the release, because we actually run Prettier with pre-commit whenever we dump YAML files or JSON files in tools. For example, when we create the tests for modules, these files are now prettified with Prettier, because before we had the problem that our function dumped code that Prettier didn't like. Now we run pre-commit to run Prettier on it. Also the repository itself has Black and isort. There, it's already in. But for the pipelines, the next release will have it in the template, and then all the pipelines also get it. You just need to run `pre-commit install` to activate it. The modules repository got the config in yesterday. If you now pull in the modules repository and then run `pre-commit install`, all changes on modules will automatically be run through Prettier before you can commit. I recommend doing that if you write modules or subworkflows.
-
-[7:31](https://www.youtube.com/watch?v=08d6zv6zvdM&t=451)
-(host) Thank you. Are there any more questions? If not, then I would like to thank our speaker and also the Chan Zuckerberg Initiative for funding the talks. As usual, you can ask more questions if you have any in Slack. This will be uploaded to YouTube. Thank you very much.
-
-(speaker) Bye, everybody.
-
-
diff --git a/markdown/events/2023/bytesize_python_packaging.md b/markdown/events/2023/bytesize_python_packaging.md
deleted file mode 100644
index 9f166ee637..0000000000
--- a/markdown/events/2023/bytesize_python_packaging.md
+++ /dev/null
@@ -1,126 +0,0 @@
----
-title: 'Bytesize: Converting Python scripts into packages for PyPI, Bioconda & Biocontainers'
-subtitle: Phil Ewels - Seqera Labs
-type: talk
-start_date: '2023-05-02'
-start_time: '13:00 CEST'
-end_date: '2023-05-02'
-end_time: '13:30 CEST'
-youtube_embed: https://www.youtube.com/watch?v=hOuS6mXCwhk
-location_url:
- - https://www.youtube.com/watch?v=hOuS6mXCwhk
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **“nf-core/bytesize”**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: Converting Python scripts into packages for PyPI, Bioconda & Biocontainers
-
-This week, Phil Ewels ([@ewels](https://github.com/ewels/)) will show you how to take a Python script and turn it into a stand-alone command-line tool, ready for distribution via the [Python Package Index](https://pypi.org/) (PyPI).
-
-> You can download a `.zip` file of the "before" and "after" code examples Phil demoed [here](/assets/markdown_assets/events/2023/bytesize-python-packaging/python-packaging.zip).
-
-This is a good thing to do for a few reasons:
-
-- More people can use your scripts - not just within Nextflow
- - This is useful for development, for stand-alone testing
- - It's useful for people using other workflow managers
- - It helps when users are testing a method / debugging with small sample sizes
-- It allows scripts to be released under different licenses to the pipeline itself
-- Software packaging, that is, providing container images with all requirements, is handled automatically
-
-Even if it's a small script that you think no-one will ever use outside of your pipeline, it's easy to do and you don't lose anything ๐
-
-Once released in PyPI, releases via [Bioconda](https://bioconda.github.io/) are simple (see [Bytesize 40: Software packaging](https://nf-co.re/events/2022/bytesize-40-software-packaging)).
-Once in Bioconda, software will be available for Conda users, but also Docker + Singularity, via the [BioContainers](https://biocontainers.pro/) project.
-
-Video transcription
-**Note: The content has been edited for reader-friendliness**
-
-[0:01](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=1)
-Hello, everyone, and welcome to this week's bytesize talk. I'm very happy to have Phil here, who is talking today about converting Python scripts into packages for PyPI, Bioconda, and biocontainers. It's your stage, Phil.
-
-[0:17](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=17)
-Thank you. Hi, everybody. Thank you for joining me today. We're going to have a little bit of fun together, hopefully. Today's talk was inspired by a conversation that's come up a few times within nf-core, which is when people have got scripts within a pipeline, so typically within a bin directory, or it could be within the exec or shell block of a process. Instead of bundling that script with the pipeline, we prefer to package that script - or set of scripts - as a standalone software package instead. There are a few different reasons why we like to do this. Firstly, it makes the package and the analysis scripts available to anyone to use, even if they're not using Nextflow and not using this pipeline, so that's for the greater good of the community. More reusability, more visibility. It can sometimes help with licensing, because we're no longer bundling and modifying code under potentially a different license within the nf-core repo, so the nf-core repo can be MIT and can just call this external tool. It also helps with software packaging, as Fran mentioned. For free, we then get a Docker image, a Singularity image and a Conda package, with all of the different requirements that you might need, so you don't need to spend a lot of time setting up custom Docker images and all this stuff. You just package your own scripts as a standalone tool and you get all of that stuff for free, so, much better. All the maintenance can sit alongside the pipeline rather than being integrated into it. It's a nice thing to do, and for me the main reason is that first one, which is that it makes the tool more usable for anyone, not necessarily tied to running within Nextflow, which I think is great, because it's nice to use tools on a small scale and then to scale up to using a full-size pipeline when you need it.
-
-[2:16](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=136)
-I've told people in the past that this is easy, which it is, if you've done it lots of times before. But I thought it's probably time to put my money where my mouth is and actually show the process, and hopefully convince you, too, that it isn't so bad. Now a few things to note before I kick off. Firstly, I'm going to live code this. I have run through it earlier, so I've got a finished example on my side, which you can't see, which I will copy and paste from occasionally and hopefully refer to if everything really goes wrong, but in the words of SpaceX, excitement is guaranteed because something will blow up at some point. So join me on that. Secondly, there are many, many ways to do this. What I'm going to show is not necessarily the best way: there are better ways to do things, and probably recommendations from other people that are much better than mine and that you should listen to. My aim today is to try and show you the easiest way to go from Python scripts to something on Bioconda, and I want to try and make that beginner-friendly and as bytesized as possible.
-
-[3:28](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=208)
-Let's start by sharing my screen up here and we will kick off. Spotlight my screen for everybody, so hopefully you can still see my face. To start off with, a famous XKCD comic about Python environments, which are famously complicated packaging environments. We're going into something which is known for being difficult and varied, but that's fine. I'm going to keep it as simple as possible and you don't need to worry about all this stuff. I've got a little toy Python script here. It doesn't do very much, it just makes a plot, and I wanted some input, so it takes a text file - I'll delete that now - called `title.txt` with some text in it. It reads that file in, sets it as a variable, sets the plot title to whatever it found and then saves it. This is our starting point. I can try and run this now: if I do `python analysis.py`, there we go, we've got our plot with my nice title, so it works, first step. This is where I'm assuming you're starting off: you have a Python script which works.
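-
-**Note:** The toy script itself is only visible on screen, so here is a reconstruction of roughly what such a starting point could look like (the plotted data and file names are illustrative):
-
-```python
-# analysis.py - toy starting script: read a title from a file, make a plot, save it
-import matplotlib.pyplot as plt
-
-# Read the plot title from a fixed file name
-with open("title.txt") as fh:
-    title = fh.read().strip()
-
-fig, ax = plt.subplots()
-ax.plot([1, 2, 3, 4], [1, 4, 9, 16])  # some toy data
-ax.set_title(title)
-fig.savefig("plot.png")
-```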
-
-[4:45](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=285)
-We have a few objectives, to take this script into a standalone Python package. Firstly we want to, as far as possible, make things optional and variable, so instead of having a fixed file name with a string like this, we want a better way to pass this information in to the tool, so we want to build a command line tool. We want to make it available, ideally, anywhere on the command line on the path, so make it into a proper command line tool rather than a script which you have to call using Python. We can call it "my_analysis_tool" or whatever and run that wherever. Once we've done all that stuff we want to package it up using Python packaging, so that we have everything we need to push this package onto the Python package index, and we're going to focus on that. Once we've got this as a tool on PyPI, where anyone can install it, then the steps from PyPI to Conda are fairly easy. Once it's on Conda you get biocontainers for free, which is the Docker image and the Singularity image. Really our destination for today is just Python packaging, just PyPI. There's another talk, it's fairly old by now, but it's still totally valid, by Alex Peltzer on nf-core bytesize. It takes you through the Bioconda packaging steps, so you can follow on from this talk with that one. Hopefully that makes sense.
-
-[6:14](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=374)
-First steps first, let's try and make this into a command line tool. Now there are a bunch of different ways to do this. Probably the classic Python library to do command line parsing is called argparse, which many of you may be familiar with. Personally I've tended to use another package called "click", and more recently I am tending to use a package called "typer", which is actually based on "click". If I just bring up the browser, this is the URL for "typer". Gosh, it's quite big on a bigger screen; I'll just make my window bigger for a second, not to read anything here but just to see what the website really looks like. It's got a really good website, it explains a lot about how to use it, and you can click through the tutorial here and it tells you about everything: what's happening, why it works the way it does and how to build something. We can start off with this, the simplest example, and we're going to say `import typer` here. Go up to the top, `import typer`, wrap our code in a function. I can't copy from the VS Code browser apparently, so I'm going to indent all of this code. Then I'm going to copy in that last bit which was there... my other window... down at the bottom.
-
-[7:55](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=475)
-What's happening here? I'm importing a Python library called "typer", which is what we're using for the command line tool. I've put everything into a function which is just called `main`, and then at the bottom I've said `if __name__ == "__main__"`, so this is telling Python: if this script is run directly, use "typer" to run this function. If I save that, I can now do `python analysis.py` and nothing will happen, it should just work exactly the same, but I can do `python analysis.py --help` and you can see we're starting to get a command line tool come in here.
-
-[8:27](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=507)
-Next up, let's get rid of this file. We don't really care about the title being in a file, that was just a convenience, so instead let's pass the title as a command line option. With "typer" we do that just by adding a function argument to this function, and I can get rid of this bit completely. To prove it, I'll delete that file as well. Let's try again, do `python analysis.py --help`, and sure enough now we have some help text saying: hey, we are expecting a title, which is text, and we have no default. If I try and run it without any arguments it will give me a nice error message. Now if I say "hello there", it's passed that in and our plot has a different title. That is our first step complete. We have a rudimentary command line interface, we have got rid of that file, and we've now got command line options, which makes it a much more usable, flexible tool - and that was not a lot of code, I think you'll agree with me. With "typer" you can do many more things. You can obviously add lots more arguments here. You can say one should be an integer or boolean and it will craft the command line for you. You can use options instead of arguments, so `--whatever`. You can set defaults, you can write help texts, loads of stuff like that. As your tool becomes more advanced, maybe you dig into the typer documentation a little bit and learn about how to do that, but that's beyond the scope of today's talk.
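-
-**Note:** Again, the code is only visible on screen; a sketch of the script after these typer changes, with the title passed as a command line argument instead of read from a file, might look like this:
-
-```python
-# analysis.py - the same toy plot, now as a typer command line tool
-import matplotlib.pyplot as plt
-import typer
-
-
-def main(title: str):
-    """Make a toy plot with the given title."""
-    fig, ax = plt.subplots()
-    ax.plot([1, 2, 3, 4], [1, 4, 9, 16])  # some toy data
-    ax.set_title(title)
-    fig.savefig("plot.png")
-
-
-if __name__ == "__main__":
-    typer.run(main)
-```
-
-Running `python analysis.py --help` then prints the generated help text, and `python analysis.py "hello there"` makes the plot with that title.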
-
-[10:04](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=604)
-Next up, let's think about how to make this into an installable package and something we can run on the command line anywhere; those two things go together. If someone else comes and wants to run this package, they're going to need to be able to import these same Python packages, so I'm going to start off by making a new file called "requirements.txt" and I'm going to take these package names and just pop them in there. We'll come back and use that in a minute, and in the short term, if someone wanted to, they could now do `pip install -r requirements.txt` and that would install all the requirements for this tool. I'm also going to start moving stuff into some subdirectories, and by convention I'm going to put it into a directory called "src". But it doesn't really matter, you can call it whatever you want. I'm going to call the module "my_tool" and I'm going to move that Python file into that directory there. I'm also going to create a new file called `__init__.py`. This is a weird-looking file name and it's a special case: by doing this in Python, it tells the Python packaging system that this directory behaves as a Python module, which is what we want to install later. I can add a docstring at the top saying "my_amazing_tool". I'm actually not going to put anything else in here for now apart from a single variable, which I put here by convention, but really you can do whatever you want. I'm going to call it, again using dunders - so double underscores - `__version__`, and give it, you know, a semantic version, something like 0.0.1dev. We'll come back and use this variable a bit later, but for now it doesn't do anything.
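-
-**Note:** The transcript writes the module name both as "my_tool" and "mytool"; the sketch below uses `mytool` throughout for consistency with the packaging sections further down, and the layout and version string are just examples:
-
-```text
-requirements.txt
-src/
-└── mytool/
-    ├── __init__.py
-    └── analysis.py
-```
-
-```python
-# src/mytool/__init__.py
-"""my_amazing_tool."""
-
-__version__ = "0.0.1dev"
-```
-
-```text
-# requirements.txt
-matplotlib
-typer
-```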
-
-[11:57](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=717)
-What else? We want to make the typer example slightly more complicated. We're now going to create a typer app, like this. We're going to get rid of this bit at the bottom, because we don't actually need that anymore if we're not going to be running it as a script - we're not going to be calling that Python file directly. Get rid of that. We're now going to use a Python decorator called `app.command()` here, to tell "typer" that this is a command to be used within the command line interface. This is the normal way to set it up; that first, very simple example is so simple that you almost never use it with "typer". This is what you always do, and then you can have multiple functions here decorated with `app.command()`, and you can have multiple sub-commands within your CLI that way, and groups of sub-commands and all kinds of things. With nf-core we have grouped sub-commands: you do `nf-core modules update`, for example, and those are separate sub-commands, so that's how you do it there. But for now, this will work in exactly the same way as the example I showed you a second ago.
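-
-**Note:** A sketch of the app/command pattern described here (the function name is illustrative). With a single decorated function this behaves the same as the `typer.run()` version above, and adding more `@app.command()` functions gives you sub-commands:
-
-```python
-# src/mytool/analysis.py (excerpt)
-import typer
-
-app = typer.Typer()
-
-
-@app.command()
-def plot(title: str):
-    """Make the toy plot with the given title."""
-    ...  # plotting code as before
-```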
-
-[12:58](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=778)
-I'm going to add... because this is going to be a python package, it's really important to tell everybody about how to use it. I'm going to create a new LICENSE file. I am a fan of MIT, so I'm going to make it the MIT license and just paste in the text there that I've grabbed off the web and I'm going to make a README file, because this is going to turn up on github. We want people to know about what the tool is and how to use it, when they see the repo.
-
-[13:27](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=807)
-Okay, hopefully you're with me, that's all the simple stuff. Now we'll get on to a slightly more complicated bit about how to take this and make it installable. This is one of the bits where it gets very variable in how you can do it. Typically within Python you can use a range of different installable Python packages to do your Python packaging - it's quite meta. There's a very old one called "distutils", which you shouldn't use, and there's one called "setuptools", which is most common. That's what I'm going to use today. Other people like other packaging setups, such as a popular one called "poetry". There are quite a lot of them, so if you have a preference, great, go for it. Maybe in the discussion afterwards people can suggest their favorites, but for now I'm going to stick with setuptools. I'm going to create a "setup.py" - which, again, gets a bit confusing, because you don't necessarily need it - and a "setup.cfg". I should say here: you don't need to remember how to do this. I don't remember how to do this. I don't think anyone really remembers how to do this. I do some browsing, type in "setuptools.pypa.io", and you can see there are quite good docs on this website for setuptools. They tell you how to do everything, they talk you through it, it's quite easy to read, and they also talk through all the different options of how to build this stuff. You can do it with what's called a "pyproject.toml" file, which is probably what I'll start doing soon when it becomes slightly more standard. There's the setup.cfg file, which is what I'm going to do now, and there's also some documentation about the old-school way of doing it, which is "setup.py". For now the "setup.py" file is just for backwards compatibility.
-
-[15:09](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=909)
-I'm going to do exactly what it tells me to do here. I'm going to say `import setuptools`, `setup()`, save, and then I just forget about this file and never look at it again; everything else goes into this setup.cfg file, and you can work through the examples here. For now I'm going to cheat for the sake of time, copy in the one I did earlier, and just walk you through what these keys are quickly. Again, I always copy this from the last project I did, but you can copy it from the web very easily. "name" is important. "version" is important, because when you're updating a Python package it needs to know which version number it is, and this is using the special variable I set up earlier. Now if you look where it is, it's in the Python module I made called "mytool" and the variable is `__version__`. Here I'm saying: use an attribute. I could hard-code it in this file if I wanted to, but I'm using it as an attribute, and I'm using this variable which is under `mytool.__version__`. You could call that whatever you want, or you could just hard-code it in this file. "Author", "description", "keywords", "license", "license files", "long description" - say it's markdown, that's just what shows on the PyPI website. "Classifiers", which are just categories - I always copy these without thinking, you can probably think a bit more about it if you want to. There is some slightly more interesting stuff down here. The minimum required version of Python, which might be important for you. Where you put your source code: in this case I say look for any Python modules you can find, and look in the directory called src. If you called that something different, you put that here, and then that's looking for `__init__.py` files like that. Then we say we require a bunch of other Python packages: here I'm saying look at this file called requirements.txt. If you didn't want to have that file for whatever reason, you can also just list them in this file here as well.
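-
-**Note:** The setup.cfg shown on screen isn't reproduced in the transcript, so here is a rough reconstruction covering the keys walked through above. All names and values are illustrative, and the `file:` directive for `install_requires` needs a reasonably recent setuptools:
-
-```ini
-# setup.cfg - illustrative sketch, not the exact file from the talk
-[metadata]
-name = mytool
-version = attr: mytool.__version__
-author = Your Name
-description = A toy analysis script packaged as a command-line tool
-long_description = file: README.md
-long_description_content_type = text/markdown
-license = MIT
-classifiers =
-    Programming Language :: Python :: 3
-
-[options]
-python_requires = >=3.7
-package_dir =
-    = src
-packages = find:
-install_requires = file: requirements.txt
-
-[options.packages.find]
-where = src
-```
-
-(The `console_scripts` entry point is shown in the next section.)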
-
-[17:12](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=1032)
-Finally "console scripts". This is the bit which actually makes it into a command line tool and here we say I want to call my tool myawesometool. When someone types that into the command line, what I want python to do is to find the module called "mytool", which we've created here, with the init file. I've actually got this script called "analysis" here. Again, this file name could be whatever you want. Then look for a variable called "app". Here our variable is called. But I could also put a function name and stuff here as well, if I wanted to. For typer I'm going to say ".app".
-
-[17:53](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=1073)
-Now, Python will know what to do when I install my tool, and... moment of truth, let's try and install it and see what breaks. Installing from the Python package index uses pip, and I'm going to say `pip install`. I could just do a full stop for my current working directory and that would work, but I'm actually going to add the `-e` flag here to make it editable. What that does is, instead of copying all the files over to my Python installation directory, it soft-links them, and that's really useful when developing locally, because I can make edits to this file, hit save, and I don't have to reinstall the tool every single time. I'm just always in the habit of using `-e` pretty much all the time. Let's see what happens... yeah, it broke. "Setup not found". That's because I got the import wrong: it should be `from setuptools import setup` and then `setup()`. I could also have done `setuptools.setup()` like that, that should work as well. Let's try again. Great, you can see it's running through all those requirements. It's installing all the back-end stuff, like matplotlib and "typer", and it installed! So now, what did I call it? `myawesometool`! If I do `myawesometool --help`... Hooray! It works! Look at that, we've got a command line tool! Now I can run this wherever I am on my system; I don't have to be in this working directory anymore. It doesn't matter if I... let's give an example... do a test. If I do `myawesometool "This is a test"`, there we go. Now we've got that file created in there, because that was my working directory, and sure enough, I got a nice title. Brilliant!
-
-[20:03](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=1203)
-We have a command line tool, it installs locally, it works and it's got a nice command line interface. We're nearly there. The final thing, then, is to take this code and put it onto the Python package index. If you start digging around on Google, you will find instructions on how to do this, and they will say: run a whole load of command line functions, run those, do this, and that will publish it. There's a sandbox environment where you can test first, and you have to sign up to PyPI, obviously, and register and create a project and everything. But my recommendation is to keep things simple, and the only way I do it now is to do all of this through GitHub Actions and automate the publication of your package. That's what I'm going to show you today, because I can walk you through that quite easily and it's the same logic. If you've not used GitHub Actions before, the way it works is: you create a directory called `.github` - it's a hidden directory - and a subdirectory called `workflows`. In here I'm going to create a new file, which can be called anything, "deploy-pypi.yaml".
-
-[21:16](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=1276)
-Then I'm going to cheat and copy, because otherwise it's going to take me a while to type all this in, and I'm going to walk you through it. This is a YAML file that tells GitHub Actions what to run and when to run it. We have a name up here, which can be anything, and first we have a trigger. This tells GitHub: run this GitHub Action whenever this repository has a release and the event type is published. So whenever you create a new release on GitHub and you click publish, this workflow will run, and it'll run on the default branch. Then we have the meat of it: what is it actually doing? It's running on Ubuntu. It's checking out the source code first and setting up Python. Now, I install the dependencies manually here. I'm not totally sure if this is actually required or not, but it was in the last GitHub Actions workflow I did, so I thought I'd do it again. The first command is just upgrading pip itself and setting up setuptools and stuff. Then we do the `pip install .` command again, just to install whatever's in the current working directory. Now on GitHub Actions your tool is installed, and then we run this Python command with setup.py, which is just calling setuptools and saying `sdist`, the source distribution, and create a `bdist_wheel`. We don't need to know what that means or why it's there, but those are just the files that the Python package index needs. Now it's built the distribution locally, and then finally we publish it.
-
-[22:40](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=1360)
-You can see where I copied it from. We publish it to the Python package index. This is a check just in case anyone has forked your repository: don't bother trying to run this, because it obviously won't work. I usually just put this in to check what your GitHub repository is called, and then use this Python package index action, which is a GitHub Action that someone else has written. I'm using a password, and this is a GitHub Actions secret: an API token that you can get from the Python package index website when you're logged in. That gives GitHub Actions all the credentials it needs to be able to publish the Python package for you. That's it. If everything works well, you stick all this on GitHub, you make it all lovely, you hit release, and then you will be able to watch that workflow running and it will say "workflow published".
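-
-**Note:** The workflow file itself isn't reproduced in the transcript; a sketch following the steps described above might look like this. The action versions, the repository name in the `if:` check and the `PYPI_API_TOKEN` secret name are all assumptions:
-
-```yaml
-# .github/workflows/deploy-pypi.yaml - illustrative sketch
-name: Publish to PyPI
-
-on:
-  release:
-    types: [published]
-
-jobs:
-  deploy:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
-        with:
-          python-version: '3.x'
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip setuptools wheel
-          pip install .
-      - name: Build the distribution
-        run: python setup.py sdist bdist_wheel
-      - name: Publish to PyPI
-        if: github.repository == 'your-username/mytool'
-        uses: pypa/gh-action-pypi-publish@release/v1
-        with:
-          password: ${{ secrets.PYPI_API_TOKEN }}
-```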
-
-[23:33](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=1413)
-Remember to change this version when you run it more than once, because if you try and publish the same package twice with the same version number on the Python package index, it will fail. As long as you bump that, then everything should work and you should end up with a package on PyPI. When you have that package, you'll be able to install it by its name - I think that's what the Python package index uses - so you'll be able to `pip install mytool` from anywhere. Anyone will be able to do that and it will just work, and that's it. At that point you can pat yourself on the back, think how amazing the job you've just done is and how anyone can now use your analysis tools. Prepare yourself for an onslaught of bug reports on GitHub, and take the next step: scaffold that PyPI package into a Bioconda recipe and do all the last stuff. But like I say, that's in a different talk and I'm not going to swamp everyone by talking about that too much today. Hopefully that made sense for everybody. Shout if you have any questions, and I'd love to hear what workflows other people have, whether I made a mistake, whether you think I should do it in a different way and whether your way is better.
-
-[24:42](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=1482)
-(host) Thank you so much. It's nice to see how some of the magic actually happens in the background. Do we have any questions from the audience?
-
-(question) I've got one. Have you tried cookie cutter to automate all of this?
-
-(answer) When I was prepping this with like five minutes to go, I was desperately trying to find a link for a really nice project which I've seen. I've spoken to the authors and I cannot remember the name of it. There are a few of them floating around, but there's one, definitely for bioinformatics, where you can use a cookiecutter project and it scaffolds an entire Python package index project for you, with all of this stuff in place. It's probably much better and quicker. I purposefully chose not to show that today, because I was thinking of going from someone who already has a working script, and trying to explain what all the different stuff is doing. If you're starting from scratch I would absolutely do that, and if anyone has any good links for projects, or can remember the project I'm talking about, please post them here or in Slack.
-
-(question cont.) I'll just drop the link in the chat. If someone doesn't know what we're talking about.
-
-(answer cont.) That link is for cookiecutter itself, right, which is just a generic templating tool. There are cookiecutter projects which people have created, like template repositories, specifically for Python, if that makes sense.
-
-[26:09](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=1569)
-(question) We do have another question in the chat. Someone is asking: why not `pyproject.toml`?
-
-(answer) This is something else I was debating at the start. There is a bit of history here. When I started creating my first Python projects you always used that `setup.py` file, and you still can. It's a bit like how Nextflow config files are just a Groovy script, where you can do whatever you like. `setup.py` is the same: it's just a Python script, where you can do whatever you like. Which is wonderful and horrifying! Slowly, over the last... the Python community moves slowly... so for the last many years, there's been a move away from that way of doing things towards more standardized file types, and there are two which are being used: there's the `setup.cfg` file, which is exactly the same thing but in a structured file format, and the other one is `pyproject.toml`, which is the newer and better way of doing things. `pyproject.toml` is nice because it's also a standard for many other Python tools' configs. If you want to use Black to lint your code, which you should, because Black is amazing, you'll put your settings in `pyproject.toml`. If you use, I don't know, mypy for type linting, or any of these flake8 tools or whatever, and all the linting tools and stuff - they all stick their settings in `pyproject.toml`, which is great because you have one config file for everything to do with your Python project. That is much nicer, and you can also do all of your setuptools Python stuff in there. There are a couple of things which I found I think are missing. Correct me if I'm wrong, but I don't think you can point it to a `requirements.txt` file for all requirements. It's quite useful having that file sometimes, maybe it doesn't matter... I think the setuptools website says it's like in beta and it might change, so I thought I'd play it safe today and go for `setup.cfg`, which is newish, but fairly safe. But yeah, `pyproject.toml` is, if you can make it work for you, probably a nicer way to do it.
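-
-**Note:** For comparison, a rough `pyproject.toml` equivalent of the setup.cfg sketch above could look like the following. This is an untested sketch using modern setuptools (version 61 or later), and the names simply mirror the illustrative examples above:
-
-```toml
-[build-system]
-requires = ["setuptools>=61"]
-build-backend = "setuptools.build_meta"
-
-[project]
-name = "mytool"
-description = "A toy analysis script packaged as a command-line tool"
-dynamic = ["version"]
-dependencies = ["matplotlib", "typer"]
-
-[project.scripts]
-myawesometool = "mytool.analysis:app"
-
-[tool.setuptools.dynamic]
-version = { attr = "mytool.__version__" }
-
-[tool.setuptools.packages.find]
-where = ["src"]
-```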
-
-[28:13](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=1693)
-(host) We have some more comments. There was a link posted to Moritz' cookiecutter package, which has not been tried out, at least not by the person who posted it. They say that, ironically, flake8 can't actually read its settings from pyproject.toml, or at least couldn't a couple of months ago.
-
-(speaker) Cookiecutter - this might look familiar to anyone who's used the nf-core template. We used to use cookiecutter for nf-core back in the early days, and we still use the underlying framework, which is called Jinja. That's where these double squiggly brackets come from; it's a templating system, as you can see. Here you've got all these different settings, there, for example, with license options and a name and stuff, and then these will go into all these double-bracket things. The idea is, you run `cookiecutter` - I can't remember exactly what the command is now - then you give it this GitHub URL and it will ask you a few questions, which will just replace these defaults here. Then it will generate this package here, but with all the template placeholders filled in.
-
-[29:26](https://www.youtube.com/watch?v=hOuS6mXCwhk&t=1766)
-(host) Great! Do we have any more questions? It doesn't seem so. Thank you very much for this great talk. Before we wrap this up entirely, I also have something to mention: next week's bytesize talk is going to be one hour late. I will also post this again in the bytesize channel. Very interestingly, there will be a talk from people that were part of the mentorship program. The deadline for the mentorship program just got extended, so for anyone who is still questioning whether they should join or not, this is your chance to actually listen to people who have been part of it as they give their impressions. With this, I would like to thank Phil again, and I would like to thank everyone who listened. Of course, as usual, I would like to thank the Chan Zuckerberg Initiative for funding our talks. Have a great week, everyone.
-
-
diff --git a/markdown/events/2023/bytesize_quantms.md b/markdown/events/2023/bytesize_quantms.md
deleted file mode 100644
index 2859505475..0000000000
--- a/markdown/events/2023/bytesize_quantms.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-title: 'Bytesize: nf-core/quantms'
-subtitle: Julianus Pfeuffer (Zuse Institute Berlin) and Yasset Perez Riverol (EMBL-EBI)
-type: talk
-start_date: '2023-05-30'
-start_time: '13:00 CEST'
-end_date: '2023-05-30'
-end_time: '13:30 CEST'
-youtube_embed: https://www.youtube.com/watch?v=pBzelkgrPgQ
-embed_at: quantms
-location_url:
- - https://www.youtube.com/watch?v=pBzelkgrPgQ
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **“nf-core/bytesize”**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: nf-core/quantms
-
-This week, Yasset and Julianus will introduce to us the nf-core pipeline quantms. nf-core/quantms is a bioinformatics best-practice analysis pipeline for Quantitative Mass Spectrometry (MS). Currently, the workflow supports three major MS-based analytical methods: Data-dependent acquisition (DDA), either (i) label-free or (ii) isobarically labelled quantitation (e.g. TMT, iTRAQ), and (iii) Data-independent acquisition (DIA) with label-free quantification.
-More information about nf-core/quantms can be found on the [nf-core website](https://nf-co.re/quantms).
diff --git a/markdown/events/2023/bytesize_survey_2023.md b/markdown/events/2023/bytesize_survey_2023.md
deleted file mode 100644
index f2db93fd67..0000000000
--- a/markdown/events/2023/bytesize_survey_2023.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: 'Bytesize: nf-core community survey 2023'
-subtitle: Christopher Hakkaart - Seqera Labs
-type: talk
-start_date: '2023-05-16'
-start_time: '13:00 CEST'
-end_date: '2023-05-16'
-end_time: '13:30 CEST'
-youtube_embed: https://www.youtube.com/watch?v=hnI3IgGNq3o
-location_url:
- - https://www.youtube.com/watch?v=hnI3IgGNq3o
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **“nf-core/bytesize”**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: nf-core community survey 2023
-
-This week, Christopher Hakkaart ([@christopher-hakkaart](https://github.com/christopher-hakkaart)) will discuss the results of the nextflow/nf-core community survey.
diff --git a/markdown/events/2023/bytesize_transcripts.md b/markdown/events/2023/bytesize_transcripts.md
deleted file mode 100644
index 40d2676c9f..0000000000
--- a/markdown/events/2023/bytesize_transcripts.md
+++ /dev/null
@@ -1,71 +0,0 @@
----
-title: 'Bytesize: transcripts of bytesize talks'
-subtitle: Franziska Bonath - NGI, Stockholm
-type: talk
-start_date: '2023-01-31'
-start_time: '13:00 CET'
-end_date: '2023-01-31'
-end_time: '13:30 CET'
-youtube_embed: https://www.youtube.com/watch?v=amwwmFMwOYw
-location_url:
- - https://doi.org/10.6084/m9.figshare.21995243.v1
- - https://www.youtube.com/watch?v=amwwmFMwOYw
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **“nf-core/bytesize”**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: transcripts of bytesize talks
-
-This week, Franziska Bonath ([@FranBonath](https://github.com/FranBonath)) will talk about her work to generate transcripts of bytesize talks and what these might be used for in the future.
-
-Video transcription
-**Note: The content has been edited for reader-friendliness**
-
-[0:01](https://www.youtube.com/watch?v=amwwmFMwOYw&t=1)
-(host) Hi, Maxime here. First of all, I'd like to thank the Chan Zuckerberg Initiative for helping us do these bytesize talks. Today, Franziska Bonath will present to us how the transcripts of the bytesize talks happen. It's a very meta bytesize talk today. As usual, please use Slack for your questions. Now, it's up to you, Fran.
-
-[0:28](https://www.youtube.com/watch?v=amwwmFMwOYw&t=28)
-Okay, thank you. Welcome, everyone. I'm talking about bytesize talk transcripts. Just very briefly, what we're going to do today: I will handle the question of all questions, why transcribe bytesize talks at all, and then briefly go into how we did it and what we're going to do in the future.
-
-[0:52](https://www.youtube.com/watch?v=amwwmFMwOYw&t=52)
-Why, why are we going through all this pain? And the big answer is that we want to be more inclusive. This is one of the reasons why we got funding from the Chan Zuckerberg Initiative, but of course we also have a desire to do this ourselves. Not everyone is able to hear things. If you rely on the transcripts that are automatically done by YouTube, for example, it can be very difficult to get the gist of what the talk is about. Also, even if you hear perfectly, not everyone will be able to understand English well enough to figure out what the talk is about. In addition, we have speakers from all over the world, and there might be accents that are a bit more difficult to follow. So, having a really good transcript will help people understand these talks a lot better.
-
-[1:54](https://www.youtube.com/watch?v=amwwmFMwOYw&t=114)
-There are other reasons. One is, of course, to improve the subtitles for YouTube. But also, if you have the transcript by itself, without the video, you should be able to understand it. And it will be a resource for understanding details that are maybe not in the slides. It will, hopefully at least, contain the correct names of all the tools that are used, so you can look those up, and it will be easier to search for them online. But also, once you have a text, there's a lot you can do with that text: you can translate it, or you can put it into some AI-based thing and have it give you a summary. There are a lot of things we might start to think of in the future, and they will be text-based. And the better the information you put in, the better the output you're going to get.
-
-[2:55](https://www.youtube.com/watch?v=amwwmFMwOYw&t=175)
-Where can I find these transcripts? It's, at the moment, a bit difficult, I admit that. I'm going to quickly show you. What you have to do at the moment is, you have to go to... [...] If you're on the website, you go to events, and then you can search for only bytesize here. These will be the upcoming ones, but if you go to the past ones, for example, let's go to taxprofiler, and you scroll down, what you will find here is the embedded YouTube video, and at the bottom, you will have the transcripts. You can go directly to one of those; it will show up there. This is, at the moment, the only way you will get the transcripts for any talk. But they will be uploaded to YouTube eventually. Then I go back to my slides.
-
-[4:13](https://www.youtube.com/watch?v=amwwmFMwOYw&t=253)
-How did we do this? We did try to use the automated transcripts on YouTube first. It is horrible. Basically, what happens is that you have a lot of these oohs, and aahs, and umms that are not removed at all. Also, you will have no punctuation whatsoever. You have to add the capitalization after every full stop in your transcript. It takes forever. It probably would have been quicker to just write it down while listening. That did not work. So in comes a new tool, which I'm forever grateful to Matthias Zepper for introducing me to. It's called Whisper. At the moment that I started these transcripts, Whisper was only available as a standalone tool, but now there is also a Nextflow pipeline for Whisper; you can find it under this link. Whisper helped a lot. It does add punctuation. It surprisingly does recognize a lot of the tools that we're using. And it removes all the umms, and a lot of the double mentions: if you're talking normally, you often stop to think about something and then repeat what you have just said before. These double mentions get edited out automatically, which is super nice. I can only recommend Whisper if you ever do transcripts of any video yourself.
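-
-**Note:** As an illustration of how Whisper can be run on a recording (the model choice, language and output format flags are just examples; see the Whisper documentation for the full set of options):
-
-```bash
-pip install openai-whisper
-whisper bytesize_talk.mp4 --model medium --language en --output_format srt
-```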
-
-[5:51](https://www.youtube.com/watch?v=amwwmFMwOYw&t=351)
-But even though Whisper is great, it is not perfect. I don't think any automated talk transcript will ever be perfect. The main things that we have to do are to add timestamps, so we have nice sections that belong together, but of course also names - specifically names of people, but also of tools - often do not get identified correctly. You have to check and edit those. Specialized terminology is also not recognized, because it is probably not in Whisper's library. And sometimes sentences are super long. It might be an ellipsis, or someone had a thought, stopped mid-thought and continued afterwards, which is totally fine if you're just listening to a person, but if you want to just read it, it's very difficult to understand. These kinds of things we have to change manually afterwards.
-
-[6:55](https://www.youtube.com/watch?v=amwwmFMwOYw&t=415)
-To give you an idea: our most favorite words, which are part of pretty much every bytesize talk, nf-core and Nextflow, are very commonly misspelled. Nf-core very typically gets misspelled as NFL or NF4. I don't exactly know why, but I read those in every third or so transcript. And of course, you also have just some misspellings of nf-core itself. Sometimes it does pick it up, and in very, very rare cases, it will even type it correctly. Nextflow also has diverse ways in which it can be written; in the latest one, I had it transcribed as "next floor". But then of course, there are just some random things that don't repeat. Like, "elution" will be transcribed as "illusion", "iterations" as "situations". One of my favorites was "bioinformaticians" becoming "by partitions". Surprisingly, bioinformaticians, which is not that uncommon a word, I would say, gets transcribed wrongly a lot. And you can imagine that if you have ribosomal RNA mistranscribed as "rivals of RNA", the sentence will not make any sense. The handy overall summary can also become a handy oral summary, which would make sense, but which would change the meaning a bit. And just one other example: if you have a sentence like "these processes take a sort of BAM from the samples", just reading the sentence, I would not have guessed what this specifically means. Once I listened to the recording, it turned out that it means "these processes take in a sorted BAM from SAMtools". This, I think, shows very clearly that manual work is necessary, and that it's worth going through this and making these changes, and not just relying on an automated transcript.
-
-[9:07](https://www.youtube.com/watch?v=amwwmFMwOYw&t=547)
-Now we're done, right? We are up to date. Everything's fine. Not quite, obviously. We have to add these transcripts to the subtitles on YouTube, which will happen in the not too far future, I hope. And also, what we want to try and see if we can do translations of these YouTube transcripts that we generate now, to have them in different languages, which would be super nice. Of course, bytesize talks are not finished yet. In fact, this very bytesize talk is going to be transcribed. We have this kind of Inception way where a bytesize talk that talks about bytesize talk transcripts is going to be transcribed. Anyway, this was all. I would like to thank Matthias for his enormously helpful tip for Whisper. And also, he was writing a container, I think, for Whisper. Marcel and Christopher, who had to approve all my pull requests for the transcripts. Of course, all the other reviewers, specifically the speakers that went through this horrendous task of reading their own talks. I'm not looking forward to this. Thank you very much. Now I'm open to any questions. Of course, there's no repository nf-core pipeline. I just took the... Anyway, thank you very much, everyone. Off to Maxime.
-
-[10:53](https://www.youtube.com/watch?v=amwwmFMwOYw&t=653)
-(host) Good. That was brilliant. Thank you very much. I will try to allow everyone to unmute themselves if you have questions. We haven't done that in a while. Where is this?
-
-(speaker) Yeah, this is what you get when you use the template. Unnecessary things get included in the talk.
-
-(host) Is there any question, actually, like, oh, yes.
-
-(question) Jasmin is asking, how was the transcript added to YouTube? Will they be visible as normal subtitles?
-
-(answer) So, yeah, I did look a bit into that. You can add your own subtitles in YouTube if you are the owner of the YouTube channel. As nf-core, I can add subtitles, and it will be one of the different subtitles that you can choose from. I think it's going to be called... No, I don't recall how it's called. But I think it will be the default option as subtitles.
-
-[11:56](https://www.youtube.com/watch?v=amwwmFMwOYw&t=716)
-(host) Okay. Do we have one last question, or are we good for today? I think we are good for today. Thank you again, Fran, for this presentation. Definitely that was a question I had, how everything was happening and stuff. Thank you very much for inviting me into all that. And see you soon. Thank you.
-
-
diff --git a/markdown/events/2023/bytesize_translations.md b/markdown/events/2023/bytesize_translations.md
deleted file mode 100644
index 4cea9ccfc8..0000000000
--- a/markdown/events/2023/bytesize_translations.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: 'Bytesize: training translations'
-subtitle: Marcel Ribeiro-Dantas, Seqera Labs
-type: talk
-start_date: '2023-04-25'
-start_time: '13:00 CEST'
-end_date: '2023-04-25'
-end_time: '13:30 CEST'
-youtube_embed: https://www.youtube.com/watch?v=WfHwOrB7VfA
-location_url:
- - https://www.youtube.com/watch?v=WfHwOrB7VfA
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **โnf-core/bytesizeโ**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: training translations
-
-In this weeks bytesize talk, Marcel Ribeiro-Dantas ([@mribeirodantas](https://github.com/mribeirodantas)) is talking about his efforts to translate nf-core/nextflow training material into other languages including Portuguese and Spanish.
diff --git a/markdown/events/2023/bytesize_variantcatalog.md b/markdown/events/2023/bytesize_variantcatalog.md
deleted file mode 100644
index 1b6d788ea0..0000000000
--- a/markdown/events/2023/bytesize_variantcatalog.md
+++ /dev/null
@@ -1,80 +0,0 @@
----
-title: 'Bytesize: variantcatalogue'
-subtitle: Solenne Correard, University of British Columbia, Vancouver, Canada
-type: talk
-start_date: '2023-02-21'
-start_time: '13:00 CET'
-end_date: '2023-02-21'
-end_time: '13:30 CET'
-youtube_embed: https://www.youtube.com/watch?v=Em1cHCLQQ_c
-location_url:
- - https://doi.org/10.6084/m9.figshare.22140854.v1
- - https://www.youtube.com/watch?v=Em1cHCLQQ_c
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **โnf-core/bytesizeโ**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: variantcatalogue
-
-This week, Solenne Correard ([@scorreard](https://github.com/scorreard)) is introducing us to the nextflow pipeline `variantcatalogue`. Though not an nf-core pipeline (yet), `variantcatalogue` is using nf-core derived material to aid in the creation of the pipeline.
-
-The variant catalogue pipeline is a Nextflow workflow designed to generate variant catalogues (also called variant libraries): a list of variants and their frequencies in a population, from whole genome sequences. More information on the pipeline can be found [on Github](https://github.com/wassermanlab/Variant_catalogue_pipeline) or in the [associated preprint](https://www.biorxiv.org/content/10.1101/2022.10.03.508010v2).
-
-Video transcription
-**Note: The content has been edited for reader-friendliness**
-
-[0:01](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=1)
-Hello, everyone, and welcome to this week's bytesize talk. The speaker today is Solenne Correard from the University of British Columbia in Canada, and she is going to talk about variantcatalog. This is a Nextflow pipeline, but it's not part of nf-core yet. Variantcatalog is used for population analysis from whole-genome sequencing, and specifically to identify variants and their frequencies. Since Solenne is living in Canada, and due to the big time difference, we decided that it's best to record this talk. Therefore, if you have any questions, please ask them in the Slack bytesize channel. As usual, I would like to thank Solenne for her time and the Chan Zuckerberg Initiative for funding the bytesize talk series. But now, without further ado, I hand over to Solenne.
-
-[0:52](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=52)
-Hi, everyone. Welcome to this week's nf-core bytesize talk. I am Solenne Correard. I'm a research associate at BC Genome Sciences Center in Vancouver, Canada. Today, I'm going to talk to you about the variantcatalogue pipeline. First, I would like to acknowledge the lands on which I work, live, and play. Those are the traditional, ancestral, and unceded territories of the Musqueam, Squamish, and Tsleil-Waututh nations.
-
-[1:22](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=82)
-First, what is a variant catalogue or a variant library? When we talk about genomics and DNA, a variant catalogue is the frequency of the variants within a population. For example, in this population of five individuals, they all get their whole genome sequenced and at a certain position in their DNA, some individuals carry an A, and some individuals carry a C. From that individual information, we can deduce the frequency of each allele in the population. In this example, the A allele has a frequency of 0.06, and the C allele has a frequency of 0.04. This is the main information that is within the variantcatalogue pipeline, the frequency of the variants within the population.
-
-[2:08](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=128)
-When do we use a variant catalog? There are several ways to use it, but a very good example is through the nf-core/raredisease pipeline. During the variant annotation and prioritization step of that pipeline, they use gnomAD. GnomAD is the biggest variant catalogue to date. The reason they use it is because a variant that is frequent in a population is unlikely to be responsible for a rare disease. When we are looking for the variant responsible for the rare disease in a kid, we can already filter out all the variants that are frequent in the population. As I mentioned, gnomAD is the biggest variant catalogue to date. It helped tons of families to get a diagnosis in rare diseases. But when we look at the ancestries of the individuals within gnomAD, we can see that most of the individuals are from European ancestry. Some populations are not even represented. They are not represented or underrepresented. This lack of representation from some population is leading to an inequity in genomic care. Because if the kids affected with a rare disease is from an ancestry that is not represented in the variant database, then it's harder to remove the variants that are frequent from this population, and so harder to give a diagnosis to this kid.
-
-[3:47](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=227)
-This is a known issue. Several variant catalogs were generated around the world. For example, Iranome with the Iranian population, or KOVA with the Korean population. The project I was working on is the Silent Genomes Project in Canada. It's a partnership with the Indigenous populations of Canada to build the Indigenous background variant library. A very similar project is taking place in New Zealand with genomics Aotearoa, where they are working with the Mฤori population. When we were working on the Indigenous background variant library, we needed a pipeline to process the data to get the variant frequencies.
-
-[4:30](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=270)
-Some pipelines existed, but none of them were fulfilling the three constraints that we had. The first one is that we wanted the pipeline to rely on open access tools that were previously benchmarked, because we didn't want to develop any new software or new tool. We wanted it to be comprehensive. By that, I mean it had to include single nucleotide variants, but also mitochondrial variants, structural variants, short tandem repeats, and mobile element insertions. All those classes of variants are known to be potentially implicated in rare disease. It's very important that all of them are present in a variant catalogue. Finally, we wanted it to be able to work on local servers or on the cloud, because different projects may have different constraints.
-
-[5:23](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=323)
-We developed the variantcatalogue pipeline that you can see on the left here. This is just an overview, and I'm going to describe each part in more details. But the idea is that it takes as input FASTQ files from participants. It outputs VCF files, so variant calling files with information about the variants, the position, the allele, the frequency of this variant within the population, which is the main information we want, the frequency by sex, as well as some annotation. The pipeline is divided in four subworkflows that can work independently, or all of them can be run in parallel, or at least in the same pipeline.
-
-[6:09](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=369)
-The first subworkflow is the mapping subworkflow. It takes as input short read paired-end sequences for individuals, as well as a reference genome. It has been developed so far for GRCh37 and GRCh38. The mapping tool is bwa mem, and it outputs one BAM file per individual. The second subworkflow is the mitochondrial variant subworkflow. It is very much based on the work from Laricchia et al. that was published in 2022. It's therefore very similar to the pipeline that is used by gnomAD for their mitochondrial variants. It takes as input the BAM files previously generated, the variant caller for the mitochondrial variants is GATK Mutect2.
-
-[7:06](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=426)
-The reason why there is a parallel section here is because the mitochondrial DNA is circular. To be able to map the mitochondrial reads against this reference genome, it is linearized with a fake breakpoint around zero here. The reads that are supposed to map over the fake breakpoint do not map correctly, hence the variants located around these regions are not called correctly. To address that issue, they developed a shifted reference genome where the fake breakpoint is located on the other side, which allowed to call the variants correctly in this region. Then the variants are lifted over, the information is merged into several VCFs. I will detail the steps at the bottom later.
-
-[8:04](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=484)
-The third subworkflow is the single nucleotide variant subworkflow. It is the most straightforward one. For variant calling, we decided to use DeepVariant. We are using GLnexus for the joint calling. For the fourth subworkflow, which is the structural variant subworkflow, it was mostly developed by Mohammed Abdallah, a postdoc within the Wasserman Lab. It was decided to use Smoove and Manta for structural variant callers. Jasmine is used to merge the variants, and Paragraph is used to genotype the structural variants within the individual data. Then the information is merged with BCFtools. For the short tandem repeats, we are using ExpensionHunter. For the mobile element insertions, we are using MELT.
-
-[8:57](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=537)
-All the variant calling part is very similar to other pipelines, such as nf-core/raredisease or nf-core/sarek. What is really specific about this pipeline is the steps at the bottom here. It's the sample quality control, variant quality control, allele frequency calculation, and also sex imputation. The reason for that is the quality control is performed differently if you have just one individual or a trio versus if you have a population. All of this is performed within Hail, which is a Python-based analysis tool that is also used by gnomAD and some other variant catalogue pipelines. As I said, it performed some quality control as well as the variant frequency calculation. Then the variants are annotated using VEP.
-
-[10:00](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=600)
-That was just an overview of the pipeline. This is the actual complete pipeline. It's available on the Wasserman lab GitHub, and it's described in more detail in this preprint. It was tested on 100 samples, and it works. The details of the number of CPU hours, as well as the number of samples and variants that were filtered out by the quality control steps is available within the preprint. However, this version still rely on locally installed software. That is an issue for two reasons. First, it's really hard for other projects to use. Second, it's impossible to test very easily. We are used to test other nf-core and Nextflow pipelines with just one command line.
-
-[10:48](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=648)
-Therefore, the future for the pipeline is to move it to an nf-core level pipeline. My goal is to move the mapping as well as the single nucleotide variants workflows during next month's hackathon. If anyone wants to team up with me for the code or for coding review, please reach out. After that, we will have to move the mitochondrial and the structural variants workflows also to nf-core. This will allow first other people to try it more easily, but it will also force us to do better documentation. That is very important to make sure that other groups can use the pipeline. If the documentation is good, then it's easier for other people to try and use this pipeline.
-
-[11:40](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=700)
-To test the pipeline, I actually needed to create a new data set because the one that were available within nf-core did not fit my needs. I needed paired-end short read FASTQ files that included part of an autosome as well as parts of chromosome X and Y to impute sex for the individuals as well as read mapping to the mitochondrial chromosomes to test subworkflow 2. I also needed reads supporting the presence of a structural variance to be able to test the subworkflow 4. And several samples, including XX and XY individuals, to be able to test the variant frequency calculation part. This will hopefully be available to others soon, in case you need them to test your tools or your pipeline. I will also include the reference genome for the same region and additional files such as the short tandem repeat catalog, the mitochondrial reference file and the shifted one I mentioned before.
-
-[12:46](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=766)
-In other future developments, I would like to include more reference genomes, including the T2T for humans but also non-humans reference genomes. I would like to include more software, for example, to give the opportunity for the user to decide which mapper they want to use, which variant callers they want to use. We also want to make sure that it fits with the nf-core/raredisease pipeline. I know that they use slightly different callers for structural variants. It would be interesting to make sure that there is a good fit. It's also possible to include additional metrics such as ancestry inference, mitochondrial group assignment or relatedness calculation. Those are metrics that are often associated to variant catalogue pipelines. It was out of the scope for the Silent Genomes Project, but we understand the relevance for other projects and it would be great to also include them and have them as an option.
-
-[13:53](https://www.youtube.com/watch?v=Em1cHCLQQ_c&t=833)
-I would like to acknowledge everyone within the Wasserman Lab, especially Wyeth Wasserman, the team leader, Mohammed Abdallah, who worked a lot on the structural variant pipeline subworkflow and the rest of the pipeline, as well as Brittany Hewitson, the Silent Genomes team, and also all the nf-core community. It's been a very welcoming community and I've learned a lot. Obviously, this is not live. If you have any questions, please reach out on the nf-core/variantcatalogue channel to sparkle a discussion and start threads on different things. If you prefer to reach out directly to me, you can do it through Twitter or GitHub. Thank you for your attention and have a great rest of your day.
-
-
diff --git a/markdown/events/2023/bytesize_workflow_safety.md b/markdown/events/2023/bytesize_workflow_safety.md
deleted file mode 100644
index 8bad627d17..0000000000
--- a/markdown/events/2023/bytesize_workflow_safety.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: 'Bytesize: Workflow safety and immutable objects'
-subtitle: Rob Syme, Seqera Labs
-type: talk
-start_date: '2023-05-23'
-start_time: '13:00 CET'
-end_date: '2023-05-23'
-end_time: '13:30 CET'
-youtube_embed: https://www.youtube.com/watch?v=A357C-ux6Dw
-location_url:
- - https://www.youtube.com/watch?v=A357C-ux6Dw
----
-
-# nf-core/bytesize
-
-Join us for our **weekly series** of short talks: **โnf-core/bytesizeโ**.
-
-Just **15 minutes** + questions, we will be focussing on topics about using and developing nf-core pipelines.
-These will be recorded and made available at
-It is our hope that these talks / videos will build an archive of training material that can complement our documentation. Got an idea for a talk? Let us know on the [`#bytesize`](https://nfcore.slack.com/channels/bytesize) Slack channel!
-
-## Bytesize: Workflow safety and immutable objects
-
-This week, Rob Syme ([@robsyme](https://github.com/robsyme)) will talk about how to avoid introducing subtle concurrency bugs in your Nextflow workflows by safely modifying objects (or more specifically, by _not_ modifying objects) when writing Groovy closures.
diff --git a/markdown/events/2023/hackathon-march-2023/uk-igc-edinburgh.md b/markdown/events/2023/hackathon-march-2023/uk-igc-edinburgh.md
index 7a2893b56f..f42d84dee5 100644
--- a/markdown/events/2023/hackathon-march-2023/uk-igc-edinburgh.md
+++ b/markdown/events/2023/hackathon-march-2023/uk-igc-edinburgh.md
@@ -6,7 +6,7 @@ start_date: '2023-03-27'
start_time: '09:30 BST'
end_date: '2023-03-29'
end_time: '16:30 BST'
-location_name: MRC Insitute of Genetics and Cancer
+location_name: MRC Institute of Genetics and Cancer
# address: Medical Education Centre Computing Lab 1, Outpatients Department, Western General Hospital, Crewe Road, Edinburgh, EH4 2XU
# location_url: https://www.med.scot.nhs.uk/hospitals/wgh/medical-education/education-centre
# location_latlng: [55.9624822, -3.2383988]
diff --git a/markdown/events/2023/meetup_may_groovy.md b/markdown/events/2023/meetup_may_groovy.md
index 779861db73..df1afb83a1 100644
--- a/markdown/events/2023/meetup_may_groovy.md
+++ b/markdown/events/2023/meetup_may_groovy.md
@@ -1,7 +1,7 @@
---
title: 'Workflows Community Meetup - All Things Groovy'
subtitle: Supercharge your Nextflow pipelines with Groovy โ Sateesh Peri (nf-core) and Simon Murray (Wellcome Sanger Institute)
-type: talk
+type: tutorial
start_date: '2023-05-23'
start_time: '15:00 CET'
end_date: '2023-05-23'
@@ -11,6 +11,8 @@ address: https://sanger.zoom.us/j/93327483010?pwd=TEk5UTJEUVdZMGFGWHJKSURGcUVZZz
location_url: https://goo.gl/maps/h68RvwRHsNLWqtUc9
---
+> Organised by [the BioDev Network](https://www.youtube.com/@biodev-network)
+
**Workflows Community** is inviting you to a meetup focused on supercharging your Nextflow pipelines with Groovy.
1. **Sateesh Peri (nf-core)** will do a hands-on exploration of **nf-test**, a simple test framework specifically tailored for Nextflow pipelines. This presentation will delve into:
diff --git a/markdown/events/2023/meetup_nextflow_in_action-oct-2023.md b/markdown/events/2023/meetup_nextflow_in_action-oct-2023.md
new file mode 100644
index 0000000000..8f13845507
--- /dev/null
+++ b/markdown/events/2023/meetup_nextflow_in_action-oct-2023.md
@@ -0,0 +1,40 @@
+---
+title: 'Nextflow in Action'
+subtitle: Matthieu Muffato, Luke Paul Buttigieg and Ricardo Ramirez
+type: talk
+start_date: '2023-10-12'
+start_time: '14:00+01:00'
+end_date: '2023-10-12'
+end_time: '15:00+01:00'
+location_name: Green Room, Hinxton Hall, Wellcome Genome Campus
+location: https://sanger.zoom.us/j/96537565366?pwd=UWpYVVdsbTZFREZkbndaZGNkNGJrdz09
+youtube_embed: https://youtu.be/w4RGam3T8iQ
+---
+
+> Organised by [the BioDev Network](https://www.youtube.com/@biodev-network)
+
+# Nextflow in Action
+
+Join us for our **monthly series** of talks: **“Nextflow in Action”**.
+
+With **20 minutes** for each talk plus questions, we will be focussing on how different organisations are using and developing with Nextflow.
+These will be recorded and made available on [YouTube](https://www.youtube.com/@workflows-community).
+It is our hope that these talks / videos will help us learn from the greater Nextflow community. If you want to present, please reach out to [Priyanka Surana](mailto:ps22@sanger.ac.uk).
+
+## Matthieu Muffato, Tree of Life, Wellcome Sanger Institute
+
+**Adopting Nextflow in Sanger Tree of Life**
+
+Tree of Life is the latest scientific programme of the Wellcome Sanger Institute, aiming to produce reference genome assemblies for tens of thousands of species. We decided early on that Nextflow would be the workflow manager of choice - what an adventure it has been! In this presentation, I will tell you about our journey, starting from training users and setting up development environments, right up to advertising our complete pipelines and reaching out to users outside the department, with some insights into deploying and automating the pipelines.
+
+Recording: [YouTube](https://youtu.be/w4RGam3T8iQ)
+
+## Luke Paul Buttigieg and Ricardo Humberto Ramirez Gonzalez, Genomics England
+
+**Porting clinical genome analysis to Nextflow at Genomics England**
+
+Genomics England provides whole genome sequencing diagnostics to the Genomic Medicine Service (UK), a free-at-the-point-of-care, nationwide genomic diagnostic testing service, with ambitious targets of processing 300,000 samples by 2025. Currently, all clinical bioinformatics is processed using a clinical-standard-certified, internally developed workflow engine (Bertha). We are migrating to a new solution (Genie), which combines Nextflow and Nextflow Tower with custom functionality, so we can focus on our core mission to enable equitably accessed genomics medicine for all. Genie should help us support newer use cases more quickly, across different infrastructures such as the cloud, and uses a standard workflow definition language. We have developed an approach to migrate at speed in an agile and iterative fashion. We are mocking the services in the workflow environment in Docker containers to allow us to run continuous integration tests. We are using an automated comparison testing framework to compare the existing system with the new one to detect regressions. Later, we will iteratively refactor the workflows, breaking up the Singularity image and optimising for performance. In this talk, we will describe this migration strategy, risk management and lessons learnt while working through this large-scale effort.
+
+Recording: [YouTube](https://youtu.be/xLQWFvbHszE)
+
+---
diff --git a/markdown/events/2023/meetup_nextflow_in_action-sept-2023.md b/markdown/events/2023/meetup_nextflow_in_action-sept-2023.md
new file mode 100644
index 0000000000..22e7537cf8
--- /dev/null
+++ b/markdown/events/2023/meetup_nextflow_in_action-sept-2023.md
@@ -0,0 +1,32 @@
+---
+title: 'Nextflow in Action'
+subtitle: Zahra Waheed and Ahmad Zyoud; Jonathan Manning
+type: talk
+start_date: '2023-09-05'
+start_time: '15:00+01:00'
+end_date: '2023-09-05'
+end_time: '16:00+01:00'
+location_name: BIC Conference Room
+---
+
+> Organised by [the BioDev Network](https://www.youtube.com/@biodev-network)
+
+# Nextflow in Action
+
+Join us for an engaging exploration of Nextflow in designing, executing, and managing complex bioinformatics workflows. Our meetups are a platform to exchange insights and delve into practical use cases. If you missed the previous sessions, you can watch the recordings on [YouTube](https://www.youtube.com/playlist?list=PLo5QmrytFHLHUkBLviJykEHYE8ZKzJOm5). This session includes:
+
+## Zahra Waheed and Ahmad Zyoud – Data Coordination and Archiving, EMBL-EBI
+
+The COVID-19 pandemic highlighted the importance of sharing genomic data and metadata globally, through submission to public nucleotide sequence repositories. To date, over 12 million raw reads and assembled SARS-CoV-2 sequences have been submitted to the European Nucleotide Archive (ENA) alone, which are visible and retrievable from the COVID-19 Data Portal. The rapid sharing of this data (whilst keeping in line with recommended metadata standards) is key to efficient outbreak surveillance and sequence interpretation and helps to drive a more effective public health response. Thus, developing simple, user-friendly submission tools is valuable for lowering the barrier to data entry, and maximising the rate and volume of data shared for scientific research.
+
+Seeing the need for a one-stop shop SARS-CoV-2 submission tool, the ENA team developed the SARS-CoV-2 Drag and Drop Uploader, which requires no technical skills from users and no prior knowledge of the repositoryโs submission process. The tool offers an alternative route to submit SARS-CoV-2 data to the ENA but can also be repurposed for other viral submissions, as proven by the equivalent ENA Monkeypox Uploader in response to the 2022 outbreak.
+
+Here we present a full Nextflow pipeline for the back-end automation of the SARS-CoV-2 Drag and Drop Uploader, incorporating existing ENA APIs, command-line programs and AWS. We additionally present a portable, standalone version of this workflow for general ENA data submission.
+
+
+
+## Jonathan Manning – nf-core
+
+[nf-core/differentialabundance](https://nf-co.re/differentialabundance) is a bioinformatics pipeline that can be used to analyse data represented as matrices, comparing groups of observations to generate differential statistics and downstream analyses. The initial feature set is built around RNA-seq, but we anticipate rapid expansion to include other platforms.
+
+(No recording available)
diff --git a/markdown/events/2023/meetup_use_cases_1.md b/markdown/events/2023/meetup_use_cases_1.md
index 95a138d4d4..ae586556f7 100644
--- a/markdown/events/2023/meetup_use_cases_1.md
+++ b/markdown/events/2023/meetup_use_cases_1.md
@@ -11,9 +11,20 @@ address: https://sanger.zoom.us/j/99770615046?pwd=KzczZ3FhNWZ3dzdQSUM4dnNxbmNjQT
location_url: https://goo.gl/maps/4adAsjqRwQVv18hJ6
---
+> Organised by [the BioDev Network](https://www.youtube.com/@biodev-network)
+
Join Workflows Community to hear real-world examples showcasing how Nextflow enables teams to efficiently design, execute, and manage complex bioinformatics workflows, unlocking crucial insights from complex genomics data.
- **Anthony Underwood โ Broken String Biosciences**
+
+Anthony Underwood, Head of Bioinformatics at Broken String Biosciences, talks about their implementation of Nextflow to deliver on the promise of gene editing.
+
+
+
- **Disha Lodha โ Ensembl Metazoa, EMBLโEBI**
+Disha Lodha from Ensembl Metazoa at EMBL-EBI talks about how they are moving the analytics for the popular genome browser to Nextflow.
+
+
+
Attend in person in BIC Conference Room (AB3-23,AB3-24) or via Zoom.
diff --git a/markdown/events/2023/summit_2023.md b/markdown/events/2023/summit_2023.md
new file mode 100644
index 0000000000..bc59f60f2a
--- /dev/null
+++ b/markdown/events/2023/summit_2023.md
@@ -0,0 +1,38 @@
+---
+title: Nextflow Summit 2023
+subtitle: Join us for the latest developments and innovations from the Nextflow world.
+type: talk
+start_date: '2023-10-16'
+start_time: '09:00+00:00'
+end_date: '2023-10-20'
+end_time: '18:00+00:00'
+location_name: Torre Glรฒries, Avinguda Diagonal, 211, 08018 Barcelona, Spain
+location_latlng: [41.403408, 2.1895932]
+youtube_embed: https://www.youtube.com/watch?v=Ggk59FOwvMo
+---
+
+> Organised by [Seqera](https://summit.nextflow.io/2023/barcelona/)
+
+# Welcome
+
+A showcase of the latest developments and innovations from the Nextflow world held in Barcelona and virtually:
+
+- Nextflow - central tool, language, and plugins
+- Community - pipelines, applications, and use cases
+- Ecosystem - infrastructure and environments
+- Software - containers and tool packaging
+
+# Key dates
+
+- July 3: Registration for the Nextflow SUMMIT opens
+- August 11: Call for talk abstracts closes
+- August 14: Accepted presenters are notified
+- September 18: Registration for the in-person Nextflow SUMMIT closes
+- October 6: Registration for the remote Nextflow SUMMIT closes
+- October 16-18: Hotel SB, Barcelona. nf-core Hackathon
+- October 18-20: Torre Glòries, Barcelona. Nextflow SUMMIT
+
+---
+
+All recordings are on [Youtube](https://www.youtube.com/playlist?list=PLPZ8WHdZGxmUotnP-tWRVNtuNWpN7xbpL)
+We (sanger-tol) gave a presentation on using Nextflow in production in Tree of Life:
diff --git a/markdown/events/2023/symposium_wgc_dec23.md b/markdown/events/2023/symposium_wgc_dec23.md
new file mode 100644
index 0000000000..64fc01c69f
--- /dev/null
+++ b/markdown/events/2023/symposium_wgc_dec23.md
@@ -0,0 +1,59 @@
+---
+title: Nextflow Symposium 2023
+subtitle: A hybrid symposium held on Wellcome Genome Campus in Cambridge and online
+type: talk
+announcement:
+ start: 2023-12-04T9:00:00+00:00
+start_date: '2023-12-04'
+start_time: '09:00+00:00'
+end_date: '2023-12-04'
+end_time: '18:00+00:00'
+location_name: Wellcome Genome Campus, Hinxton, UK
+location_latlng: [52.07800, 0.18683]
+youtube_embed: https://www.youtube.com/watch?v=e1UcnkI8-Fk
+---
+
+> Organised by [the BioDev Network](https://www.youtube.com/@biodev-network)
+
+# Welcome
+
+Join us in person or online for the Nextflow Symposium on the Wellcome Genome Campus. For further details, please see the [symposium website](http://bit.ly/3PK8ag7). Everyone is welcome to attend.
+
+If you have any questions about this event please reach out to [Priyanka Surana](mailto:ps22@sanger.ac.uk).
+
+# Registration
+
+You can register to attend the symposium in person or online using the links on the [website](http://bit.ly/3PK8ag7).
+
+Attendance is free, both online and in person. You can find travel and further information in the [Logistics section](https://sites.google.com/ebi.ac.uk/nextflow2023/logistics).
+
+**Registration is now closed**
+
+# Abstract Submissions
+
+We invite abstracts that showcase the development, implementation and deployment of Nextflow pipelines. We also encourage abstracts that extend Nextflow or nf-core tools. Both in-progress and recently published work are welcome for oral and poster presentations.
+
+**Abstract submission is now closed**
+
+# Schedule
+
+The symposium is a full-day event. Please see the [full programme](https://sites.google.com/ebi.ac.uk/nextflow2023/programme) on the symposium website for details.
+
+## In-person at Wellcome Genome Campus
+
+The in-person event will be held at the Kendrew Theatre in the EMBL-EBI South Building at the Wellcome Genome Campus. See the [travel information](https://sites.google.com/ebi.ac.uk/nextflow2023/logistics/travel-information) for further details.
+
+If you wish to attend in person, please [register](https://docs.google.com/forms/d/e/1FAIpQLSd60ndombsvt4hCe85wa34tgB-nNCJPIU3PSrwpgnoKYvmnKg/viewform) in advance.
+
+See the [Onsite Information](https://sites.google.com/ebi.ac.uk/nextflow2023/logistics/onsite-information) for details about the location and its facilities. On the day, the registration desk will open from 9 am. If you are an external visitor, please allow extra time for security.
+
+## Online on Zoom and YouTube
+
+The symposium will be broadcast via Zoom webinar. Closer to the event, registered attendees will receive the Zoom details. Slack will be used for Q&A on the day.
+
+All [posters](https://sites.google.com/ebi.ac.uk/nextflow2023/posters) will be available on the symposium website.
+
+---
+
+All recordings are on [Youtube](https://www.youtube.com/playlist?list=PLo5QmrytFHLFLfHxHW9WiB8o5RRCmQdco).
+We (sanger-tol) gave a presentation on resource optimisation:
diff --git a/markdown/events/2023/tol-hackathon-april-2023.md b/markdown/events/2023/tol-hackathon-april-2023.md
index 9d99f8601e..38ff829f14 100644
--- a/markdown/events/2023/tol-hackathon-april-2023.md
+++ b/markdown/events/2023/tol-hackathon-april-2023.md
@@ -18,4 +18,4 @@ We will have vouchers for beverages in the morning and afternoon. Lunch will be
## Code of conduct
-Please note that by attending the hackathon you are agreeing to abide by our [Code of Conduct](http://pipelines.tol-dev.sanger.ac.uk/code_of_conduct).
+Please note that by attending the hackathon you are agreeing to abide by our [Code of Conduct](/code_of_conduct).
diff --git a/markdown/events/2023/tol-hackathon-may-2023.md b/markdown/events/2023/tol-hackathon-may-2023.md
index 9641dc09d5..74309fbe03 100644
--- a/markdown/events/2023/tol-hackathon-may-2023.md
+++ b/markdown/events/2023/tol-hackathon-may-2023.md
@@ -18,4 +18,4 @@ Unfortunately, we do not have any funding for food and refreshments. :sob:
## Code of conduct
-Please note that by attending the hackathon you are agreeing to abide by our [Code of Conduct](http://pipelines.tol-dev.sanger.ac.uk/code_of_conduct).
+Please note that by attending the hackathon you are agreeing to abide by our [Code of Conduct](/code_of_conduct).
diff --git a/markdown/events/2023/tol_hackathon_july.md b/markdown/events/2023/tol_hackathon_july.md
index 78fc5383b3..2ff05969be 100644
--- a/markdown/events/2023/tol_hackathon_july.md
+++ b/markdown/events/2023/tol_hackathon_july.md
@@ -10,4 +10,4 @@ location_name: Floating Room, Morgan Building, Wellcome Genome Campus.
location_url: https://goo.gl/maps/PdbZXCceR7c43VXUA
---
-Please note that by attending the hackathon you are agreeing to abide by our [Code of Conduct](http://pipelines.tol-dev.sanger.ac.uk/code_of_conduct).
+Please note that by attending the hackathon you are agreeing to abide by our [Code of Conduct](/code_of_conduct).
diff --git a/markdown/events/2023/training-basic-2023.md b/markdown/events/2023/training-basic-2023.md
index ccdf2e9b8b..18892ffb27 100644
--- a/markdown/events/2023/training-basic-2023.md
+++ b/markdown/events/2023/training-basic-2023.md
@@ -10,6 +10,8 @@ location_name: YouTube
import_typeform: true
---
+> Organised by [nf-core](https://nf-co.re/events/2023/training-basic-2023/)
+
# Welcome
Join us from September 6-8 2023 for the Community Nextflow & nf-core Training event!
diff --git a/markdown/events/2023/training-march-2023.md b/markdown/events/2023/training-march-2023.md
index e40b33157f..b2d0171d1f 100644
--- a/markdown/events/2023/training-march-2023.md
+++ b/markdown/events/2023/training-march-2023.md
@@ -10,6 +10,8 @@ location_name: YouTube
import_typeform: true
---
+> Organised by [nf-core](https://nf-co.re/events/2023/training-march-2023/)
+
# Welcome
Join us from March 13-16 2023 for the Nextflow and nf-core training event!
diff --git a/markdown/events/2023/training-sept-2023/index.md b/markdown/events/2023/training-sept-2023/index.md
index 2bbc1b19a4..8aa1286a34 100644
--- a/markdown/events/2023/training-sept-2023/index.md
+++ b/markdown/events/2023/training-sept-2023/index.md
@@ -1,15 +1,17 @@
---
title: Community Advanced Nextflow Training - September 2023
-subtitle: A global online advanced Nextflow training event
+subtitle: Join us for the first ever Advanced community training! Become a Nextflow expert!
type: training
start_date: '2023-09-27'
-start_time: '15:00 CEST'
+start_time: '15:00+02:00'
end_date: '2023-09-28'
-end_time: '19:00 CEST'
+end_time: '19:00+02:00'
location_name: YouTube
-import_typeform: true
+import_typeform: true
---
+> Co-organised with [nf-core](https://nf-co.re/events/2023/training-sept-2023)
+
# Welcome
Join us from September 27-28 2023 for the Community Advanced Nextflow Training event!
@@ -20,11 +22,19 @@ Please note that this is not an introductory workshop and experience writing Nex
# Accessing the streams
-To make the workshops available to everyone they will be streamed on the [nf-core YouTube channel](https://www.youtube.com/c/nf-core).
+To make the workshops available to everyone they will be streamed on the [Advanced Nextflow training playlist on the nf-core YouTube channel](https://www.youtube.com/playlist?list=PL3xpfTVZLcNhPoEl8cT15MdIBfX9kFJCj).
+
+A link to each stream will also be posted in the [`#sept23-advanced-training`](https://nfcore.slack.com/archives/C05U1A096EQ) Slack channel.
# Attending at the Wellcome Genome Campus
-An in-person version of advanced training will be held at the Wellcome Genome Campus, Hinxton, UK. It is open to anyone - everyone welcome! The training will take place from 10am to 4pm on both days. More information about this local event can be found [here](wgc.md).
+An in-person version of advanced training will be held on 27th September at the Wellcome Genome Campus, Hinxton, UK. It is open to anyone - everyone welcome!
+
+The schedule for the in-person Wellcome Genome Campus event will differ from the schedule shown for the online event. The same content will be covered at both events; however, it will be condensed into one day rather than spread across two.
+
+More information about this local event can be found [here](wgc).
+
+Registration for this in-person version of advanced training is now closed. Registration for the online version of the training is still open.
# Schedule
@@ -50,11 +60,9 @@ Session 2 will cover the following topics:
## Asking questions
-Questions can be asked in the Slack channels created for this event. Community volunteers will monitor these channels and will answer your questions as quickly as they can. You can join the nf-core Slack [here](https://nf-co.re/join/slack).
-
-The Slack channels for this event will become available closer to the event.
+Questions can be asked in the nf-core [`#sept23-advanced-training`](https://nfcore.slack.com/archives/C05U1A096EQ) Slack channel created for this event. Community volunteers will monitor this channel and will answer your questions as quickly as they can. You can join the nf-core Slack [here](https://nf-co.re/join/slack).
-> Please be aware that due to the size of this event there may be a delay when answering your questions.
+> Please be aware that due to the size of this event, there may be a delay when answering your questions.
# Registration
diff --git a/markdown/events/2023/training-sept-2023/wgc.md b/markdown/events/2023/training-sept-2023/wgc.md
index 3abfb9694d..082429a238 100644
--- a/markdown/events/2023/training-sept-2023/wgc.md
+++ b/markdown/events/2023/training-sept-2023/wgc.md
@@ -3,23 +3,31 @@ title: Community Advanced Nextflow Training - September 2023
subtitle: Local Community Advanced Nextflow Training at the Wellcome Genome Campus, Cambridge.
type: training
start_date: '2023-09-27'
-start_time: '10:00 BST'
-end_date: '2023-09-28'
-end_time: '16:00 BST'
+start_time: '09:30+01:00'
+end_date: '2023-09-27'
+end_time: '17:00+01:00'
location_name: Garden Room, Main Building, EMBL-EBI, Wellcome Genome Campus
address: Wellcome Genome Campus, Hinxton CB10 1SA
location_url: https://www.wellcomegenomecampus.org
location_latlng: [48.8583, 2.2923]
---
+# Welcome
+
This is a free local event to be held at the [Wellcome Genome Campus, Cambridge](https://goo.gl/maps/XA8caWCAVToVT6EeA).
-Please register [here](https://form.typeform.com/to/gFMjNQzt).
+Registration for the in-person Wellcome Genome Campus event is now closed.
+
+If you have any questions about this event please reach out to [Priyanka Surana](mailto:ps22@sanger.ac.uk).
+
+# Local Schedule
+
+This full-day event will run from **9.30 am - 5.00 pm BST**.
-Everyone is welcome, even if you don't work on campus!
-If you don't work on campus and would like to attend, please reach out to [Priyanka Surana](mailto:ps22@sanger.ac.uk).
+The schedule for the in-person Wellcome Genome Campus event will differ from the schedule shown for the online event.
+The same content will be covered at both events; however, it will be condensed into one day rather than spread across two.
-## Local Schedule
+The training material will be presented virtually by Seqera Labs bioinformatics engineer [Rob Syme](https://github.com/robsyme). A tentative schedule for the day is listed below.
@@ -27,36 +35,60 @@ If you don't work on campus and would like to attend, please reach out to [Priya
-| Time  | Wed. 27 Sept. 2023        | Thur. 28 Sept. 2023 |
-| ----- | ------------------------- | ------------------- |
-| 09:45 | Welcome and Introductions | Daily Start         |
-| 10:00 | Session 1                 | Session 3           |
-| 12:00 | Lunch                     | Lunch               |
-| 13:00 | Session 2                 | Session 4           |
-| 15:00 | Q&A                       | Q&A                 |
+| Time     | Wed. 27 Sept. 2023                         |
+| -------- | ------------------------------------------ |
+| 9:30 AM  | Operator tour, Metadata propagation        |
+| 11:00 AM | Break                                      |
+| 11:15 AM | Grouping and splitting, Groovy imports     |
+| 12:45 PM | Lunch                                      |
+| 1:30 PM  | Workflow structure, Workflow Configuration |
+| 3:00 PM  | Break                                      |
+| 3:15 PM  | Troubleshooting, Training summary          |
+| 4:45 PM  | Open discussion                            |
+
+# Asking questions
+
+We will be joined by [Harshil Patel](https://github.com/drpatelh), [Adam Talbot](https://github.com/adamrtalbot), and Chris Townend, all from Seqera Labs, who will be available to chat and answer questions during the event.
+
+# Need more information?
+
 Primary contact: [Priyanka Surana](https://nfcore.slack.com/team/U02JA08N0BC)
diff --git a/markdown/events/2024/hackathon-march-2024.md b/markdown/events/2024/hackathon-march-2024.md
new file mode 100644
index 0000000000..076b0d08a0
--- /dev/null
+++ b/markdown/events/2024/hackathon-march-2024.md
@@ -0,0 +1,70 @@
+---
+title: Hackathon - March 2024 (Cambridge)
+subtitle: Local node of the nf-core hackathon at the Wellcome Genome Campus, Cambridge.
+type: hackathon
+start_date: '2024-03-18'
+start_time: '09:30+00:00'
+end_date: '2024-03-19'
+end_time: '16:30+00:00'
+location_name: Rosalind Franklin Pavilion, Conference Centre, Wellcome Genome Campus
+address: Wellcome Genome Campus, Hinxton CB10 1SA
+location_url: https://www.wellcomegenomecampus.org
+location_latlng: [52.079047, 0.187607]
+import_typeform: true
+---
+
+Local event to be held at the [Wellcome Genome Campus, Cambridge](https://goo.gl/maps/k3fuhxd6R8Tnuoay8).
+
+For questions or concerns, contact [Jose Perez-Silva](https://nfcore.slack.com/team/U04M3B2QSDQ) or [Damon-Lee Pointon](https://sanger.slack.com/team/U0283HRRMB6).
+
+## Registration
+
+Registration for the March 2024 nf-core hackathon is now OPEN!
+
+You can sign up [here](https://seqera.typeform.com/mar24hackathon) or in the widget below.
+
+
+
+Also, to help us organise the event, please fill in [this form](https://forms.gle/BXHjAJtntPajfu4aA).
+
+## Local Schedule
+
+Refreshments will be available at 10:30 and 15:00 daily. Lunch is provided; please let us know your dietary requirements.
+
+| Time  | Mon. 18 Mar. 2024        | Tue. 19 Mar. 2024 |
+| ----- | ------------------------ | ----------------- |
+| 09:30 | Welcome and Introduction | Daily Start       |
+| 10:00 | Hack!                    | Hack!             |
+| 12:30 | Lunch                    | Lunch             |
+| 13:30 | Hack!                    | Hack!             |
+| 16:00 | Daily sync               | Wrap up           |
diff --git a/markdown/events/2024/seminar_nfcore.md b/markdown/events/2024/seminar_nfcore.md
new file mode 100644
index 0000000000..270708b40b
--- /dev/null
+++ b/markdown/events/2024/seminar_nfcore.md
@@ -0,0 +1,15 @@
+---
+title: 'Informatics Seminar: nf-core: what it is and isn''t'
+subtitle: Matthieu Muffato
+type: talk
+start_date: '2024-01-30'
+start_time: '11:00+00:00'
+end_date: '2024-01-30'
+end_time: '12:00+00:00'
+location: Zoom
+---
+
+In this talk, I want to first explain what nf-core [1] is and the place it has in the Nextflow [2] ecosystem. I will then show what it does well and what it doesn't do well, together with some potential workarounds, and try to clarify the roles and responsibilities of everyone (end-users, contributors, maintainers, etc).
+
+[1]. https://nf-co.re/
+[2]. https://www.nextflow.io/docs/latest/
diff --git a/markdown/events/2024/training-foundational-march.md b/markdown/events/2024/training-foundational-march.md
new file mode 100644
index 0000000000..2ccc5c5dcb
--- /dev/null
+++ b/markdown/events/2024/training-foundational-march.md
@@ -0,0 +1,81 @@
+---
+title: Community Foundational Nextflow Training - March 2024
+subtitle: An online Community Foundational Nextflow training event
+type: training
+start_date: '2024-03-05'
+start_time: '15:00+01:00'
+end_date: '2024-03-06'
+end_time: '18:00+01:00'
+location_name: Library Room, Conference Centre, and YouTube
+import_typeform: true
+---
+
+> Organised by [nf-core](https://nf-co.re/events/2024/training-foundational-march/)
+
+# Welcome
+
+Join us from **March 5-6** for the Community Nextflow Foundational Training event!
+
+The core training is free and delivered virtually. It will cover the fundamentals of using Nextflow.
+While the training will offer something for all skill levels, it will be especially useful for those who are new to Nextflow and the nf-core community, or for those thinking about joining the [nf-core Hackathon](https://nf-co.re/events/2024/hackathon-march-2024) for the first time.
+
+Prior knowledge of Nextflow is not required but will make it easier to understand new concepts.
+
+# Registration
+
+Registration for the training is now OPEN!
+
+Please sign up [here](https://form.typeform.com/to/cueKqqeM) or in the widget below to be kept up to date with all relevant information.
+
+
+
+> Registering for the event helps us to understand how many people will be attending and will help us to plan for future events.
+
+# Accessing the streams
+
+To make the workshops available to everyone they will be streamed on the [nf-core YouTube channel](https://www.youtube.com/c/nf-core).
+Videos will be released at the start of each session and will be available to watch at any time after that.
+
+A YouTube playlist of the streams for this event will be made available closer to the event.
+
+For people in sanger-tol, the Library Room of the Conference Centre has been booked to watch the feed together.
+
+# Schedule
+
+Training sessions will be available from 3 pm (CET) each day. Each session will be approximately 3 hours.
+
+## Session 1 (March 5)
+
+Session 1 will provide an overview of core Nextflow concepts. You will be taken through example Nextflow pipelines and introduced to concepts that will be expanded on in Session 2.
+
+- An introduction to Nextflow and nf-core (30 min)
+- Getting started with Nextflow (30 min)
+- A proof of concept RNA-Seq pipeline (60 min)
+- Managing dependencies and containers (30 min)
+- Groovy introduction (30 min)
+
+## Session 2 (March 6)
+
+Session 2 will provide a deeper dive into Nextflow. You will revisit concepts from Session 1 and learn more about Nextflow features and how to use them.
+
+- Channels, Processes, and Operators (60 min)
+- Modularization (20 min)
+- Configuring pipelines (20 min)
+- Deployment scenarios (20 min)
+- Cache and resume (20 min)
+- Troubleshooting (20 min)
+- Getting started with Seqera Platform (20 min)
+
+# Asking questions
+
+Questions can be asked in the Slack channels created for this event. Community volunteers will monitor these channels and will answer your questions as quickly as they can. You can join the nf-core Slack [here](https://nf-co.re/join/slack).
+
+While the training is being streamed at a specific time, you can ask questions at any time. We will monitor the Slack channels closely for a few days after the event to make sure we answer your questions.
+
+More details about the Slack channel will be made available closer to the event.
+
+> Please be aware that due to the size of this event there may be a delay when answering your questions.
+
+# Code of conduct
+
+Please note that by attending the training event you are agreeing to abide by our [Code of Conduct](/code_of_conduct).
diff --git a/markdown/publications.md b/markdown/publications.md
index 399e6bd89c..dc623f99d4 100644
--- a/markdown/publications.md
+++ b/markdown/publications.md
@@ -1,5 +1,5 @@
Do you know of a ToL informatics publication that we are missing?
-[Let us know](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana&labels=&projects=&template=publications.yaml&title=%5BPublication%5D%3A+DOI).
+[Let us know](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=&projects=&template=publications.yaml&title=%5BPublication%5D%3A+DOI).
# Initiatives {#initiatives}
@@ -45,7 +45,7 @@ Do you know of a ToL informatics publication that we are missing?
# Pipelines {#pipelines}
-### [Genome After Party](https://pipelines.tol.sanger.ac.uk/genome_after_party) {#genome_after_party}
+### [Genome After Party](/genome_after_party) {#genome_after_party}
๐งฐ [BlobToolKit](https://blobtoolkit.genomehubs.org)
diff --git a/markdown/projects/genome_after_party_summary.md b/markdown/resources/genome_after_party.md
similarity index 52%
rename from markdown/projects/genome_after_party_summary.md
rename to markdown/resources/genome_after_party.md
index 07b22ce3c1..99dfda60a1 100644
--- a/markdown/projects/genome_after_party_summary.md
+++ b/markdown/resources/genome_after_party.md
@@ -1,11 +1,27 @@
-> If you have an idea for a new feature โ [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+).
+# Introduction
+
+Genome After Party is a suite of pipelines to standardise the downstream analyses performed on all genomes produced by the Tree of Life. These include:
+
+- **[sanger-tol/insdcdownload](/insdcdownload)** downloads assemblies from INSDC into a Tree of Life directory structure.
+- **[sanger-tol/ensemblrepeatdownload](/ensemblrepeatdownload)** downloads repeat annotations from Ensembl into a Tree of Life directory structure.
+- **[sanger-tol/ensemblgenedownload](/ensemblgenedownload)** downloads gene annotations from Ensembl into the Tree of Life directory structure.
+- **[sanger-tol/sequencecomposition](/sequencecomposition)** extracts statistics from a genome about its sequence composition.
+- **[sanger-tol/readmapping](/readmapping)** for aligning reads generated using Illumina, HiC, PacBio and Nanopore technologies against a genome assembly.
+- **[sanger-tol/variantcalling](/variantcalling)** for calling variants using DeepVariant with PacBio data.
+- **[sanger-tol/blobtoolkit](/blobtoolkit)** is used to identify and analyse non-target DNA for eukaryotic genomes.
+- **[sanger-tol/genomenote](/genomenote)** creates HiC contact maps and collates (1) assembly information, statistics and chromosome details, (2) PacBio consensus quality and k-mer completeness, and (3) HiC mapping statistics.
+
+Learn more about our pipelines on their dedicated pages. These pipelines are created using [Nextflow DSL2](https://www.nextflow.io) and the [nf-core](https://nf-co.re) template. They are designed for portability, scalability and biodiversity.
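+
+To give a flavour of what the Nextflow DSL2 structure behind these pipelines looks like, below is a minimal, generic sketch of the process-plus-workflow pattern that the nf-core template is built around. The process name, the `--assembly` parameter and the samtools step are illustrative assumptions only, not code taken from any sanger-tol pipeline.
+
+```nextflow
+// Minimal DSL2 sketch (illustrative only; names and parameters are assumptions,
+// not code from a sanger-tol pipeline).
+nextflow.enable.dsl = 2
+
+// One small, containerisable step of the kind these pipelines chain together:
+// index a FASTA file with samtools.
+process SAMTOOLS_FAIDX {
+    input:
+    path fasta
+
+    output:
+    path "${fasta}.fai"
+
+    script:
+    """
+    samtools faidx ${fasta}
+    """
+}
+
+workflow {
+    // params.assembly is supplied on the command line, e.g. --assembly genome.fa
+    SAMTOOLS_FAIDX(Channel.fromPath(params.assembly))
+}
+```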
+
+A portal is being developed to automate the production of genome note publications. It will execute the Nextflow pipeline and populate an associated database with generated statistics and images. The platform is being designed in collaboration with the Enabling Platforms team to create genome-note-style publications for both internal Tree of Life assemblies and external genome assemblies.
+
> You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
> Currently, we plan to **download** all primary and alternative ToL assemblies, as well as non-ToL (VGP, _Lepidoptera_, and requests) assemblies, and run the analysis pipelines **only on the primary** assemblies.
-> [Let us know](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+) if some analyses would be useful to have on the other assemblies too.
+> [Let us know](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+) if some analyses would be useful to have on the other assemblies too.
# INSDC Download {#insdcdownload}
-**[sanger-tol/insdcdownload](https://pipelines.tol.sanger.ac.uk/insdcdownload)** downloads assemblies from INSDC into a Tree of Life directory structure. This pipeline is run for all **primary and alternative** ToL assemblies, as well as non-ToL (VGP, _Lepidoptera_, and requests) assemblies.
+**[sanger-tol/insdcdownload](/insdcdownload)** downloads assemblies from INSDC into a Tree of Life directory structure. This pipeline is run for all **primary and alternative** ToL assemblies, as well as non-ToL (VGP, _Lepidoptera_, and requests) assemblies.
### Current features:
@@ -20,11 +36,11 @@
- Update pipeline template
- Update samplesheet validation steps
-If you have an idea for a new feature โ [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
+If you have an idea for a new feature โ [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
# Ensembl Repeat Download {#ensemblrepeatdownload}
-**[sanger-tol/ensemblrepeatdownload](https://pipelines.tol.sanger.ac.uk/ensemblrepeatdownload)** downloads repeat annotations from Ensembl into a Tree of Life directory structure. This pipeline is run for all **primary and alternative** ToL assemblies, as well as non-ToL (VGP, _Lepidoptera_, and requests) assemblies.
+**[sanger-tol/ensemblrepeatdownload](/ensemblrepeatdownload)** downloads repeat annotations from Ensembl into a Tree of Life directory structure. This pipeline is run for all **primary and alternative** ToL assemblies, as well as non-ToL (VGP, _Lepidoptera_, and requests) assemblies.
 ### Current features:
@@ -37,11 +53,11 @@ If you have an idea for a new feature โ [send us your request](https://github.
- Update pipeline template
- Update samplesheet validation steps
-If you have an idea for a new feature – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
+If you have an idea for a new feature – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
# Ensembl Gene Download {#ensemblgenedownload}
-**[sanger-tol/ensemblgenedownload](https://pipelines.tol.sanger.ac.uk/ensemblgenedownload)** downloads gene annotations from Ensembl into the Tree of Life directory structure. This pipeline is run for all **primary and alternative** ToL assemblies, as well as non-ToL (VGP, _Lepidoptera_, and requests) assemblies.
+**[sanger-tol/ensemblgenedownload](/ensemblgenedownload)** downloads gene annotations from Ensembl into the Tree of Life directory structure. This pipeline is run for all **primary and alternative** ToL assemblies, as well as non-ToL (VGP, _Lepidoptera_, and requests) assemblies.
### Current features:
@@ -55,11 +71,11 @@ If you have an idea for a new feature – [send us your request](https://github.
- Update pipeline template
- Update samplesheet validation steps
-If you have an idea for a new feature – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
+If you have an idea for a new feature – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
# Sequence Composition {#sequencecomposition}
-**[sanger-tol/sequencecomposition](https://pipelines.tol.sanger.ac.uk/sequencecomposition)** extracts statistics from a genome about its sequence composition. This pipeline is run for all **primary** ToL assemblies.
+**[sanger-tol/sequencecomposition](/sequencecomposition)** extracts statistics from a genome about its sequence composition. This pipeline is run for all **primary** ToL assemblies.
### Current features:
@@ -87,11 +103,11 @@ _Only if there is an actual demand ! Use the form below to indicate interest._
- Telomeric repeat annotation (tool to be confirmed)
- Centromeric repeat annotation (tool to be confirmed)
-If you have an idea for a new feature or would like this pipeline to run on other assemblies – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
+If you have an idea for a new feature or would like this pipeline to run on other assemblies – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
# Read Mapping {#readmapping}
-**[sanger-tol/readmapping](https://pipelines.tol.sanger.ac.uk/readmapping)** aligns reads generated using Illumina, HiC, PacBio and Nanopore technologies against a genome assembly. This pipeline is run for all **primary** ToL assemblies.
+**[sanger-tol/readmapping](/readmapping)** aligns reads generated using Illumina, HiC, PacBio and Nanopore technologies against a genome assembly. This pipeline is run for all **primary** ToL assemblies.
### Current features:
@@ -114,11 +130,11 @@ If you have an idea for a new feature or would like this pipeline to run on othe
- Support compression with `crumble` for aligned files
- Support multiple output options – BAM, compressed BAM, CRAM, compressed CRAM
-If you have an idea for a new feature or would like this pipeline to run on other assemblies – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
+If you have an idea for a new feature or would like this pipeline to run on other assemblies – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
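Because the pipeline accepts reads from several technologies at once, the input is usually described in a small samplesheet that records which platform produced each read set. The sketch below is a hypothetical example following common nf-core conventions; the column names (`sample`, `datatype`, `datafile`) and the `--fasta` parameter are assumptions, so the pipeline's own schema should be treated as authoritative.

```bash
# Hypothetical samplesheet: one row per read set, tagged with its technology.
# Column names and pipeline parameters are assumptions, not the confirmed schema.
cat > samplesheet.csv <<'EOF'
sample,datatype,datafile
sample1,pacbio,reads/sample1.pacbio.ccs.bam
sample1,hic,reads/sample1.hic.cram
sample1,illumina,reads/sample1.illumina_R1.fastq.gz
EOF

nextflow run sanger-tol/readmapping \
    -profile singularity \
    --input samplesheet.csv \
    --fasta assembly.fasta \
    --outdir results
```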
# Variant Calling {#variantcalling}
-**[sanger-tol/variantcalling](https://pipelines.tol.sanger.ac.uk/variantcalling)** calls (short) variants on PacBio data using DeepVariant. This pipeline is run for all **primary** ToL assemblies.
+**[sanger-tol/variantcalling](/variantcalling)** calls (short) variants on PacBio data using DeepVariant. This pipeline is run for all **primary** ToL assemblies.
### Current features:
@@ -139,11 +155,11 @@ If you have an idea for a new feature or would like this pipeline to run on othe
- Add variant calling for short read data with FreeBayes
- Add optional read mapping subworkflow
-If you have an idea for a new feature or would like this pipeline to run on other assemblies – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
+If you have an idea for a new feature or would like this pipeline to run on other assemblies – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
# BlobToolKit {#blobtoolkit}
-**[sanger-tol/blobtoolkit](https://pipelines.tol.sanger.ac.uk/blobtoolkit)** is used to identify and analyse non-target DNA for eukaryotic genomes. This pipeline will be run for all **primary** ToL assemblies after the 1.0.0 release. Currently, the [Snakemake version](https://github.com/blobtoolkit/blobtoolkit/tree/main/src/blobtoolkit-pipeline/src) is used in production.
+**[sanger-tol/blobtoolkit](/blobtoolkit)** is used to identify and analyse non-target DNA for eukaryotic genomes. This pipeline will be run for all **primary** ToL assemblies after the 1.0.0 release. Currently, the [Snakemake version](https://github.com/blobtoolkit/blobtoolkit/tree/main/src/blobtoolkit-pipeline/src) is used in production.
### Current features:
@@ -165,11 +181,11 @@ If you have an idea for a new feature or would like this pipeline to run on othe
- NCBI `blastn` search of assembly contigs with no `Diamond blastx` match against the NCBI nt database
- Add optional read mapping subworkflow
-If you have an idea for a new feature or would like this pipeline to run on other assemblies – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
+If you have an idea for a new feature or would like this pipeline to run on other assemblies – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
# Genome Note {#genomenote}
-**[sanger-tol/genomenote](https://pipelines.tol.sanger.ac.uk/genomenote)** generates all the data (tables and figures) used in genome note publications. These include (1) assembly information, statistics and chromosome details, (2) PacBio consensus quality and k-mer completeness, and (3) HiC contact maps and mapping statistics. This pipeline is run for all **primary** ToL assemblies.
+**[sanger-tol/genomenote](/genomenote)** generates all the data (tables and figures) used in genome note publications. These include (1) assembly information, statistics and chromosome details, (2) PacBio consensus quality and k-mer completeness, and (3) HiC contact maps and mapping statistics. This pipeline is run for all **primary** ToL assemblies.
### Current features:
@@ -184,4 +200,4 @@ If you have an idea for a new feature or would like this pipeline to run on othe
- Combine results and metadata with template Word document
- Add optional read mapping subworkflow
-If you have an idea for a new feature or would like this pipeline to run on other assemblies – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
+If you have an idea for a new feature or would like this pipeline to run on other assemblies – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+). You can see all planned features and requests on the [project board](https://github.com/orgs/sanger-tol/projects/3).
diff --git a/markdown/tools/README.md b/markdown/resources/tools.md
similarity index 79%
rename from markdown/tools/README.md
rename to markdown/resources/tools.md
index 3c76d4ac3f..8bebe28b82 100644
--- a/markdown/tools/README.md
+++ b/markdown/resources/tools.md
@@ -1,18 +1,16 @@
-Do you know of a ToL tool that we are missing? [Let us know.](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana&labels=tool%2Cenhancement&projects=&template=add_tool.yaml&title=%5BTool%5D%3A+)
+Do you know of a ToL tool that we are missing? [Let us know.](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=muffato&labels=tool%2Cenhancement&projects=&template=add_tool.yaml&title=%5BTool%5D%3A+)
-
-
-
+
## Genomes on a Tree (GoaT)
Genomes on a Tree (GoaT) is an Elasticsearch-powered datastore and search index for genome-relevant metadata and for sequencing project plans and statuses. Attributes can be queried against assembly and taxon indexes through 1) an [API](https://goat.genomehubs.org/api-docs), 2) a [web](https://goat.genomehubs.org) front end, and 3) a command line interface, [GoaT-cli](https://github.com/genomehubs/goat-cli). GoaT uses the NCBI Taxonomy as its backbone and allows retrieval of inferred values for missing attributes based on phylogenetic interpolation. The web front end additionally provides summary visualisations for data exploration and reporting (see https://goat.genomehubs.org). GoaT is implemented as an instance of the open-source GenomeHubs codebase; for more information visit https://github.com/genomehubs/genomehubs.
-**Pipelines:** [genomenote](https://pipelines.tol.sanger.ac.uk/genomenote) | [blobtoolkit](https://pipelines.tol.sanger.ac.uk/blobtoolkit)
+**Pipelines:** [genomenote](/genomenote) | [blobtoolkit](/blobtoolkit)
-
+
## CobiontID
@@ -26,7 +24,7 @@ Genomes on a Tree (GoaT), an Elasticsearch-powered datastore and search index fo
[YaHS](http://dx.doi.org/10.1093/bioinformatics/btac808) is a scaffolding tool that uses Hi-C data. It relies on a new algorithm for detecting contig joins, which considers the topological distribution of Hi-C signals to distinguish real interaction signals from mapping noise. YaHS has been tested on a wide range of genome assemblies. Compared to other Hi-C scaffolding tools, it usually generates more contiguous scaffolds, with higher N90 and L90 statistics in particular. It is also very fast: it takes less than 5 minutes to reconstruct the human genome from an assembly of 5,483 contigs with ~45X Hi-C data.
-**Pipelines:** [genomeassembly](https://pipelines.tol.sanger.ac.uk/genomeassembly)
+**Pipelines:** [genomeassembly](/genomeassembly)
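As a rough illustration of how YaHS slots into a scaffolding workflow, the sketch below follows the two-argument usage described in the YaHS README (a draft assembly plus Hi-C read alignments to that assembly); the file names and the `-o` output-prefix option are assumptions, so the YaHS documentation should be checked for the exact interface.

```bash
# Rough sketch: scaffold a draft assembly with Hi-C alignments using YaHS.
# File names and the -o option are illustrative assumptions; see the YaHS README.
yahs -o scaffolded contigs.fa hic-to-contigs.bam
```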
@@ -34,4 +32,4 @@ Genomes on a Tree (GoaT), an Elasticsearch-powered datastore and search index fo
[MitoHiFi](https://www.biorxiv.org/content/10.1101/2022.12.23.521667v2) assembles mitochondrial genomes from a wide phylogenetic range of taxa using PacBio HiFi data. MitoHiFi is written in Python and is freely available on GitHub. It has been used to assemble 374 mitochondrial genomes (369 from 12 phyla and 39 orders of Metazoa, and from 6 species of Fungi) for the Darwin Tree of Life Project, the Vertebrate Genomes Project and the Aquatic Symbiosis Genome Project.
-**Pipelines:** [genomeassembly](https://pipelines.tol.sanger.ac.uk/genomeassembly)
+**Pipelines:** [genomeassembly](/genomeassembly)
diff --git a/markdown/teams/informatics-infrastructure.md b/markdown/teams/informatics-infrastructure.md
deleted file mode 100644
index 5b032b8f5b..0000000000
--- a/markdown/teams/informatics-infrastructure.md
+++ /dev/null
@@ -1,89 +0,0 @@
-## Introduction {#introduction}
-
-The [Tree of Life](https://www.sanger.ac.uk/programme/tree-of-life/) projects will generate tens of thousands of high-quality genomes – more than have ever been sequenced! It is a challenging and extremely exciting task that will shape the future of biology, and the team's role is to provide the platform for assembling and analysing those genomes at an unprecedented scale. We are the interface between the Tree of Life teams (assembly production and faculty research) and Sanger IT, working together with the informatics teams of the other programmes.
-
-The team is organised in three poles.
-
-**Data management**: Our data curators and managers maintain the integrity, consistency, and quality of multiple databases used in production, including Genomes on a Tree (GoaT), Sample Tracking System (STS), Collaborative Open Plant Omics (COPO), and BioSamples.
-
-**Bioinformatics**: Our bioinformaticians develop the suite of analysis pipelines that will run on every genome produced in Tree of Life, providing a central database of core results available for all.
-
-**Systems**: We develop and maintain some core systems used in production, including the execution and tracking of all bioinformatics pipelines, and the deployment of third-party web applications for internal use.
-
-## Tech stack {#tech-stack}
-
-
-The team uses a wide range of technologies, frameworks and programming languages, including Nextflow, Python, Conda, Jira, LSF, Singularity, and Kubernetes. The technology wheel below shows most of their logos. How many can you recognise?
-
-
-
-## Projects {#projects}
-
-### Genome After Party {#genome-after-party}
-
-**Tech Stack:** [Nextflow DSL2](https://www.nextflow.io), [Python](https://www.python.org), [React](https://react.dev), [SQLAlchemy](https://www.sqlalchemy.org), and [PostgreSQL](https://www.postgresql.org)
-
-[Genome After Party](https://pipelines.tol.sanger.ac.uk/genome_after_party) is a suite of pipelines to standardise the downstream analyses performed on all genomes produced by the Tree of Life. These include:
-
-- **[sanger-tol/insdcdownload](https://pipelines.tol.sanger.ac.uk/insdcdownload)** downloads assemblies from INSDC into a Tree of Life directory structure.
-- **[sanger-tol/ensemblrepeatdownload](https://pipelines.tol.sanger.ac.uk/ensemblrepeatdownload)** downloads repeat annotations from Ensembl into a Tree of Life directory structure.
-- **[sanger-tol/ensemblgenedownload](https://pipelines.tol.sanger.ac.uk/ensemblgenedownload)** downloads gene annotations from Ensembl into the Tree of Life directory structure.
-- **[sanger-tol/sequencecomposition](https://pipelines.tol.sanger.ac.uk/sequencecomposition)** extracts statistics from a genome about its sequence composition.
-- **[sanger-tol/readmapping](https://pipelines.tol.sanger.ac.uk/readmapping)** for aligning reads generated using Illumina, HiC, PacBio and Nanopore technologies against a genome assembly.
-- **[sanger-tol/variantcalling](https://pipelines.tol.sanger.ac.uk/variantcalling)** for calling variants using DeepVariant with PacBio data.
-- **[sanger-tol/blobtoolkit](https://pipelines.tol.sanger.ac.uk/blobtoolkit)** is used to identify and analyse non-target DNA for eukaryotic genomes.
-- **[sanger-tol/genomenote](https://pipelines.tol.sanger.ac.uk/genomenote)** creates HiC contact maps and collates (1) assembly information, statistics and chromosome details, (2) PacBio consensus quality and k-mer completeness, and (3) HiC mapping statistics.
-
-Learn more about our pipelines on their dedicated pages. These pipelines are created using [Nextflow DSL2](https://www.nextflow.io) and the [nf-core](https://nf-co.re) template. They are designed for portability, scalability and biodiversity.
-
-A portal is being developed to automate the production of genome note publications. It will execute the Nextflow pipeline and populate an associated database with generated statistics and images. The platform is being designed in collaboration with the Enabling Platforms team to create genome note style publications for both internal Tree of Life assemblies as well as external genome assemblies.
-
-If you have an idea for a new feature – [send us your request](https://github.com/sanger-tol/pipelines-website/issues/new?assignees=priyanka-surana&labels=pipeline%2Cenhancement&projects=&template=genome_after_party_feature_request.yaml&title=%5BFeature%5D%3A+).
-
-## Members {#members}
-
-### Matthieu Muffato, Team Lead {#matthieu-muffato}
-
-[](https://www.sanger.ac.uk/person/muffato-matthieu/) [](https://github.com/muffato) [](https://www.linkedin.com/in/matthieu-muffato/)
-
- Matthieu leads the Informatics Infrastructure team, which guides the implementation and delivery of the genome assembly pipelines, and provides support for large-scale genome analyses for the Tree of Life faculty teams. He joined the Wellcome Sanger Institute in February 2021, to form the Informatics Infrastructure team for the Tree of Life programme. He has recruited 7 team members, with skills covering data curation & management, software development & operations, and bioinformatics.
-
-### Guoying Qi, DevOps Software Developer {#guoying-qi}
-
-[](https://github.com/gq1) [](https://www.linkedin.com/in/guoying-qi/)
-
- Guoying, a DevOps software engineer, has the responsibility of developing and deploying software and web applications for the Tree of Life project across various platforms such as computing farms, Kubernetes, OpenStack, and public clouds.
-
-### Priyanka Surana, Senior Bioinformatician {#priyanka-surana}
-
-[](https://www.sanger.ac.uk/person/surana-priyanka/) [](https://github.com/priyanka-surana) [](https://www.linkedin.com/in/priyanka-surana/)
-
- Priyanka is a Senior Bioinformatician, overseeing the development of Nextflow pipelines for genome assembly, curation and downstream analyses. She also facilitates the workflows community and is passionate about building networks that support peer learning.
-
-### Cibin Sadasivan Baby, Senior Software Developer {#cibin-sadasivan-baby}
-
-[](https://github.com/cibinsb) [](https://www.linkedin.com/in/cibinsb/)
-
- Cibin, a Senior Software Developer, is tasked with designing and implementing the production systems for TOL-IT. Currently, Cibin is focused on building an automated platform to execute high-throughput genomic pipelines. The ultimate goal of this project is to develop a system capable of efficiently processing large amounts of genomic data.
-
-### Cibele Sotero-Caio, Genomic Data Curator {#cibele-sotero-caio}
-
-[](https://www.sanger.ac.uk/person/sotero-caio-cibele) [](https://github.com/ccaio) [](https://www.linkedin.com/in/cibele-sotero-caio-b379071a6/) [](https://twitter.com/CibeleCaio)
-
- Cibele is the data curator for Genomes on a Tree (GoaT), a platform developed to support the Tree of Life and other sequencing initiatives of the Earth BioGenome Project (EBP).
-
-### Paul Davis, Data Manager {#paul-davis}
-
-[](https://www.sanger.ac.uk/person/davis-paul/) [](https://github.com/Paul-Davis) [](https://www.linkedin.com/in/paul-davis-uk/) [](https://twitter.com/SirPaulDavis)
-
- Paul works on the main ToL Genome Engine. This system was developed by the ToL to manage and track samples from collection, onboarding, processing in the lab and sequencing, through to the publication of the assembly and its Genome Note. As there are many steps in this process, developing methodology to identify issues as early as possible is vital to avoid wasting time and resources. Paul works at all levels of the project, fielding questions about data flow and data fixes, and helping other ToL staff and project stakeholders with data and information. He also interacts with external groups and stakeholders to maintain data integrity in the public domain.
-
-### Beth Yates, Bioinformatics Engineer {#beth-yates}
-
-[](https://github.com/BethYates) [](https://www.linkedin.com/in/bethanyates/)
-
- Beth is a Bioinformatics Engineer working on building a platform to automate the production of [Genome Note publications](https://wellcomeopenresearch.org/treeoflife). The Universal Genome Note platform consists of a web portal, a database and Nextflow pipelines. Beth is contributing to the genomenote pipeline, which fetches assembly metadata and generates some of the figures and statistics included in each genome note.
-
-### Alumni {#alumni}
-
-- Zaynab Butt, Informatics and Digital Associate
diff --git a/markdown/usage/filesystem.md b/markdown/usage/filesystem.md
new file mode 100644
index 0000000000..5396049c55
--- /dev/null
+++ b/markdown/usage/filesystem.md
@@ -0,0 +1,6 @@
+---
+title: Tree of Life directory structure
+subtitle: The standard directory structure we use to organise our data
+---
+
+> Page in progress!
diff --git a/nf-core-contributors.yaml b/nf-core-contributors.yaml
deleted file mode 100644
index 14f582d044..0000000000
--- a/nf-core-contributors.yaml
+++ /dev/null
@@ -1,967 +0,0 @@
-# nf-core contributors. Add yourself below to be featured on the website
-# ----------------------------------------------------------------------
-# Note that image_fn should correspond to logos in:
-# /public_html/assets/img/contributors-white
-# /public_html/assets/img/contributors-colour
-# Please use svg images if possible.
-
-contributors:
- - full_name: National Genomics Infrastructure
- short_name: NGI
- description: >
- The NGI provides next-generation sequencing services for Swedish academic groups.
- Many of the nf-core pipelines started life as SciLifeLab / NGI workflows.
- affiliation: SciLifeLab
- address: Tomtebodavägen 23A, 17165 Solna, Sweden
- url: https://ngisweden.scilifelab.se/
- affiliation_url: https://www.scilifelab.se/
- image_fn: NGI.svg
- contact: Phil Ewels
- contact_email: phil.ewels@scilifelab.se
- contact_github: ewels
- location: [59.3505174, 18.0221508]
- twitter: ngisweden
-
- - full_name: Quantitative Biology Center
- short_name: QBiC
- description: >
- The Quantitative Biology Center provides a one-stop-shop for access to high-throughput
- technologies in the life sciences and the required bioinformatics analysis.
- As a bioinformatics core facility, we provide advanced data analysis techniques.
- affiliation: Universität Tübingen
- address: Auf der Morgenstelle 10, 72076 Tübingen, Germany
- url: http://qbic.life
- affiliation_url: https://www.uni-tuebingen.de
- image_fn: QBiC.svg
- contact: Gisela Gabernet
- contact_email: gisela.gabernet@qbic.uni-tuebingen.de
- contact_github: ggabernet
- location: [48.5379169, 9.0339095]
- twitter: QBIC_tue
-
- - full_name: The Francis Crick Institute
- short_name: Francis Crick
- description: >
- The Francis Crick Institute is an organisation dedicated to understanding the fundamental biology
- underlying health and disease.
- address: 1 Midland Rd, London NW1 1ST
- url: https://www.crick.ac.uk/
- image_fn: crick.svg
- contact: Harshil Patel
- contact_email: harshil.patel@crick.ac.uk
- contact_github: drpatelh
- location: [51.5317308, -0.129134219]
- twitter: TheCrick
-
- - full_name: Boehringer Ingelheim Pharma GmbH & CO. KG
- short_name: BI
- description: >
- Improving the health and quality of life of humans and animals is the goal of the research-driven pharmaceutical company Boehringer Ingelheim. The focus in doing so is on diseases for which no satisfactory treatment option exists to date. The company therefore concentrates on developing innovative therapies that can extend patients' lives. In animal health, Boehringer Ingelheim stands for advanced prevention.
- address: Birkendorfer Str. 65, 88400 Biberach an der Riss
- url: https://www.boehringer-ingelheim.com/
- image_fn: BI.svg
- contact: Alexander Peltzer
- contact_email: alexander.peltzer@boehringer-ingelheim.com
- contact_github: apeltzer
- location: [48.1144085, 9.797852]
- twitter: Boehringer
-
- - full_name: Seqera Labs
- short_name: Seqera
- description: >
- Seqera Labs is simplifying data analysis pipelines across cluster and cloud. As the maintainers of the Nextflow project we
- focus on enabling high-throughput computing solutions that streamline complex pipelines.
- address: Carrer D'Avila, 19, Local 2, 08005 Barcelona, Spain
- url: http://www.seqera.io
- image_fn: seqera.svg
- contact: Evan Floden
- contact_email: evan@seqera.io
- contact_github: evanfloden
- location: [41.4054012, 2.1900489]
- twitter: seqeralabs
-
- - full_name: Centre for Genomic Regulation
- short_name: CRG
- description: >
- The Centre for Genomic Regulation (CRG) is an international biomedical research institute
- of excellence, based in Barcelona, Spain, whose mission is to discover and advance knowledge
- for the benefit of society and public health.
- address: Carrer Dr. Aiguader, 88, 08003 Barcelona, Spain
- url: http://www.crg.eu/
- image_fn: CRG.svg
- contact: Paolo Di Tommaso
- contact_email: paolo.ditommaso@crg.eu
- contact_github: pditommaso
- location: [41.3853828, 2.191863]
- twitter: CRGenomica
-
- - full_name: Genome Institute of Singapore
- short_name: GIS
- description: >
- The Genome Institute of Singapore (GIS) is a national initiative for Singapore with a global vision that
- seeks to use genomic sciences to achieve extraordinary improvements in human health and public prosperity.
- address: 60 Biopolis St, Singapore 138672, Singapore
- url: https://www.a-star.edu.sg/gis
- affiliation_url: https://www.a-star.edu.sg
- image_fn: GIS.svg
- contact: Jonathan Göke
- contact_email: gokej@gis.a-star.edu.sg
- contact_github: jonathangoeke
- location: [1.3025202, 103.7904214]
- twitter: astar_gis
-
- - full_name: The Competence Centre for Genome Analysis Kiel
- short_name: CCGA Kiel
- description: >
- The Competence Centre for Genome Analysis Kiel is funded by the
- German Research Foundation and specialises in high-throughput sequencing of genomes.
- address: Christian-Albrechts-Platz 4, 24118 Kiel, Germany
- url: https://www.uni-kiel.de/en/research/research-infrastructures/
- contact: Marc Hoeppner
- contact_email: mphoeppner@gmail.com
- contact_github: marchoeppner
- location: [54.346058, 10.1125135]
- twitter: kieluni
-
- - full_name: The Wellcome Trust Sanger Institute
- short_name: Sanger
- description: >
- The Wellcome Sanger Institute is one of the premier centres of genomic discovery and
- understanding in the world. It leads ambitious collaborations across the globe to provide
- the foundations for further research and transformative healthcare innovations.
- address: Wellcome Trust Genome Campus, Hinxton CB10 1SA, United Kingdom
- url: https://www.sanger.ac.uk/
- image_fn: sanger.svg
- contact: Priyanka Surana; Stijn van Dongen; Victoria Offord; Edgar Garriga Nogales
- contact_email: nextflow@sanger.ac.uk
- location: [52.0797204, 0.1833987]
- twitter: sangerinstitute
-
- - full_name: Beatson Institute & Institute of Cancer Sciences
- short_name: BICR
- affiliation: Cancer Research UK, University of Glasgow
- description: >
- The Beatson Institute is one of Cancer Research UK's core-funded institutes.
- They have built an excellent reputation for basic cancer research, including world-class
- metabolism studies and renowned in vivo modelling of tumour growth and metastasis.
- address: Garscube Estate, Switchback Road, Bearsden, Glasgow G61 1BD, United Kingdom
- url: http://www.beatson.gla.ac.uk
- affiliation_url: https://www.cancerresearchuk.org/
- image_fn: BICR.svg
- contact: Peter Bailey
- contact_email: Peter.Bailey.2@glasgow.ac.uk
- contact_github: PeterBailey
- location: [55.906079, -4.324084]
- twitter: CRUK_BI
-
- - full_name: Swedish University of Agricultural Sciences
- short_name: SLU
- description: >
- The Swedish University of Agricultural Sciences is an international organisation
- providing top class research, education and environmental analysis in the sciences for sustainable life.
- address: Almas Allé 8, 75007 Uppsala, Sweden
- url: https://www.slu.se/
- image_fn: slu.svg
- contact: Hadrien Gourlé
- contact_email: hadrien.gourle@slu.se
- contact_github: HadrienG
- location: [59.8149497, 17.6606801]
- twitter: _slu
-
- - full_name: International Agency for Research on Cancer
- short_name: IARC
- description: >
- The International Agency for Research on Cancer is the specialized cancer agency of the World Health Organization.
- The objective of the IARC is to promote international collaboration in cancer research.
- affiliation: World Health Organisation
- address: 150 Cours Albert Thomas, 69372 Lyon CEDEX 08, France
- url: https://www.iarc.fr
- affiliation_url: http://www.who.int/
- image_fn: IARC.svg
- contact: Matthieu Foll
- contact_email: FollM@iarc.fr
- contact_github: mfoll
- location: [45.7438643, 4.8735019]
- twitter: iarcwho
-
- - full_name: Lifebit Biotech
- short_name: Lifebit
- description: >
- Lifebit is a London-based biotech start-up which offers an intelligent genomics platform to researchers
- and their corresponding organisations for scalable, modular and reproducible genomic analysis.
- address: 338 City Road, London, England, EC1V 2PY
- url: https://lifebit.ai/
- image_fn: lifebit.svg
- contact: Phil Palmer
- contact_email: phil@lifebit.ai
- contact_github: PhilPalmer
- location: [51.530731, -0.102373]
- twitter: lifebitAI
-
- - full_name: Sun Yat-sen University Cancer Center
- short_name: SYSUCC
- description: >
- Sun Yat-sen University Cancer Center is a comprehensive cancer centre which provides clinical care to patients,
- education (students & CME), research and prevention activities.
- address: 651, Dongfeng East Road, Guangzhou, Guangdong, China
- url: http://sysucc.org.cn/
- image_fn: sysucc.svg
- contact: Qi Zhao
- contact_email: zhaoqi@sysucc.org.cn
- contact_github: likelet
- location: [23.1314683032, 113.2899452059]
-
- - full_name: BioFrontiers Institute at University of Colorado, Boulder
- short_name: CUBoulder
- description: >
- The BioFrontiers Institute at the University of Colorado (CU) is an interdisciplinary hub for
- bioscience research and education with a focus on improving human health.
- affiliation: DowellLab
- address: 3415 Colorado Avenue, Boulder, CO 80303, USA
- url: https://www.colorado.edu/biofrontiers/
- affiliation_url: http://dowell.colorado.edu/
- image_fn: cu-logo.svg
- contact: Ignacio Tripodi
- contact_email: ignacio.tripodi@colorado.edu
- contact_github: ignaciot
- location: [40.007528, -105.248992]
- twitter: BioFrontiers
-
- - full_name: Gregor Mendel Institute of Molecular Plant Biology
- short_name: GMI
- description: >
- The Gregor Mendel Institute of Molecular Plant Biology, an Institute of the Austrian Academy of Sciences,
- is dedicated to basic research in diverse aspects of plant molecular biology.
- affiliation: Austrian Academy of Sciences
- address: Dr.-Bohr-Gasse 3, 1030 Vienna, Austria
- url: https://gmi.oeaw.ac.at/
- affiliation_url: https://www.oeaw.ac.at/
- image_fn: GMI.svg
- contact: Patrick Hüther
- contact_email: patrick.huether@gmi.oeaw.ac.at
- contact_github: phue
- location: [48.189641, 16.401923]
- twitter: gmivienna
-
- - full_name: Research Institute of Molecular Pathology IMP
- short_name: IMP
- description: >
- The Research Institute of Molecular Pathology (IMP) is a basic research institute in
- molecular biology, based at the Vienna Biocenter in Austria.
- address: Campus-Vienna-Biocenter 1, 1030 Vienna, Austria
- url: https://www.imp.ac.at/
- image_fn: IMP.svg
- contact: Tobias Neumann
- contact_email: tobias.neumann@imp.ac.at
- contact_github: t-neumann
- location: [48.189224, 16.402660]
- twitter: IMPvienna
-
- - full_name: City of Hope
- short_name: COH
- description: >
- City of Hope is a private, not-for-profit clinical research center, hospital and graduate medical school
- located in Duarte, California, United States.
- address: 1500 E Duarte Rd, Duarte, CA 91010, USA
- url: https://www.cityofhope.org
- image_fn: COH.svg
- contact: Denis O'Meally
- contact_email: domeally@coh.org
- contact_github: drejom
- location: [34.1298, -117.9711]
- twitter: cityofhope
-
- - full_name: Indian Institute of Science Education and Research Mohali
- short_name: IISERM
- description: >
- IISER Mohali is an autonomous academic and research institution at Mohali, Punjab, India.
- address: Knowledge city, Sector 81, Manauli PO, Sahibzada Ajit Singh Nagar, Punjab 140306
- url: http://www.iisermohali.ac.in/
- image_fn: IISER.svg
- contact: Sangram Keshari Sahu
- contact_email: sangramk@iisermohali.ac.in
- contact_github: sk-sahu
- location: [30.665125, 76.730071]
- twitter: IiserMohali
-
- - full_name: German Center for Neurodegenerative Diseases
- short_name: DZNE
- description: >
- The DZNE investigates the causes of neurodegenerative diseases and develops novel strategies
- for prevention, treatment and care.
- affiliation: Helmholtz Association
- address: Sigmund-Freud-Straße 27, 53127 Bonn
- url: https://www.dzne.de/en
- affiliation_url: https://www.helmholtz.de/en/
- image_fn: dzne.svg
- contact: Kevin Menden
- contact_email: kevin.menden@dzne.de
- contact_github: kevinmenden
- location: [50.697147, 7.106522]
- twitter: DZNE_en
-
- - full_name: New York University
- short_name: NYU
- description: >
- Founded in 1831, New York University is now one of the largest private universities in the United States.
- Of the more than 3,000 colleges and universities in America, New York University is one of only 60 member institutions
- of the distinguished Association of American Universities.
- address: 50 West 4th Street New York, NY 10012
- url: https://www.nyu.edu
- image_fn: nyu.svg
- contact: Tobias Schraink
- contact_email: tobias.schraink@nyumc.org
- contact_github: tobsecret
- location: [40.7290421, -73.9988925]
- twitter: nyuniversity
-
- - full_name: Royal Botanic Gardens Victoria
- short_name: RBGV
- description: >
- Royal Botanic Gardens Victoria is one of the world's leading botanic gardens and a centre of excellence
- for horticulture, science and education.
- address: Birdwood Avenue, Melbourne, Victoria, Australia, 3004
- url: https://www.rbg.vic.gov.au
- affiliation_url: https://www.rbg.vic.gov.au
- image_fn: rbgv.svg
- contact: Anna Syme
- contact_email: anna.syme@rbg.vic.gov.au
- contact_github: annasyme
- location: [-37.829856, 144.979595]
- twitter: RBG_Victoria
-
- - full_name: National Bioinformatics Infrastructure Sweden
- short_name: NBIS
- description: >
- NBIS is a distributed national bioinformatics infrastructure, supporting life sciences in Sweden.
- affiliation: SciLifeLab
- address: Tomtebodavägen 23A, 17165 Solna, Sweden
- url: https://nbis.se/
- affiliation_url: https://www.scilifelab.se/
- image_fn: NBIS.svg
- contact: Mahesh Binzer-Panchal
- contact_email: mahesh.panchal@nbis.se
- contact_github: mahesh-panchal
- location: [59.3505174, 18.0221508]
- twitter: NBISwe
-
- - full_name: Institut Curie
- short_name: Curie
- description: >
- The Institut Curie is a private nonprofit organization created in
- 1909 which combines a leading European cancer research center with
- a hospital group. The Institut Curie Bioinformatics platform is
- composed of biostatisticians and software engineers who offer a
- multidisciplinary expertise to support the biotechnological platforms,
- the research units and the hospital in their daily activities.
- affiliation: Bioinformatics platform
- address: 26 rue d'Ulm, 75248 Paris Cedex 05, France
- url: https://science.institut-curie.org
- affiliation_url: https://science.institut-curie.org/platforms/bioinformatics/
- image_fn: curie.svg
- contact: Nicolas Servant
- contact_email: nicolas.servant@curie.fr
- contact_github: nservant
- location: [48.8534, 2.3488]
- twitter: institut_curie
-
- - full_name: Chan Zuckerberg Biohub
- short_name: CZ Biohub
- description: >
- CZ Biohub is an independent, non-profit medical research organization
- working in collaboration with UC San Francisco, Stanford University
- and UC Berkeley. Our vision is to develop and apply technologies that
- will enable doctors to cure, prevent or manage all diseases during
- our children's lifetime.
- address: 499 Illinois St, San Francisco, CA, 94158
- url: https://www.czbiohub.org/
- image_fn: czbiohub.svg
- contact: Olga Botvinnik
- contact_email: olga.botvinnik@czbiohub.org
- contact_github: olgabot
- location: [37.765757, -122.387945]
- twitter: czbiohub
-
- - full_name: The Picower for Learning and Memory
- short_name: PILM
- description: >
- The institute is focused on studying all aspects of learning and memory;
- specifically, it has received over US$50 million to study Alzheimer's,
- schizophrenia and similar diseases.
- affiliation: Bioinformatics Core
- address: 43 Vassar St, Cambridge, MA 02139
- url: https://picower.mit.edu/
- affiliation_url: https://pilm-bioinformatics.github.io/knowledgebase/
- image_fn: pilm.svg
- contact: Lorena Pantano
- contact_email: lpantano@mit.edu
- contact_github: lpantano
- location: [42.3623024, -71.0917659]
- twitter: mit_picower
-
- - full_name: University of California Berkeley
- short_name: UC Berkeley
- description: >
- Our mission in the RISE Lab at the University of California is to develop
- technologies that enable applications to interact intelligently and
- securely with their environment in real time.
- affiliation: RISE Lab
- address: 465 Soda Hall, MC-1776, Berkeley, CA 94720-1776
- url: https://www.berkeley.edu/
- affiliation_url: https://rise.cs.berkeley.edu/
- contact: Michael Heuer
- contact_github: heuermh
- image_fn: ucberkeley.svg
- location: [37.8753756, -122.259602]
- twitter: UCBerkeley
-
- - full_name: Institut Pasteur
- short_name: Pasteur
- description: >
- The Institut Pasteur is a private, non-profit foundation. Its mission is to help
- prevent and treat diseases, mainly those of infectious origin, through research,
- teaching, and public health initiatives.
- affiliation: Hub de Bioinformatique et Biostatistique
- address: 25-28 Rue du Dr Roux, 75015 Paris, France
- url: https://www.pasteur.fr/en
- affiliation_url: https://research.pasteur.fr/en/team/bioinformatics-and-biostatistics-hub/
- image_fn: pasteur.svg
- contact: Rémi Planel
- contact_email: rplanel@pasteur.fr
- contact_github: rplanel
- location: [48.840508, 2.311528]
- twitter: institutpasteur
-
- - full_name: International Institute of Information Technology - Bangalore
- short_name: IIIT-B
- description: >
- To build on the track record set by India in general and Bangalore in particular,
- to enable India to play a key role in the global IT scenario
- through a world class institute with a focus on education and research, entrepreneurship and innovation.
- address: 26/C, Electronics City, Hosur Road, Bangalore - 560100, India
- url: https://www.iiitb.ac.in/
- image_fn: iiitb.svg
- contact: Abhinav Sharma
- contact_email: abhinavsharma.dds10@iiitb.net
- contact_github: abhi18av
- location: [12.844722, 77.663056]
- twitter: iiitb_official
-
- - full_name: Max Planck Institute for Biochemistry
- short_name: MPIB
- description: >
- The Max Planck Institute of Biochemistry aims to understand the structure and function of proteins -
- from single molecules to complex organisms.
- affiliation: Technical University of Munich
- address: Am Klopferspitz 18, 82152 Planegg, Germany
- url: https://www.biochem.mpg.de/de
- affiliation_url: https://www.tum.de/en/
- image_fn: mpiblogo.svg
- contact: Drew Behrens
- contact_email: behrens@biochem.mpg.de
- contact_github: drewjbeh
- location: [48.105188, 11.459579]
- twitter: MPI_Biochem
-
- - full_name: Linnaeus University Centre for Ecology and Evolution in Microbial model Systems
- short_name: LNUc-EEMiS
- description: >
- The research that is carried out at the Linnaeus University Centre for
- Ecology and Evolution in Microbial model Systems (EEMiS) is focused on
- marine environments in the Baltic Sea – on the border between sea and
- land, in coastal waters and in the open sea.
- affiliation: Linnaeus University, Kalmar
- address: Norra Kajplan, Kalmar (visiting address)
- url: https://lnu.se/en/research/searchresearch/linnaeus-university-centre-for-ecology-and-evolution-in-microbial-model-systems/
- affiliation_url: https://www.lnu.se
- image_fn: lnuc-eemis-logo.svg
- contact: Daniel Lundin
- contact_email: daniel.lundin@lnu.se
- contact_github: erikrikarddaniel
- location: [56.659756, 16.364226]
- twitter: linneuni
-
- - full_name: Baylor College of Medicine
- short_name: BCM
- description: >
- Baylor College of Medicine is a health sciences university that creates knowledge
- and applies science and discoveries to further education, healthcare and community
- service locally and globally.
- address: One Baylor Plaza, Houston, Texas 77030, USA
- url: https://www.bcm.edu/
- image_fn: bcm.svg
- contact: Michael Jochum
- contact_email: michael.jochum@bcm.edu
- contact_github: MADscientist314
- location: [29.710501, -95.396370]
- twitter: bcmhouston
-
- - full_name: The Hebrew University of Jerusalem
- short_name: HUJI
- affiliation: MethylGrammar Lab
- description: >
- The MethylGrammar Lab at the Hebrew University School of Medicine is developing tools
- for analyzing bisulfite sequencing and other epigenomic sequencing methods. We are
- interested in epigenomic biomarkers of disease, especially DNA methylation.
- address: Hadassah Ein Kerem Medical School, Jerusalem, Israel
- url: https://new.huji.ac.il/en
- affiliation_url: http://methylgrammarlab.org/
- image_fn: huji_final_logo.svg
- contact: Ben Berman
- contact_email: benjamin.berman@mail.huji.ac.il
- contact_github: benbfly
- location: [31.764656, 35.150221]
- twitter: Hujimed
-
- - full_name: National Institute of Biological Standards and Control
- short_name: NIBSC
- affiliation: Analytical and Biological Sciences
- description: >
- The National Institute for Biological Standards and Control (NIBSC) plays a leading
- national and international role in assuring the quality of biological medicines and diagnostics.
- address: Blanche Ln, South Mimms, Potters Bar EN6 3QG, United Kingdom
- url: https://nibsc.org
- image_fn: NIBSC.svg
- contact: Martin Fritzsche
- contact_email: martin.fritzche@nibsc.org
- contact_github: MartinFritzsche
- location: [51.688315, -0.240508]
- twitter: NIBSC_MHRA
-
- - full_name: Ardigen S.A.
- short_name: Ardigen
- description: >
- Ardigen enables AI transformation for biotech and pharmaceutical companies to leverage the
- full potential of data. The company delivers value at the intersection of biology and computational
- methods to increase the likelihood of success and accelerate the drug discovery process.
- With our platforms based on advanced algorithms and state-of-the-art technology, we help researchers
- to get scientific insights from a large amount of data, leading to new discoveries and breakthroughs
- in fields such as personalized medicine and drug development.
- address: Podole 76, 30-394 Poland
- url: https://ardigen.com/
- image_fn: ardigen.svg
- contact: Piotr Faba
- contact_email: piotr.faba@ardigen.com
- contact_github: piotr-faba-ardigen
- location: [50.021348, 19.891909]
- twitter: Ardigen_SA
-
- - full_name: UCL Cancer Institute
- short_name: UCLcancer
- description: >
- The UCL Cancer Institute is the hub for cancer research at University College London, one of the World's leading universities. The Institute draws together over 400 talented scientists who are working together to translate research discoveries into developing kinder, more effective therapies for cancer patients.
- address: 72 Huntley Street, London, WC1E 6DD.
- url: https://www.ucl.ac.uk/cancer
- image_fn: uclcancer.svg
- contact: Lucia Conde
- contact_email: l.conde@ucl.ac.uk
- contact_github: UCL-BLIC
- location: [51.523167, -0.134347]
- twitter: uclcancer
-
- - full_name: Flomics Biotech
- short_name: Flomics
- description: >
- Flomics Biotech is developing cell-free RNA liquid biopsies for early detection of complex diseases.
- We combine optimized molecular biology protocols with machine learning algorithms to obtain accurate, reproducible and meaningful results that can be easily implemented in the clinical practice.
- address: Carrer Dr. Aiguader, 88, 08003 Barcelona, Spain
- url: http://www.flomics.com/
- image_fn: Flomics.logo.svg
- contact: Joao Curado
- contact_email: joao.curado@flomics.com
- contact_github: jcurado-flomics
- location: [41.385251, 2.194007]
- twitter: flomicsbiotech
-
- - full_name: MRC Human Genetics Unit, MRC Institute of Genetics and Molecular Medicine, University of Edinburgh
- short_name: MRCHGU
- description: >
- The MRC Human Genetics Unit discovers how changes in our DNA impact our lives.
- We combine the latest computational and experimental technologies to investigate
- how our genomes work to control the function of molecules, cells and tissues in people and populations.
- address: Western General Hospital, Crewe Road, Edinburgh, United Kingdom
- url: https://www.ed.ac.uk/mrc-human-genetics-unit
- image_fn: igmm.svg
- contact: Alison Meynert
- contact_email: alison.meynert@igmm.ed.ac.uk
- contact_github: ameynert
- location: [55.9613881, -3.2326885]
- twitter: mrc_hgu
-
- - full_name: DRESDEN-concept Genome Center
- short_name: DcGC
- description: >
- The DRESDEN-concept Genome Center is a German competence center for next generation sequencing.
- We offer services for data generation and bioinformatics in the areas of single cell applications,
- various short read sequencing applications, and long read genome sequencing and assembly.
- address: Fetscherstraße 105, 01307 Dresden, Germany
- url: https://genomecenter.tu-dresden.de
- affiliation: Center for Molecular and Cellular Bioengineering, Dresden University of Technology
- affiliation_url: https://tu-dresden.de/cmcb
- image_fn: dcgc.svg
- contact: Katrin Sameith
- contact_email: katrin.sameith@tu-dresden.de
- contact_github: ktrns
- location: [51.056664, 13.776781]
- twitter: tudresden_de
-
- - full_name: University of Tsukuba
- short_name: UT
- description: >
- Bioinformatics Laboratory at the University of Tsukuba works
- on the development of bioinformatic methods and software and
- deployment of bioinformatics and collaborative research.
- We also offer training of 'bioinfo-natives', who can develop new bioinformatic methods.
- address: 1-2 Kasuga, Tsukuba, Ibaraki 305-8550, Japan.
- url: https://www.md.tsukuba.ac.jp/top/en/
- affiliation: Bioinformatics Laboratory
- affiliation_url: https://sites.google.com/view/ozakilab
- image_fn: ut.svg
- contact: Haruka Ozaki
- contact_email: haruka.ozaki@md.tsukuba.ac.jp
- contact_github: yuifu
- location: [36.086151, 140.106427]
- twitter: ozakilab
-
- - full_name: RIKEN Center for Biosystems Dynamics Research
- short_name: RIKEN BDR
- description: >
- RIKEN is Japan's largest comprehensive research institution renowned for high-quality research
- in a diverse range of scientific disciplines. Researchers at the Center for Biosystems Dynamics Research (BDR)
- strive to elucidate the biological functions that unfold within the body during an organism's lifespan,
- by grasping lifecycle progression from birth to death as a dynamic process involving the establishment.
- address: 2-2-3 Minatojima-minamimachi, Chuo-ku, Kobe, Hyogo, Japan
- url: https://www.bdr.riken.jp/en/index.html
- affiliation: RIKEN
- affiliation_url: https://www.riken.jp/en/
- image_fn: RIKEN.svg
- contact: Mika Yoshimura
- contact_email: support-bit@riken.jp
- contact_github: myoshimura080822
- location: [34.657784, 135.214837]
- twitter: BDR_RIKEN
-
- - full_name: Helmholtz Institute for RNA-based Infection Research
- short_name: HIRI
- description: >
- The Helmholtz Institute for RNA-based Infection Research combines interdisciplinary expertise
- with cutting-edge research infrastructure to exploit the vast potential of RNA as a diagnostic,
- target, and drug to combat infectious diseases.
- address: Josef-Schneider-Straße 2, 97080 Würzburg
- url: https://www.helmholtz-hiri.de/
- affiliation: HIRI
- affiliation_url: https://www.helmholtz-hiri.de/
- image_fn: HIRI.svg
- contact: Lars Barquist
- contact_email: lars.barquist@helmholtz-hiri.de
- contact_github: lbarquist
- location: [49.801900, 9.957067]
- twitter: Helmholtz_HIRI
-
- - full_name: Whitehead Institute for Biomedical Research
- short_name: WIBR
- description: >
- Whitehead Institute is a non-profit research institution located in Cambridge, MA.
- The Institute is dedicated to basic biomedical research in order to improve human health.
- address: 455 Main St, Cambridge, MA 02142, USA
- url: http://wi.mit.edu/
- affiliation: WIBR
- affiliation_url: http://wi.mit.edu/
- image_fn: WIBR.svg
- contact: Charalampos Lazaris
- contact_email: lazaris@wi.mit.edu
- contact_github: chlazaris
- location: [42.3632596548816, -71.0895762732727]
- twitter: WhiteheadInst
-
- - full_name: The University of Liverpool
- short_name: UoL
- description: >
- The University of Liverpool is a public university and a member of the Russell Group of research-led universities. Established as University College Liverpool in 1881, it opened its doors to its first set of students in 1882. The university's motto translates into English as "These days of peace foster learning".
- address: University of Liverpool, Crown Street, Liverpool L69 7ZB, UK
- url: https://www.liverpool.ac.uk/
- affiliation: Institute of Systems, Molecular and Integrative Biology
- affiliation_url: https://www.liverpool.ac.uk/systems-molecular-and-integrative-biology/
- image_fn: UoL.svg
- contact: Batool Almarzouq
- contact_email: batool@liverpool.ac.uk
- contact_github: BatoolMM
- location: [53.40689058041372, -2.96222355704338]
- twitter: LivUniISMIB
-
- - full_name: Institute of Zoology
- short_name: IOZ
- description: >
- Institute of Zoology (IOZ), Chinese Academy of Sciences (CAS),
- is a government-funded research institution in zoological sciences.
- It has a long history of over 80 years.
- The predecessor of IOZ was Fan Memorial Institute of Biology founded in 1928.
- address: Institute of Zoology, Chinese Academy of Sciences (CAS), Beijing, 100101, China
- url: http://english.ioz.cas.cn/
- affiliation: IOZ
- affiliation_url: http://ioz.cas.cn/
- image_fn: IOZ.svg
- contact: Atongsa Miyamoto
- contact_email: atongsa42@gmail.com
- contact_github: atongsa
- location: [40.1, 116.16]
-
- - full_name: University of Pavia
- short_name: UNIPV
- description: >
- The University of Pavia is one of the world's oldest academic institutions,
- with its foundations existing as early as the 9th Century.
- The Department of Biology and Biotechnology "Lazzaro Spallanzani" was created in 2012
- with the aim to bring together scientists from 5 different former Departments,
- whose expertise encompasses fundamental area of life science including Biochemistry, Cytology,
- Genetics, Microbiology, Molecular Biology, Pharmacology, Physiology, and Zoology.
- More than 100 people work at DBB, giving rise to and supporting intensive scientific and academic activity.
- address: via Ferrata, 9 - 27100 Pavia, Italy
- url: http://www.unipv.eu/
- affiliation: Department of Biology and Biotechnology
- affiliation_url: https://dbb.unipv.it
- image_fn: DBB.svg
- contact: Francesco Lescai
- contact_email: francesco.lescai@unipv.it
- contact_github: lescai
- location: [45.20255196652138, 9.138647942903898]
- twitter: DBB_UniPV
-
- - full_name: Paleontology and Geobiology LMU Munich
- short_name: PalMuc
- description: >
- The Molecular Geobiology Lab at LMU uses comparative genomics to study the present and past
- interactions between Earth and biosphere, the evolution of primarily marine organisms
- and their symbionts, as well as the evolution of biomineralization.
- affiliation: Ludwig-Maximilians-Universität München
- address: Richard-Wagner-Str. 10, 80333 München, Germany
- url: http://www.geobiology.eu
- affiliation_url: https://www.lmu.de
- image_fn: PalMuc.svg
- contact: Gert Wörheide
- contact_email: geobiologie@geo.uni-muenchen.de
- contact_github: PalMuc
- location: [48.14763887171518, 11.56436287819711]
-
- - full_name: The University of Texas at Dallas
- short_name: UTD
- description: >
- The University of Texas at Dallas (UTD or UT Dallas) is a public
- university with its main campus in Richardson, Texas. It is the largest
- public university in the Dallas area and the northernmost campus of the
- University of Texas system.
- address: Department of Biological Sciences The University of Texas at Dallas 800 W Campbell, BSB12 Richardson TX 75080
- url: https://www.utdallas.edu/
- affiliation: Department of Biological Sciences and Center for Systems Biology
- affiliation_url: https://biology.utdallas.edu/
- image_fn: UTD.svg
- contact: Edmund Miller
- contact_email: Edmund.Miller@utdallas.edu
- contact_github: emiller88
- location: [32.991173, -96.750025]
- twitter: UTD_Biology
-
- - full_name: Leiden University
- short_name: LU
- description: >
- Leiden University is a public research university in Leiden, Netherlands.
- It was founded in 1575 by William, Prince of Orange, as a reward to the
- city of Leiden for its defense against Spanish attacks during the
- Eighty Years' War.
- address: Leiden University, Rapenburg 70, 2311 EZ Leiden, The Netherlands
- url: https://www.universiteitleiden.nl
- affiliation: Faculty of Archaeology
- affiliation_url: https://www.universiteitleiden.nl/en/archaeology
- image_fn: uni_leiden.svg
- contact: Bjorn Peare Bartholdy
- contact_email: b.p.bartholdy@arch.leidenuniv.nl
- contact_github: bbartholdy
- location: [52.1572537935065, 4.485273371571716]
- twitter: UniLeiden
-
- - full_name: Fondazione Policlinico Universitario Agostino Gemelli IRCCS
- short_name: FPG
- description: >
- The Policlinico Gemelli is placed at the heart of the health system,
- developing and spreading a national and international reference model
- regarding the management, organization, technology and humanization of the medical field.
- address: Largo Agostino Gemelli, 00168 Roma RM, Italy
- url: https://www.policlinicogemelli.it/
- affiliation: Bioinformatics Facility Core Research - Gemelli Science and Technology Park (GSTeP)
- affiliation_url: https://gemelligenerator.it/facilities/bioinformatics/
- image_fn: Gemelli.svg
- contact: Luciano Giaco
- contact_email: luciano.giaco@policlinicogemelli.it
- contact_github: lucianogiaco
- location: [41.9291943, 12.427542]
-
- - full_name: Center for Medical Genetics, Ghent Univerity Hospital
- short_name: CMGG
- description: >
- The Center for Medical Genetics Ghent has three main objectives: medical services, research and education. We provide our expertise for the diagnosis and care of patients with hereditary diseases.
- We strive for a better understanding of these hereditary diseases and translate this knowledge into education in a variety of disciplines.
- affiliation: Ghent University, Ghent University Hospital
- address: C. Heymanslaan 10, 9000 Ghent, Belgium
- url: https://www.cmgg.be
- affiliation_url: https://www.uzgent.be/patient/zoek-een-arts-of-dienst/centrum-voor-medische-genetica
- image_fn: cmgg.svg
- contact: Matthias De Smet
- contact_email: ict.cmgg@uzgent.be
- contact_github: CenterForMedicalGeneticsGhent
- location: [51.023023590068824, 3.7233557596027316]
- twitter: cmggent
-
- - full_name: Leibniz Institute for Natural Product Research and Infection Biology – Hans Knöll Institute (HKI)
- short_name: HKI
- description: >
- The HKI is an internationally leading centre for research into new natural products of microbial origin and into the infection biology of pathogenic fungi.
- Researchers at the HKI investigate the biosynthesis and function of microbial natural products as mediators of biological communication.
- The focus here is on new active substances to combat infectious diseases and overcome resistance by developing new anti-infective strategies.
- affiliation: Leibniz Association
- address: Beutenbergstraße 11, 07745 Jena, Germany
- url: https://www.leibniz-hki.de/en
- affiliation_url: https://www.leibniz-gemeinschaft.de/en
- image_fn: HKI.svg
- contact: Jasmin Frangenberg
- contact_email: jasmin.frangenberg@leibniz-hki.de
- contact_github: jasmezz
- location: [50.909054, 11.57305]
- twitter: LeibnizHKI
-
- - full_name: Max Planck Institute for Evolutionary Anthropology (MPI-EVA)
- short_name: MPI-EVA
- description: >
- The Max Planck Institute for Evolutionary Anthropology unites scientists with various backgrounds (natural sciences and humanities) whose aim
- is to investigate the history of humankind from an interdisciplinary perspective with the help of comparative analyses of genes, cultures,
- cognitive abilities, languages and social systems of past and present human populations as well as those of primates closely related to human
- beings.
- affiliation: Max Planck Society
- address: Deutscher Platz 6, 04103 Leipzig, Germany
- url: https://www.eva.mpg.de/
- affiliation_url: https://www.mpg.de
- image_fn: mpi-eva.svg
- contact: James Fellows Yates
- contact_email: james_fellows_yates@eva.mpg.de
- contact_github: jfy133
- location: [51.321355, 12.395279]
- twitter: MPI_EVA_Leipzig
-
- - full_name: Cancer Research UK Manchester Institute (CRUK-MI)
- short_name: CRUK-MI
- description: >
- The CRUK Manchester Institute is a leading cancer research institute within The University of Manchester, spanning the whole spectrum of
- cancer research - from investigating the molecular and cellular basis of cancer, to translational research and the development of therapeutics.
- affiliation: University of Manchester
- address: Cancer Research UK Manchester Institute, The University of Manchester, Alderley Park, SK10 4TG, United Kingdom
- url: https://www.cruk.manchester.ac.uk/
- affiliation_url: https://www.manchester.ac.uk
- image_fn: crukmi.svg
- contact: Katarzyna Kamieniecka
- contact_email: katarzyna.murat@cruk.manchester.ac.uk
- contact_github: kmurat1
- location: [53.2780, -2.2324]
- twitter: CRUK_MI
-
- - full_name: Institut Genetics & Development of Rennes
- short_name: IGDR
- description: >
- The IGDR is deeply engaged in conceiving innovative and multidisciplinary approaches towards a quantitative and dynamic understanding of life.
- Its research revolves around 3 main scientific themes involving several scientific questions from basic to translational research:
- From molecule to cell: structure versus dynamics; From cell to organism: an identity case; From genes to cancer and genetic diseases.
- affiliation: CNRS UMR 6290, University of Rennes 1
- address: Campus Santé de Villejean, 2 avenue du Professeur Léon Bernard, 35043 Rennes CEDEX, France
- url: https://igdr.univ-rennes1.fr/en
- affiliation_url: https://www.univ-rennes1.fr/en
- image_fn: igdr.svg
- contact: Thomas Derrien
- contact_email: thomas.derrien@univ-rennes1.fr
- contact_github: tderrien
- location: [48.117561, -1.695918]
- twitter: IGDRennes
-
- - full_name: German Human Genome-Phenome Archive
- short_name: GHGA
- description: >
- As a nation-wide federated consortium, GHGA is building a secure omics data infrastructure that provides a framework for the use of human genome data for research purposes while preventing data misuse.
- Providing standardised, comparable, and reproducible omics workflows for the research community is one goal of GHGA; to this end, we align with the nf-core community to curate best-practice workflows.
- affiliation: DKFZ
- address: Im Neuenheimer Feld 280, 69120 Heidelberg, Germany
- url: https://www.ghga.de
- affiliation_url: https://www.dkfz.de
- image_fn: GHGA.svg
- contact: Christian Mertes
- contact_email: mertes@in.tum.de
- contact_github: https://github.com/ghga-de
- location: [49.414414, 8.672961]
- twitter: ghga_de
-
- - full_name: Element Biosciences
- short_name: elembio
- description: >
- Element Biosciences is a multi-disciplinary startup focused on innovating genetic analysis tools for the research and diagnostic markets.
- Element Biosciences' mission is to empower the scientific community with more freedom and flexibility to accelerate our collective impact on humanity.
- address: 10055 Barnes Canyon Road Suite 100 San Diego, CA 92121
- url: https://www.elementbiosciences.com/
- image_fn: elembio.svg
- contact: Edmund Miller
- contact_email: edmund.miller@elembio.com
- contact_github: emiller88
- location: [32.898670, -117.199420]
- twitter: elembio
-
- - full_name: Institut Pasteur de Dakar
- short_name: IPD
- description: >
- The Institut Pasteur Dakar Foundation (IPD) is a non-profit association of public utility, concerned with promoting public health and well-being in West Africa.
- address: 36, Avenue Pasteur - BP 220 - DAKAR
- url: https://www.pasteur.sn/en
- affiliation: Department of Epidemiology, Clinical Research, and Data Science
- affiliation_url: https://www.pasteur.sn/en/research-and-public-health/epidemiology-clinical-research-and-data-science
- image_fn: ipd.svg
- contact: Amadou DIALLO
- contact_email: Amadou.DIALLO@pasteur.sn
- contact_github: popodras
- location: [14.656219, -17.435057]
- twitter: PasteurDakar
-
- - full_name: Healx Ltd.
- short_name: Healx
- description: >
- Healx is an AI-powered, patient-inspired tech company, accelerating the discovery and development of treatments for rare diseases.
- address: Charter House, 66-68 Hills Rd, Cambridge, CB2 1LA, UK
- url: https://healx.ai/
- affiliation: Digital Biology
- image_fn: HEALXLOGO.svg
- contact: Jonathan Manning
- contact_email: jonathan.manning@healx.io
- contact_github: pinin4fjords
- location: [52.19631682472531, 0.12972942680431776]
- twitter: healx
-
- - full_name: Human Technopole
- short_name: FHT
- description: >
- Human Technopole is Italy's life science institute, located at the heart of MIND (Milano Innovation District). Human Technopole's overarching mission is to improve human health and well-being by: carrying out frontier research in the life sciences aimed at developing novel approaches in preventive and personalised medicine; setting up and operating scientific services and facilities, to also be made available to external researchers; training the next generation of leading scientists; promoting innovation and progress through technology transfer. Once fully operational, HT will employ over 1,000 scientists from all over the world.
- address: Fondazione Human Technopole - Viale Rita Levi-Montalcini, 1 - Area MIND - Cargo 6 - 20157 Milano, Italy
- url: https://humantechnopole.it/
- affiliation: Computational Biology
- image_fn: FHT.svg
- contact: Davide Rambaldi
- contact_email: davide.rambaldi@fht.org
- contact_github: tucano
- location: [45.52008382049519, 9.106560052477223]
- twitter: humantechnopole
-
- - full_name: Hartree Centre
- short_name: HC
- description: >
- The Hartree Centre helps UK businesses and organisations of any size to explore and adopt supercomputing, data science and artificial intelligence (AI) technologies for enhanced productivity, smarter innovation and economic growth. Backed by significant UK government funding and strategic partnerships with industry leaders such as IBM, Atos and the University of Liverpool, the Hartree Centre is home to some of the most advanced digital technologies and experts in the UK.
- address: STFC Hartree Centre, Sci-Tech Daresbury, Keckwick, Daresbury, Warrington, WA4 4AD
- url: https://www.hartree.stfc.ac.uk/
- affiliation: UKRI
- image_fn: ukri_hartree.svg
- contact: Hartree Centre
- contact_email: hartree@stfc.ac.uk
- contact_github: https://github.com/stfc
- location: [53.34340, -2.64121]
- twitter: HartreeCentre
diff --git a/public_html/.htaccess b/public_html/.htaccess
index f3c7187b8d..f6e7f94c88 100644
--- a/public_html/.htaccess
+++ b/public_html/.htaccess
@@ -9,10 +9,6 @@
RewriteCond %{REQUEST_FILENAME} !-d
RewriteRule (.*)/$ /$1 [L,R=301]
- # Return join names to join.php
- RewriteCond %{REQUEST_URI} ^/join/(.*)$
- RewriteRule ^(.*)$ /join.php?t=$1 [L,NC,QSA]
-
# Event RSS feed
RewriteCond %{REQUEST_URI} ^/events/rss$
RewriteRule ^(.*)$ /events.php?rss [L,NC,QSA]
diff --git a/public_html/assets/graphic_design_assets/workflow_schematics_components/generic/metromap_style_pipeline_workflow_components.pdf b/public_html/assets/graphic_design_assets/workflow_schematics_components/generic/metromap_style_pipeline_workflow_components.pdf
deleted file mode 100644
index 0b69e90363..0000000000
Binary files a/public_html/assets/graphic_design_assets/workflow_schematics_components/generic/metromap_style_pipeline_workflow_components.pdf and /dev/null differ
diff --git a/public_html/assets/graphic_design_assets/workflow_schematics_components/generic/metromap_style_pipeline_workflow_components.svg b/public_html/assets/graphic_design_assets/workflow_schematics_components/generic/metromap_style_pipeline_workflow_components.svg
deleted file mode 100644
index 7685ed48d4..0000000000
--- a/public_html/assets/graphic_design_assets/workflow_schematics_components/generic/metromap_style_pipeline_workflow_components.svg
+++ /dev/null
@@ -1,917 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/double_bam.png b/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/double_bam.png
deleted file mode 100644
index 71036b7e45..0000000000
Binary files a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/double_bam.png and /dev/null differ
diff --git a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/double_bam.svg b/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/double_bam.svg
deleted file mode 100644
index 7759131041..0000000000
--- a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/double_bam.svg
+++ /dev/null
@@ -1,126 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/double_fastq.png b/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/double_fastq.png
deleted file mode 100644
index 49a51f45dc..0000000000
Binary files a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/double_fastq.png and /dev/null differ
diff --git a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/double_fastq.svg b/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/double_fastq.svg
deleted file mode 100644
index 5c59117370..0000000000
--- a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/double_fastq.svg
+++ /dev/null
@@ -1,126 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/multiple_vcf.png b/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/multiple_vcf.png
deleted file mode 100644
index a96b439a95..0000000000
Binary files a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/multiple_vcf.png and /dev/null differ
diff --git a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/multiple_vcf.svg b/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/multiple_vcf.svg
deleted file mode 100644
index ca2b209405..0000000000
--- a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/multiple_vcf.svg
+++ /dev/null
@@ -1,164 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/single_bam.png b/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/single_bam.png
deleted file mode 100644
index 84e32c8b2b..0000000000
Binary files a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/single_bam.png and /dev/null differ
diff --git a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/single_bam.svg b/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/single_bam.svg
deleted file mode 100644
index ec63e536d8..0000000000
--- a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/single_bam.svg
+++ /dev/null
@@ -1,93 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/single_fastq.png b/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/single_fastq.png
deleted file mode 100644
index 45003a8330..0000000000
Binary files a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/single_fastq.png and /dev/null differ
diff --git a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/single_fastq.svg b/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/single_fastq.svg
deleted file mode 100644
index 54fba0ef41..0000000000
--- a/public_html/assets/graphic_design_assets/workflow_schematics_components/sarek/single_fastq.svg
+++ /dev/null
@@ -1,90 +0,0 @@
-
-
-
-
diff --git a/markdown/tools/images/cobiontID.png b/public_html/assets/img/cobiontID.png
similarity index 100%
rename from markdown/tools/images/cobiontID.png
rename to public_html/assets/img/cobiontID.png
diff --git a/public_html/assets/img/contributors-colour/BI.svg b/public_html/assets/img/contributors-colour/BI.svg
deleted file mode 100644
index 4a0a70867b..0000000000
--- a/public_html/assets/img/contributors-colour/BI.svg
+++ /dev/null
@@ -1,31 +0,0 @@
-
-
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/BICR.svg b/public_html/assets/img/contributors-colour/BICR.svg
deleted file mode 100644
index 6c6606cce7..0000000000
--- a/public_html/assets/img/contributors-colour/BICR.svg
+++ /dev/null
@@ -1,1090 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/COH.svg b/public_html/assets/img/contributors-colour/COH.svg
deleted file mode 100644
index 2722efe49b..0000000000
--- a/public_html/assets/img/contributors-colour/COH.svg
+++ /dev/null
@@ -1,65 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/CRG.svg b/public_html/assets/img/contributors-colour/CRG.svg
deleted file mode 100644
index c1fd1c2d6b..0000000000
--- a/public_html/assets/img/contributors-colour/CRG.svg
+++ /dev/null
@@ -1,194 +0,0 @@
-
-
-
-]>
-
diff --git a/public_html/assets/img/contributors-colour/CZI-alt.svg b/public_html/assets/img/contributors-colour/CZI-alt.svg
deleted file mode 100644
index c63b53cae2..0000000000
--- a/public_html/assets/img/contributors-colour/CZI-alt.svg
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/CZI.svg b/public_html/assets/img/contributors-colour/CZI.svg
deleted file mode 100644
index 4615d4dee9..0000000000
--- a/public_html/assets/img/contributors-colour/CZI.svg
+++ /dev/null
@@ -1,38 +0,0 @@
-
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/DBB.svg b/public_html/assets/img/contributors-colour/DBB.svg
deleted file mode 100644
index e8a60f5313..0000000000
--- a/public_html/assets/img/contributors-colour/DBB.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/EASI-Genomics.svg b/public_html/assets/img/contributors-colour/EASI-Genomics.svg
deleted file mode 100644
index c84dc2b75b..0000000000
--- a/public_html/assets/img/contributors-colour/EASI-Genomics.svg
+++ /dev/null
@@ -1,212 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/FHT.svg b/public_html/assets/img/contributors-colour/FHT.svg
deleted file mode 100644
index 0a0dcd4073..0000000000
--- a/public_html/assets/img/contributors-colour/FHT.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/Flomics.logo.svg b/public_html/assets/img/contributors-colour/Flomics.logo.svg
deleted file mode 100644
index fc89b81f03..0000000000
--- a/public_html/assets/img/contributors-colour/Flomics.logo.svg
+++ /dev/null
@@ -1,246 +0,0 @@
-
-
-
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/GHGA.svg b/public_html/assets/img/contributors-colour/GHGA.svg
deleted file mode 100644
index 9034fe1549..0000000000
--- a/public_html/assets/img/contributors-colour/GHGA.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/GIS.svg b/public_html/assets/img/contributors-colour/GIS.svg
deleted file mode 100644
index be8926c2b3..0000000000
--- a/public_html/assets/img/contributors-colour/GIS.svg
+++ /dev/null
@@ -1,158 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/GMI.svg b/public_html/assets/img/contributors-colour/GMI.svg
deleted file mode 100755
index a9cc470c63..0000000000
--- a/public_html/assets/img/contributors-colour/GMI.svg
+++ /dev/null
@@ -1,166 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-colour/Gemelli.svg b/public_html/assets/img/contributors-colour/Gemelli.svg
deleted file mode 100644
index 164d5f318b..0000000000
--- a/public_html/assets/img/contributors-colour/Gemelli.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/HEALXLOGO.svg b/public_html/assets/img/contributors-colour/HEALXLOGO.svg
deleted file mode 100644
index 3d10e310c6..0000000000
--- a/public_html/assets/img/contributors-colour/HEALXLOGO.svg
+++ /dev/null
@@ -1,97 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-colour/HIRI.svg b/public_html/assets/img/contributors-colour/HIRI.svg
deleted file mode 100644
index 5978c5dd8e..0000000000
--- a/public_html/assets/img/contributors-colour/HIRI.svg
+++ /dev/null
@@ -1,129 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-colour/HKI.svg b/public_html/assets/img/contributors-colour/HKI.svg
deleted file mode 100644
index 379a1fce13..0000000000
--- a/public_html/assets/img/contributors-colour/HKI.svg
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/IARC.svg b/public_html/assets/img/contributors-colour/IARC.svg
deleted file mode 100644
index 70c2021fc5..0000000000
--- a/public_html/assets/img/contributors-colour/IARC.svg
+++ /dev/null
@@ -1,226 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/IISER.svg b/public_html/assets/img/contributors-colour/IISER.svg
deleted file mode 100644
index 5c0856864a..0000000000
--- a/public_html/assets/img/contributors-colour/IISER.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/IMP.svg b/public_html/assets/img/contributors-colour/IMP.svg
deleted file mode 100644
index d0d39e9599..0000000000
--- a/public_html/assets/img/contributors-colour/IMP.svg
+++ /dev/null
@@ -1,195 +0,0 @@
-
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/IOZ.svg b/public_html/assets/img/contributors-colour/IOZ.svg
deleted file mode 100644
index bcde28f9a9..0000000000
--- a/public_html/assets/img/contributors-colour/IOZ.svg
+++ /dev/null
@@ -1,1800 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-colour/NBIS.svg b/public_html/assets/img/contributors-colour/NBIS.svg
deleted file mode 100644
index 3f2a24354a..0000000000
--- a/public_html/assets/img/contributors-colour/NBIS.svg
+++ /dev/null
@@ -1,122 +0,0 @@
-
-
-
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/NGI.svg b/public_html/assets/img/contributors-colour/NGI.svg
deleted file mode 100644
index 4666739861..0000000000
--- a/public_html/assets/img/contributors-colour/NGI.svg
+++ /dev/null
@@ -1,257 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/NIBSC.svg b/public_html/assets/img/contributors-colour/NIBSC.svg
deleted file mode 100644
index 97dfc3f264..0000000000
--- a/public_html/assets/img/contributors-colour/NIBSC.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/PalMuc.svg b/public_html/assets/img/contributors-colour/PalMuc.svg
deleted file mode 100644
index cb47f82b11..0000000000
--- a/public_html/assets/img/contributors-colour/PalMuc.svg
+++ /dev/null
@@ -1,699 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-colour/QBiC.svg b/public_html/assets/img/contributors-colour/QBiC.svg
deleted file mode 100644
index cbd3dea2ac..0000000000
--- a/public_html/assets/img/contributors-colour/QBiC.svg
+++ /dev/null
@@ -1,36 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/RIKEN.svg b/public_html/assets/img/contributors-colour/RIKEN.svg
deleted file mode 100644
index e5c69afab0..0000000000
--- a/public_html/assets/img/contributors-colour/RIKEN.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/SciLifeLab.svg b/public_html/assets/img/contributors-colour/SciLifeLab.svg
deleted file mode 100644
index b8834e88f5..0000000000
--- a/public_html/assets/img/contributors-colour/SciLifeLab.svg
+++ /dev/null
@@ -1,69 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/SciLifeLabDC.svg b/public_html/assets/img/contributors-colour/SciLifeLabDC.svg
deleted file mode 100644
index a9adc17389..0000000000
--- a/public_html/assets/img/contributors-colour/SciLifeLabDC.svg
+++ /dev/null
@@ -1,65 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/UTD.svg b/public_html/assets/img/contributors-colour/UTD.svg
deleted file mode 100755
index f848f33821..0000000000
--- a/public_html/assets/img/contributors-colour/UTD.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/UoL.svg b/public_html/assets/img/contributors-colour/UoL.svg
deleted file mode 100644
index 73807c82fb..0000000000
--- a/public_html/assets/img/contributors-colour/UoL.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/WIBR.svg b/public_html/assets/img/contributors-colour/WIBR.svg
deleted file mode 100644
index c700448e03..0000000000
--- a/public_html/assets/img/contributors-colour/WIBR.svg
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/ardigen.svg b/public_html/assets/img/contributors-colour/ardigen.svg
deleted file mode 100644
index 4ee2ab867a..0000000000
--- a/public_html/assets/img/contributors-colour/ardigen.svg
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/aws.svg b/public_html/assets/img/contributors-colour/aws.svg
deleted file mode 100644
index 1029ab9283..0000000000
--- a/public_html/assets/img/contributors-colour/aws.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/azure.svg b/public_html/assets/img/contributors-colour/azure.svg
deleted file mode 100644
index 9288c79217..0000000000
--- a/public_html/assets/img/contributors-colour/azure.svg
+++ /dev/null
@@ -1,23 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/bcm.svg b/public_html/assets/img/contributors-colour/bcm.svg
deleted file mode 100644
index d95424e026..0000000000
--- a/public_html/assets/img/contributors-colour/bcm.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/bovreg.svg b/public_html/assets/img/contributors-colour/bovreg.svg
deleted file mode 100644
index d7e07bd273..0000000000
--- a/public_html/assets/img/contributors-colour/bovreg.svg
+++ /dev/null
@@ -1,5633 +0,0 @@
-
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/cmgg.svg b/public_html/assets/img/contributors-colour/cmgg.svg
deleted file mode 100644
index 2fdd6d91d8..0000000000
--- a/public_html/assets/img/contributors-colour/cmgg.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/crick.svg b/public_html/assets/img/contributors-colour/crick.svg
deleted file mode 100644
index 9ec0fb58f4..0000000000
--- a/public_html/assets/img/contributors-colour/crick.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/crukmi.svg b/public_html/assets/img/contributors-colour/crukmi.svg
deleted file mode 100644
index b33c74c2e1..0000000000
--- a/public_html/assets/img/contributors-colour/crukmi.svg
+++ /dev/null
@@ -1,927 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-colour/cu-logo.svg b/public_html/assets/img/contributors-colour/cu-logo.svg
deleted file mode 100644
index 10e52f0ea1..0000000000
--- a/public_html/assets/img/contributors-colour/cu-logo.svg
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/curie.svg b/public_html/assets/img/contributors-colour/curie.svg
deleted file mode 100644
index b965b0a019..0000000000
--- a/public_html/assets/img/contributors-colour/curie.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/czbiohub.svg b/public_html/assets/img/contributors-colour/czbiohub.svg
deleted file mode 100644
index bd34c2f1fc..0000000000
--- a/public_html/assets/img/contributors-colour/czbiohub.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/dcgc.svg b/public_html/assets/img/contributors-colour/dcgc.svg
deleted file mode 100644
index 539dcb889b..0000000000
--- a/public_html/assets/img/contributors-colour/dcgc.svg
+++ /dev/null
@@ -1,290 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/dfg_logo.svg b/public_html/assets/img/contributors-colour/dfg_logo.svg
deleted file mode 100644
index 02642c4bca..0000000000
--- a/public_html/assets/img/contributors-colour/dfg_logo.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/dockstore.svg b/public_html/assets/img/contributors-colour/dockstore.svg
deleted file mode 100644
index cc2beb4c21..0000000000
--- a/public_html/assets/img/contributors-colour/dockstore.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/dzne.svg b/public_html/assets/img/contributors-colour/dzne.svg
deleted file mode 100644
index 8f06078341..0000000000
--- a/public_html/assets/img/contributors-colour/dzne.svg
+++ /dev/null
@@ -1,35 +0,0 @@
-
-
-
-
-
diff --git a/public_html/assets/img/contributors-colour/elembio.svg b/public_html/assets/img/contributors-colour/elembio.svg
deleted file mode 100644
index d9ac02ff3c..0000000000
--- a/public_html/assets/img/contributors-colour/elembio.svg
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/public_html/assets/img/contributors-colour/hackmd.svg b/public_html/assets/img/contributors-colour/hackmd.svg
deleted file mode 100644
index 606103b359..0000000000
--- a/public_html/assets/img/contributors-colour/hackmd.svg
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/huji_final_logo.svg b/public_html/assets/img/contributors-colour/huji_final_logo.svg
deleted file mode 100644
index 44eecb51f2..0000000000
--- a/public_html/assets/img/contributors-colour/huji_final_logo.svg
+++ /dev/null
@@ -1,105 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/igdr.svg b/public_html/assets/img/contributors-colour/igdr.svg
deleted file mode 100644
index 0842b19d9d..0000000000
--- a/public_html/assets/img/contributors-colour/igdr.svg
+++ /dev/null
@@ -1,62 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-colour/igmm.svg b/public_html/assets/img/contributors-colour/igmm.svg
deleted file mode 100644
index d7303f715c..0000000000
--- a/public_html/assets/img/contributors-colour/igmm.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/iiitb.svg b/public_html/assets/img/contributors-colour/iiitb.svg
deleted file mode 100644
index e3c79b003d..0000000000
--- a/public_html/assets/img/contributors-colour/iiitb.svg
+++ /dev/null
@@ -1,138 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-colour/ipd.svg b/public_html/assets/img/contributors-colour/ipd.svg
deleted file mode 100644
index f9afcc33a3..0000000000
--- a/public_html/assets/img/contributors-colour/ipd.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/lifebit.svg b/public_html/assets/img/contributors-colour/lifebit.svg
deleted file mode 100644
index f12319715e..0000000000
--- a/public_html/assets/img/contributors-colour/lifebit.svg
+++ /dev/null
@@ -1,32 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/lnuc-eemis-logo.svg b/public_html/assets/img/contributors-colour/lnuc-eemis-logo.svg
deleted file mode 100644
index e739a0c6f8..0000000000
--- a/public_html/assets/img/contributors-colour/lnuc-eemis-logo.svg
+++ /dev/null
@@ -1,417 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-colour/mpi-eva.svg b/public_html/assets/img/contributors-colour/mpi-eva.svg
deleted file mode 100644
index f5296df963..0000000000
--- a/public_html/assets/img/contributors-colour/mpi-eva.svg
+++ /dev/null
@@ -1,227 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-colour/mpiblogo.svg b/public_html/assets/img/contributors-colour/mpiblogo.svg
deleted file mode 100644
index a0c3791b73..0000000000
--- a/public_html/assets/img/contributors-colour/mpiblogo.svg
+++ /dev/null
@@ -1,1683 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-colour/nyu.svg b/public_html/assets/img/contributors-colour/nyu.svg
deleted file mode 100644
index 07eefff973..0000000000
--- a/public_html/assets/img/contributors-colour/nyu.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/pasteur.svg b/public_html/assets/img/contributors-colour/pasteur.svg
deleted file mode 100644
index 24ff5e6d42..0000000000
--- a/public_html/assets/img/contributors-colour/pasteur.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/pilm.svg b/public_html/assets/img/contributors-colour/pilm.svg
deleted file mode 100644
index ef30ec226f..0000000000
--- a/public_html/assets/img/contributors-colour/pilm.svg
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/public_html/assets/img/contributors-colour/rbgv.svg b/public_html/assets/img/contributors-colour/rbgv.svg
deleted file mode 100644
index 4e2de7fe47..0000000000
--- a/public_html/assets/img/contributors-colour/rbgv.svg
+++ /dev/null
@@ -1,71 +0,0 @@
-
-
-
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/slu.svg b/public_html/assets/img/contributors-colour/slu.svg
deleted file mode 100644
index 89e830eb0f..0000000000
--- a/public_html/assets/img/contributors-colour/slu.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/sysucc.svg b/public_html/assets/img/contributors-colour/sysucc.svg
deleted file mode 100644
index 5fc5f618e5..0000000000
--- a/public_html/assets/img/contributors-colour/sysucc.svg
+++ /dev/null
@@ -1,765 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-colour/ucberkeley.svg b/public_html/assets/img/contributors-colour/ucberkeley.svg
deleted file mode 100644
index 5c9612d0bc..0000000000
--- a/public_html/assets/img/contributors-colour/ucberkeley.svg
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-colour/uclcancer.svg b/public_html/assets/img/contributors-colour/uclcancer.svg
deleted file mode 100644
index df0a059586..0000000000
--- a/public_html/assets/img/contributors-colour/uclcancer.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/ukri_hartree.svg b/public_html/assets/img/contributors-colour/ukri_hartree.svg
deleted file mode 100644
index 1dac24b8eb..0000000000
--- a/public_html/assets/img/contributors-colour/ukri_hartree.svg
+++ /dev/null
@@ -1,59 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-colour/uni_leiden.svg b/public_html/assets/img/contributors-colour/uni_leiden.svg
deleted file mode 100644
index 7532679b33..0000000000
--- a/public_html/assets/img/contributors-colour/uni_leiden.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-colour/ut.svg b/public_html/assets/img/contributors-colour/ut.svg
deleted file mode 100644
index 5891f8aa47..0000000000
--- a/public_html/assets/img/contributors-colour/ut.svg
+++ /dev/null
@@ -1,70 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-colour/workflowhub.svg b/public_html/assets/img/contributors-colour/workflowhub.svg
deleted file mode 100644
index d1c1318424..0000000000
--- a/public_html/assets/img/contributors-colour/workflowhub.svg
+++ /dev/null
@@ -1,179 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-white/BI.svg b/public_html/assets/img/contributors-white/BI.svg
deleted file mode 100644
index 4b174fb263..0000000000
--- a/public_html/assets/img/contributors-white/BI.svg
+++ /dev/null
@@ -1,124 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-white/BICR.svg b/public_html/assets/img/contributors-white/BICR.svg
deleted file mode 100644
index 0d114b1007..0000000000
--- a/public_html/assets/img/contributors-white/BICR.svg
+++ /dev/null
@@ -1,1074 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/COH.svg b/public_html/assets/img/contributors-white/COH.svg
deleted file mode 100644
index d989491bc5..0000000000
--- a/public_html/assets/img/contributors-white/COH.svg
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/CRG.svg b/public_html/assets/img/contributors-white/CRG.svg
deleted file mode 100644
index 8ce3cc955d..0000000000
--- a/public_html/assets/img/contributors-white/CRG.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/CZI.svg b/public_html/assets/img/contributors-white/CZI.svg
deleted file mode 100644
index 3839795e4b..0000000000
--- a/public_html/assets/img/contributors-white/CZI.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/DBB.svg b/public_html/assets/img/contributors-white/DBB.svg
deleted file mode 100644
index 8e0025377e..0000000000
--- a/public_html/assets/img/contributors-white/DBB.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/EASI-Genomics.svg b/public_html/assets/img/contributors-white/EASI-Genomics.svg
deleted file mode 100644
index cac22c6a5a..0000000000
--- a/public_html/assets/img/contributors-white/EASI-Genomics.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/FHT.svg b/public_html/assets/img/contributors-white/FHT.svg
deleted file mode 100644
index fd39911212..0000000000
--- a/public_html/assets/img/contributors-white/FHT.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/Flomics.logo.svg b/public_html/assets/img/contributors-white/Flomics.logo.svg
deleted file mode 100644
index 9eafe0ebf0..0000000000
--- a/public_html/assets/img/contributors-white/Flomics.logo.svg
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-white/GHGA.svg b/public_html/assets/img/contributors-white/GHGA.svg
deleted file mode 100644
index a9c5aea47f..0000000000
--- a/public_html/assets/img/contributors-white/GHGA.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/GIS.svg b/public_html/assets/img/contributors-white/GIS.svg
deleted file mode 100644
index bf8b8acee2..0000000000
--- a/public_html/assets/img/contributors-white/GIS.svg
+++ /dev/null
@@ -1,138 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/GMI.svg b/public_html/assets/img/contributors-white/GMI.svg
deleted file mode 100755
index d04d70b69e..0000000000
--- a/public_html/assets/img/contributors-white/GMI.svg
+++ /dev/null
@@ -1,166 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-white/Gemelli.svg b/public_html/assets/img/contributors-white/Gemelli.svg
deleted file mode 100644
index f2f0091405..0000000000
--- a/public_html/assets/img/contributors-white/Gemelli.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/HEALXLOGO.svg b/public_html/assets/img/contributors-white/HEALXLOGO.svg
deleted file mode 100644
index c3cffb96b1..0000000000
--- a/public_html/assets/img/contributors-white/HEALXLOGO.svg
+++ /dev/null
@@ -1,97 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-white/HIRI.svg b/public_html/assets/img/contributors-white/HIRI.svg
deleted file mode 100644
index 1ae052b049..0000000000
--- a/public_html/assets/img/contributors-white/HIRI.svg
+++ /dev/null
@@ -1,130 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-white/HKI.svg b/public_html/assets/img/contributors-white/HKI.svg
deleted file mode 100644
index c7ca6528aa..0000000000
--- a/public_html/assets/img/contributors-white/HKI.svg
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/public_html/assets/img/contributors-white/IARC.svg b/public_html/assets/img/contributors-white/IARC.svg
deleted file mode 100644
index e915a07931..0000000000
--- a/public_html/assets/img/contributors-white/IARC.svg
+++ /dev/null
@@ -1,355 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/IISER.svg b/public_html/assets/img/contributors-white/IISER.svg
deleted file mode 100644
index fc6cc06f40..0000000000
--- a/public_html/assets/img/contributors-white/IISER.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/IMP.svg b/public_html/assets/img/contributors-white/IMP.svg
deleted file mode 100644
index a0db53f835..0000000000
--- a/public_html/assets/img/contributors-white/IMP.svg
+++ /dev/null
@@ -1,316 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/IOZ.svg b/public_html/assets/img/contributors-white/IOZ.svg
deleted file mode 100644
index af660b513f..0000000000
--- a/public_html/assets/img/contributors-white/IOZ.svg
+++ /dev/null
@@ -1,733 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-white/NBIS.svg b/public_html/assets/img/contributors-white/NBIS.svg
deleted file mode 100644
index f6c3abac9a..0000000000
--- a/public_html/assets/img/contributors-white/NBIS.svg
+++ /dev/null
@@ -1,123 +0,0 @@
-
-
-
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/NGI.svg b/public_html/assets/img/contributors-white/NGI.svg
deleted file mode 100644
index 4f2984cca8..0000000000
--- a/public_html/assets/img/contributors-white/NGI.svg
+++ /dev/null
@@ -1,242 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/NIBSC.svg b/public_html/assets/img/contributors-white/NIBSC.svg
deleted file mode 100644
index a599d223ae..0000000000
--- a/public_html/assets/img/contributors-white/NIBSC.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/PalMuc.svg b/public_html/assets/img/contributors-white/PalMuc.svg
deleted file mode 100644
index 61b121c4c8..0000000000
--- a/public_html/assets/img/contributors-white/PalMuc.svg
+++ /dev/null
@@ -1,907 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-white/QBiC.svg b/public_html/assets/img/contributors-white/QBiC.svg
deleted file mode 100644
index 00c73faebf..0000000000
--- a/public_html/assets/img/contributors-white/QBiC.svg
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/RIKEN.svg b/public_html/assets/img/contributors-white/RIKEN.svg
deleted file mode 100644
index a50a479e73..0000000000
--- a/public_html/assets/img/contributors-white/RIKEN.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/SciLifeLab.svg b/public_html/assets/img/contributors-white/SciLifeLab.svg
deleted file mode 100644
index 321a2b4a52..0000000000
--- a/public_html/assets/img/contributors-white/SciLifeLab.svg
+++ /dev/null
@@ -1,69 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/SciLifeLabDC.svg b/public_html/assets/img/contributors-white/SciLifeLabDC.svg
deleted file mode 100644
index 75aba4a689..0000000000
--- a/public_html/assets/img/contributors-white/SciLifeLabDC.svg
+++ /dev/null
@@ -1,61 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/UTD.svg b/public_html/assets/img/contributors-white/UTD.svg
deleted file mode 100644
index 6f48c0f0b2..0000000000
--- a/public_html/assets/img/contributors-white/UTD.svg
+++ /dev/null
@@ -1,301 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-white/UoL.svg b/public_html/assets/img/contributors-white/UoL.svg
deleted file mode 100644
index 627ee87134..0000000000
--- a/public_html/assets/img/contributors-white/UoL.svg
+++ /dev/null
@@ -1,462 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-white/WIBR.svg b/public_html/assets/img/contributors-white/WIBR.svg
deleted file mode 100644
index 11ac0a4d75..0000000000
--- a/public_html/assets/img/contributors-white/WIBR.svg
+++ /dev/null
@@ -1,5 +0,0 @@
-
diff --git a/public_html/assets/img/contributors-white/ardigen.svg b/public_html/assets/img/contributors-white/ardigen.svg
deleted file mode 100644
index c91f1d9ceb..0000000000
--- a/public_html/assets/img/contributors-white/ardigen.svg
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/aws.svg b/public_html/assets/img/contributors-white/aws.svg
deleted file mode 100644
index cead07cb72..0000000000
--- a/public_html/assets/img/contributors-white/aws.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/azure.svg b/public_html/assets/img/contributors-white/azure.svg
deleted file mode 100644
index 9288c79217..0000000000
--- a/public_html/assets/img/contributors-white/azure.svg
+++ /dev/null
@@ -1,23 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/bcm.svg b/public_html/assets/img/contributors-white/bcm.svg
deleted file mode 100644
index 159438fbb7..0000000000
--- a/public_html/assets/img/contributors-white/bcm.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/cmgg.svg b/public_html/assets/img/contributors-white/cmgg.svg
deleted file mode 100644
index 141609d982..0000000000
--- a/public_html/assets/img/contributors-white/cmgg.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/crick.svg b/public_html/assets/img/contributors-white/crick.svg
deleted file mode 100644
index fa7c1c113e..0000000000
--- a/public_html/assets/img/contributors-white/crick.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/crukmi.svg b/public_html/assets/img/contributors-white/crukmi.svg
deleted file mode 100644
index 6b4d20b00f..0000000000
--- a/public_html/assets/img/contributors-white/crukmi.svg
+++ /dev/null
@@ -1,1061 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/cu-logo.svg b/public_html/assets/img/contributors-white/cu-logo.svg
deleted file mode 100644
index 998b543283..0000000000
--- a/public_html/assets/img/contributors-white/cu-logo.svg
+++ /dev/null
@@ -1,84 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/curie.svg b/public_html/assets/img/contributors-white/curie.svg
deleted file mode 100644
index b5fdfb950e..0000000000
--- a/public_html/assets/img/contributors-white/curie.svg
+++ /dev/null
@@ -1,136 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-white/czbiohub.svg b/public_html/assets/img/contributors-white/czbiohub.svg
deleted file mode 100644
index 4a6d5cce68..0000000000
--- a/public_html/assets/img/contributors-white/czbiohub.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/dcgc.svg b/public_html/assets/img/contributors-white/dcgc.svg
deleted file mode 100644
index f334a2f10d..0000000000
--- a/public_html/assets/img/contributors-white/dcgc.svg
+++ /dev/null
@@ -1,287 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/dzne.svg b/public_html/assets/img/contributors-white/dzne.svg
deleted file mode 100644
index 38e31869e3..0000000000
--- a/public_html/assets/img/contributors-white/dzne.svg
+++ /dev/null
@@ -1,74 +0,0 @@
-
-
-
-
-
-
diff --git a/public_html/assets/img/contributors-white/elembio.svg b/public_html/assets/img/contributors-white/elembio.svg
deleted file mode 100644
index 73c8cda6b9..0000000000
--- a/public_html/assets/img/contributors-white/elembio.svg
+++ /dev/null
@@ -1,39 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-white/hackmd.svg b/public_html/assets/img/contributors-white/hackmd.svg
deleted file mode 100644
index 02c20e47e1..0000000000
--- a/public_html/assets/img/contributors-white/hackmd.svg
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/huji_final_logo.svg b/public_html/assets/img/contributors-white/huji_final_logo.svg
deleted file mode 100644
index 2d24756da5..0000000000
--- a/public_html/assets/img/contributors-white/huji_final_logo.svg
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/igdr.svg b/public_html/assets/img/contributors-white/igdr.svg
deleted file mode 100644
index ebdc129248..0000000000
--- a/public_html/assets/img/contributors-white/igdr.svg
+++ /dev/null
@@ -1,65 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-white/igmm.svg b/public_html/assets/img/contributors-white/igmm.svg
deleted file mode 100644
index fe315fbb12..0000000000
--- a/public_html/assets/img/contributors-white/igmm.svg
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/contributors-white/iiitb.svg b/public_html/assets/img/contributors-white/iiitb.svg
deleted file mode 100644
index 2b0d0fb5ab..0000000000
--- a/public_html/assets/img/contributors-white/iiitb.svg
+++ /dev/null
@@ -1,138 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-white/ipd.svg b/public_html/assets/img/contributors-white/ipd.svg
deleted file mode 100644
index 7a8d7dd3a5..0000000000
--- a/public_html/assets/img/contributors-white/ipd.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/lifebit.svg b/public_html/assets/img/contributors-white/lifebit.svg
deleted file mode 100644
index bed2d9c8cb..0000000000
--- a/public_html/assets/img/contributors-white/lifebit.svg
+++ /dev/null
@@ -1,32 +0,0 @@
-
diff --git a/public_html/assets/img/contributors-white/lnuc-eemis-logo.svg b/public_html/assets/img/contributors-white/lnuc-eemis-logo.svg
deleted file mode 100644
index cddd01aa86..0000000000
--- a/public_html/assets/img/contributors-white/lnuc-eemis-logo.svg
+++ /dev/null
@@ -1,320 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-white/mpi-eva.svg b/public_html/assets/img/contributors-white/mpi-eva.svg
deleted file mode 100644
index f5189a726a..0000000000
--- a/public_html/assets/img/contributors-white/mpi-eva.svg
+++ /dev/null
@@ -1,229 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-white/mpiblogo.svg b/public_html/assets/img/contributors-white/mpiblogo.svg
deleted file mode 100644
index 0a3b65f0e2..0000000000
--- a/public_html/assets/img/contributors-white/mpiblogo.svg
+++ /dev/null
@@ -1,370 +0,0 @@
-
-
-
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/nyu.svg b/public_html/assets/img/contributors-white/nyu.svg
deleted file mode 100644
index 0475bf96f7..0000000000
--- a/public_html/assets/img/contributors-white/nyu.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/pasteur.svg b/public_html/assets/img/contributors-white/pasteur.svg
deleted file mode 100644
index da88586ac8..0000000000
--- a/public_html/assets/img/contributors-white/pasteur.svg
+++ /dev/null
@@ -1,149 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-white/pilm.svg b/public_html/assets/img/contributors-white/pilm.svg
deleted file mode 100644
index 878d5dc2de..0000000000
--- a/public_html/assets/img/contributors-white/pilm.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/rbgv.svg b/public_html/assets/img/contributors-white/rbgv.svg
deleted file mode 100644
index f0b67b2229..0000000000
--- a/public_html/assets/img/contributors-white/rbgv.svg
+++ /dev/null
@@ -1,72 +0,0 @@
-
-
-
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/slu.svg b/public_html/assets/img/contributors-white/slu.svg
deleted file mode 100644
index 803b9a089c..0000000000
--- a/public_html/assets/img/contributors-white/slu.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/sysucc.svg b/public_html/assets/img/contributors-white/sysucc.svg
deleted file mode 100644
index 7741ebbc9f..0000000000
--- a/public_html/assets/img/contributors-white/sysucc.svg
+++ /dev/null
@@ -1,393 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-white/ucberkeley.svg b/public_html/assets/img/contributors-white/ucberkeley.svg
deleted file mode 100644
index 89713776a5..0000000000
--- a/public_html/assets/img/contributors-white/ucberkeley.svg
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
diff --git a/public_html/assets/img/contributors-white/uclcancer.svg b/public_html/assets/img/contributors-white/uclcancer.svg
deleted file mode 100644
index 22bb109146..0000000000
--- a/public_html/assets/img/contributors-white/uclcancer.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public_html/assets/img/contributors-white/ukri_hartree.svg b/public_html/assets/img/contributors-white/ukri_hartree.svg
deleted file mode 100644
index 90147dbcbb..0000000000
--- a/public_html/assets/img/contributors-white/ukri_hartree.svg
+++ /dev/null
@@ -1,343 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-white/uni_leiden.svg b/public_html/assets/img/contributors-white/uni_leiden.svg
deleted file mode 100644
index 354a7bbc8f..0000000000
--- a/public_html/assets/img/contributors-white/uni_leiden.svg
+++ /dev/null
@@ -1,194 +0,0 @@
-
-
-
-
diff --git a/public_html/assets/img/contributors-white/ut.svg b/public_html/assets/img/contributors-white/ut.svg
deleted file mode 100644
index 3ce5448b2e..0000000000
--- a/public_html/assets/img/contributors-white/ut.svg
+++ /dev/null
@@ -1,117 +0,0 @@
-
-
-
diff --git a/public_html/assets/img/developer-images/nextflow-wrapping.svg b/public_html/assets/img/developer-images/nextflow-wrapping.svg
new file mode 100644
index 0000000000..0395f09f73
--- /dev/null
+++ b/public_html/assets/img/developer-images/nextflow-wrapping.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/public_html/assets/img/developer-images/software-packaging.svg b/public_html/assets/img/developer-images/software-packaging.svg
new file mode 100644
index 0000000000..7787535f8c
--- /dev/null
+++ b/public_html/assets/img/developer-images/software-packaging.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/public_html/assets/img/genomehubs-icon-512.png b/public_html/assets/img/genomehubs-icon-512.png
new file mode 100644
index 0000000000..0c076d4bc0
Binary files /dev/null and b/public_html/assets/img/genomehubs-icon-512.png differ
diff --git a/public_html/assets/img/logo/nf-core-logo-darkbg.ai b/public_html/assets/img/logo/nf-core-logo-darkbg.ai
deleted file mode 100644
index e0eb59b316..0000000000
--- a/public_html/assets/img/logo/nf-core-logo-darkbg.ai
+++ /dev/null
@@ -1,6363 +0,0 @@
- CMYK
- PROCESS
- 75.000000
- 0.000000
- 100.000000
- 0.000000
-
-
- C=85 M=10 Y=100 K=10
- CMYK
- PROCESS
- 85.000000
- 10.000000
- 100.000000
- 10.000000
-
-
- C=90 M=30 Y=95 K=30
- CMYK
- PROCESS
- 90.000000
- 30.000000
- 95.000000
- 30.000000
-
-
- C=75 M=0 Y=75 K=0
- CMYK
- PROCESS
- 75.000000
- 0.000000
- 75.000000
- 0.000000
-
-
- C=80 M=10 Y=45 K=0
- CMYK
- PROCESS
- 80.000000
- 10.000000
- 45.000000
- 0.000000
-
-
- C=70 M=15 Y=0 K=0
- CMYK
- PROCESS
- 70.000000
- 15.000000
- 0.000000
- 0.000000
-
-
- C=85 M=50 Y=0 K=0
- CMYK
- PROCESS
- 85.000000
- 50.000000
- 0.000000
- 0.000000
-
-
- C=100 M=95 Y=5 K=0
- CMYK
- PROCESS
- 100.000000
- 95.000000
- 5.000000
- 0.000000
-
-
- C=100 M=100 Y=25 K=25
- CMYK
- PROCESS
- 100.000000
- 100.000000
- 25.000000
- 25.000000
-
-
- C=75 M=100 Y=0 K=0
- CMYK
- PROCESS
- 75.000000
- 100.000000
- 0.000000
- 0.000000
-
-
- C=50 M=100 Y=0 K=0
- CMYK
- PROCESS
- 50.000000
- 100.000000
- 0.000000
- 0.000000
-
-
- C=35 M=100 Y=35 K=10
- CMYK
- PROCESS
- 35.000000
- 100.000000
- 35.000000
- 10.000000
-
-
- C=10 M=100 Y=50 K=0
- CMYK
- PROCESS
- 10.000000
- 100.000000
- 50.000000
- 0.000000
-
-
- C=0 M=95 Y=20 K=0
- CMYK
- PROCESS
- 0.000000
- 95.000000
- 20.000000
- 0.000000
-
-
- C=25 M=25 Y=40 K=0
- CMYK
- PROCESS
- 25.000000
- 25.000000
- 40.000000
- 0.000000
-
-
- C=40 M=45 Y=50 K=5
- CMYK
- PROCESS
- 40.000000
- 45.000000
- 50.000000
- 5.000000
-
-
- C=50 M=50 Y=60 K=25
- CMYK
- PROCESS
- 50.000000
- 50.000000
- 60.000000
- 25.000000
-
-
- C=55 M=60 Y=65 K=40
- CMYK
- PROCESS
- 55.000000
- 60.000000
- 65.000000
- 40.000000
-
-
- C=25 M=40 Y=65 K=0
- CMYK
- PROCESS
- 25.000000
- 40.000000
- 65.000000
- 0.000000
-
-
- C=30 M=50 Y=75 K=10
- CMYK
- PROCESS
- 30.000000
- 50.000000
- 75.000000
- 10.000000
-
-
- C=35 M=60 Y=80 K=25
- CMYK
- PROCESS
- 35.000000
- 60.000000
- 80.000000
- 25.000000
-
-
- C=40 M=65 Y=90 K=35
- CMYK
- PROCESS
- 40.000000
- 65.000000
- 90.000000
- 35.000000
-
-
- C=40 M=70 Y=100 K=50
- CMYK
- PROCESS
- 40.000000
- 70.000000
- 100.000000
- 50.000000
-
-
- C=50 M=70 Y=80 K=70
- CMYK
- PROCESS
- 50.000000
- 70.000000
- 80.000000
- 70.000000
-
-
-
-
-
- Grays
- 1
-
-
-
- C=0 M=0 Y=0 K=100
- CMYK
- PROCESS
- 0.000000
- 0.000000
- 0.000000
- 100.000000
-
-
- C=0 M=0 Y=0 K=90
- CMYK
- PROCESS
- 0.000000
- 0.000000
- 0.000000
- 89.999400
-
-
- C=0 M=0 Y=0 K=80
- CMYK
- PROCESS
- 0.000000
- 0.000000
- 0.000000
- 79.998800
-
-
- C=0 M=0 Y=0 K=70
- CMYK
- PROCESS
- 0.000000
- 0.000000
- 0.000000
- 69.999700
-
-
- C=0 M=0 Y=0 K=60
- CMYK
- PROCESS
- 0.000000
- 0.000000
- 0.000000
- 59.999100
-
-
- C=0 M=0 Y=0 K=50
- CMYK
- PROCESS
- 0.000000
- 0.000000
- 0.000000
- 50.000000
-
-
- C=0 M=0 Y=0 K=40
- CMYK
- PROCESS
- 0.000000
- 0.000000
- 0.000000
- 39.999400
-
-
- C=0 M=0 Y=0 K=30
- CMYK
- PROCESS
- 0.000000
- 0.000000
- 0.000000
- 29.998800
-
-
- C=0 M=0 Y=0 K=20
- CMYK
- PROCESS
- 0.000000
- 0.000000
- 0.000000
- 19.999700
-
-
- C=0 M=0 Y=0 K=10
- CMYK
- PROCESS
- 0.000000
- 0.000000
- 0.000000
- 9.999100
-
-
- C=0 M=0 Y=0 K=5
- CMYK
- PROCESS
- 0.000000
- 0.000000
- 0.000000
- 4.998800
-
-
-
-
-
- Brights
- 1
-
-
-
- C=0 M=100 Y=100 K=0
- CMYK
- PROCESS
- 0.000000
- 100.000000
- 100.000000
- 0.000000
-
-
- C=0 M=75 Y=100 K=0
- CMYK
- PROCESS
- 0.000000
- 75.000000
- 100.000000
- 0.000000
-
-
- C=0 M=10 Y=95 K=0
- CMYK
- PROCESS
- 0.000000
- 10.000000
- 95.000000
- 0.000000
-
-
- C=85 M=10 Y=100 K=0
- CMYK
- PROCESS
- 85.000000
- 10.000000
- 100.000000
- 0.000000
-
-
- C=100 M=90 Y=0 K=0
- CMYK
- PROCESS
- 100.000000
- 90.000000
- 0.000000
- 0.000000
-
-
- C=60 M=90 Y=0 K=0
- CMYK
- PROCESS
- 60.000000
- 90.000000
- 0.003100
- 0.003100
-
-
-
-
-
-
- Adobe PDF library 15.00
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
endstream
endobj
3 0 obj
<>
endobj
7 0 obj
<>/Resources<>/ExtGState<>/Properties<>/Shading<>>>/Thumb 13 0 R/TrimBox[0.0 0.0 1022.0 266.0]/Type/Page>>
endobj
8 0 obj
<>stream
-Htอษ
๏๙Uส๋j
รุ๗>๐Zษฌ๗๖ศช๎มXพฬ4ซ2ณd0๔๗ฯแำ฿>ว๐หฏรใ๛#{H1็p่ื๕๘g๘ฯใำ็฿bx๛โ9ฦโoฬฟณC๘๑ฦฟฒเ฿?฿CโQไogqิ๛ูSฯแํCoพ=R:ำส!-อeP8ฟาeผ=j=ืผฌึฯ\?บB/็G;cัฒฮาZ8R[น!๋ฤฝ๊๊+ใ8๒ฌg"v-HฑชaUn1อฌคz#ฒE>B.ธ
bM@Yi๓๋2mๅyค๗?๕Tฎbdภ]๚ป
-็]๚นตS้$0rA9ฉ๊ล5๑^BXกpืฌU[z&[/MลIMHงะ(!\๊:'@Rzแยชฑ8kUE๑'iคไ6ซะ์ี&๗ฉํฌ|dตIZยข<ๆ๔๒ณฤลทล-ชช_u6จM)?Mbฏ๚ฯ hฝ<
รs4ไbา ฐํ9Hnณb๒\๊ผ%l7y~ูเ-ี8a_Pาจ X6
ศถTฏLึโ`wiํ]pูซEฌีภุคwq๔|#ๆค.฿ Zn4
-บฬEกสt้/๋Jมง)zx๑ฟ๑ญฒ:แ+ิ>4Wภ+!แ๘ข
-%ๅ*6ขชelหล฿Ae_หบNหัjมS(๛ฅะ๕sีฮ6v,ษpไ๒มบtN๓f}Vๅัqฝ๗|[ธ>8a๖๐ฑ13mYณมึเภๅDผแiWวศํ6JWฅoๅภฐuฑกUิqบป$ึู-๎I#เ1่ื>Wภถv-ต๒|-๑ฺy}๊u.ัMฯอ๒ZuทททM M2ซอัถ|ดJ ดฯN๛*๎~โ:ฐ^oลิY~พT๔69ฏภฺฦiส*้Kั0๑AEId{sYOื๏ทภ^ญฝC}/oO`ษำMHpอข!4ฝฎ)5[}:o๕J/Lํไ>ชเไkqำrย๙ฉ+งm_&ผ#ิๆ๓งC}LyE(Il5๕เธ ๓ฏทญผpฉ๒ZภUวฅ๒Zแc ๓ณผนD)pะ๓%๖S Inซ,Ix
ใD๗ฎ1สสคธuVกrฟฉr์$
ำ|งฉฉ/*๎ฉโ๊๊|ayฑ
*I๚!ve ํคพฑ}YXkส<,แT""h5มW.ึW"็ฺl"ๅw=02๖ๆป๛์h.ผ์ฅท{๒๓หธv8คF๕บ`J้Xฃ Y8ซหOีi๎ห๒้ฯฬฝoRฅ&วฉ๗ฤW$ฺถdบๆผาธ;.พAMK+_๛พmUx_ษ์\: ฤ:ฉษหฤ3 |P%๖๒ฬ5ชค !S็uX๎5"ย1ะnyHช %:ูPำ๗ฯห,reฉ[S\22ซฐ๚ำ|ำdSล&๗{@ำฝY:๚๊ญจ8p๐ช่Kูิฅ^%ดฐ5ผZณ/KVวJE>DaนZไฆzEฮ๒เRtุ$%C6ถH๛ฒ/ำ๒ธYv4~COhOห๒Xใึeะ\๏๒จ<้I}lS๐zีข05่๚?ฎ=ฝั๏ฯi!3ฒเไbซฦฌ๐๗ป54v๊-ฏLำากฐFถ๘๖๘ๅ๗๐้ท/1๘๒{ุ>ว ($๗6Bห๊๓>๓๑ทAโaYโr+ปWyยBfชมีWyF5,\ี,U2ณกจ[Jf[อo/yใfwiอrnsวz~ยfฺชvง๙Qข#ะ >ึณ7ฯrQTWต๊ึ$1๚ึะฎEดl
-I
pฟตบ๔`$hลฌญ์Sง*ฃฦ!W๘๐PKq"ต9์Eก๒จ๒9wCซ๋๖Wำะ,VZขโ Sฑมผ่.Yำฺ\
-จ.8ฆปGนโทฬsษTDeลั=@ฐฺRยฺUฬL4
-๎๒ฌKRRูม้8~ธxช๏งFV7
:t0wๆ5ฏแ๎HฺCอmฬแmณA [ํ1ฅธ๊R฿ะHX๖้๘ตตีธU
-ฝDEก +Iผ$วฒaข^J้ฃ๛ผV๓,(tKซHF)sฐื๖ุค'#
t๑ดWXro-
-kัฎO4ซ8ฅไ(wง+h[\ฦบ%YRฯ-mkีเรี_ฅn*๙ฏฏ&L.duขนขธ(ัคงืจ.: ๊ZงCผ๕Hx\u๑ํอ๋ษ(ฎe=q$ๆc์QฌOgป0ฏ3ิญ4lEป๒ๅZVฺต~๙(Sn)]๒๎l)
Vๅฺ ๚ปUฦNฃธEไ๏N "ฐM:MSE$@ิ`6"*ATขม่^บ๙0ี1ทา