diff --git a/.github/workflows/qiita-ci.yml b/.github/workflows/qiita-ci.yml
index 6c3b06be1..2960caf6e 100644
--- a/.github/workflows/qiita-ci.yml
+++ b/.github/workflows/qiita-ci.yml
@@ -154,8 +154,6 @@ jobs:
echo "5. Setting up qiita"
conda activate qiita
- # adapt environment_script for private qiita plugins from travis to github actions.
- sed 's#export PATH="/home/travis/miniconda3/bin:$PATH"; source #source /home/runner/.profile; conda #' -i qiita_db/support_files/patches/54.sql
qiita-env make --no-load-ontologies
qiita-test-install
qiita plugins update
@@ -203,7 +201,34 @@ jobs:
QIITA_PID=`cat /tmp/supervisord.pid`
kill $QIITA_PID
sleep 10
- if [[ "$COVER_PACKAGE" != *"qiita_db"* ]]; then test_data_studies/commands.sh; all-qiita-cron-job; fi
+          # because the qiita_db tests are more complex and take longer than
+          # the other tests, we only add these extra checks to the run that
+          # is not testing qiita_db
+ if [[ "$COVER_PACKAGE" != *"qiita_db"* ]]; then
+ # 1. testing that we can add some "dummy" studies to the db via
+ # CLI
+ test_data_studies/commands.sh;
+ # 2. making sure that all qiita cron jobs complete as expected
+ all-qiita-cron-job;
+          # 3. making sure that a production system has the expected rows
+          # in all our tables; steps: a. drop the test db, b. edit
+          # $QIITA_CONFIG_FP so TEST_ENVIRONMENT = FALSE, c. create a new
+          # production system, d. count the rows in the db.
+ qiita-env drop;
+ cp $QIITA_CONFIG_FP ${QIITA_CONFIG_FP}.bk
+ sed 's/TEST_ENVIRONMENT = TRUE/TEST_ENVIRONMENT = FALSE/g' ${QIITA_CONFIG_FP}.bk > $QIITA_CONFIG_FP;
+ qiita-env make --no-load-ontologies;
+
+ export PGPASSWORD=postgres
+ pgport=${{ job.services.postgres.ports[5432] }}
+ row_counts=`psql -h localhost -U postgres -d qiita_test -p $pgport -c "SELECT SUM(c.reltuples) FROM pg_class c JOIN pg_namespace n on n.oid = c.relnamespace WHERE n.nspname = 'qiita' AND c.relkind = 'r' AND n.nspname NOT IN ('information_schema', 'pg_catalog');"`
+ if [[ `echo $row_counts` != *" 0 "* ]]; then
+ echo "***********";
+ echo "The number of rows in a production system is not what's expected:";
+ echo $row_counts;
+ echo "***********";
+ exit 1
+ fi
+ fi
- name: Submit coveralls
uses: AndreMiras/coveralls-python-action@develop
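For reference, the row-count check above expands to the following query (the workflow runs it as a single line; it is reformatted here for readability). `reltuples` is the planner's per-table row estimate, so the step fails unless a freshly built production database reports a sum of 0:

    -- sum the row estimates for every regular table in the qiita schema;
    -- on a newly created production database this is expected to be 0
    SELECT SUM(c.reltuples)
      FROM pg_class c
      JOIN pg_namespace n ON n.oid = c.relnamespace
     WHERE n.nspname = 'qiita'
       AND c.relkind = 'r'
       AND n.nspname NOT IN ('information_schema', 'pg_catalog');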
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5f988d047..cb9e63ad9 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -94,6 +94,17 @@ After the initial production release of Qiita, changes to the database schema wi
2. We keep fully patched versions of the DBS and HTML files in the repository
3. We keep a patch file for each patch as required in the `qiita_db/support_files/patches` directory. Note that **the patches will be applied in order based on the natural sort order of their filename** (e.g., `2.sql` will be applied before `10.sql`, and `10.sql` will be applied before `a.sql`)
+### Patch 91.sql
+
+In May 2024 we decided to:
+* Merge all patches up to and including 91.sql into the main database schema; this means that there are no patches older than 92.sql.
+* Add a new folder, `patches/test_db_sql/`, where we can store SQL files that are only applied to the test environment.
+* Add a check to the GitHub Actions workflow verifying that a production database has the expected number of rows.
+
+Note that these changes mean:
+1. 92.sql is now the first SQL file applied when patching the database.
+2. If you need to make changes (like INSERTs) _only_ to the test database, add an SQL file named after the corresponding patch to `patches/test_db_sql/`; see the example below.
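+
+For example, a test-only patch shares its filename with the regular patch it accompanies and runs in the same transaction, right after it. A minimal sketch (`qiita.severity` is an existing table; the 'Debug' value is made up for illustration):
+
+```sql
+-- patches/test_db_sql/92.sql
+-- only applied when the environment is built as a test environment
+INSERT INTO qiita.severity (severity) VALUES ('Debug');
+```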
+
### Developer Workflow
1. Load the fully patched DBS file (e.g., `qiita-db.dbs`) in [DBSchema](http://www.dbschema.com/)
diff --git a/qiita_db/artifact.py b/qiita_db/artifact.py
index 13d72052a..94f335c94 100644
--- a/qiita_db/artifact.py
+++ b/qiita_db/artifact.py
@@ -359,7 +359,7 @@ def create(cls, filepaths, artifact_type, name=None, prep_template=None,
# There are three different ways of creating an Artifact, but all of
# them execute a set of common operations. Declare functions to avoid
# code duplication. These functions should not be used outside of the
- # create function, hence declaring them here
+    # Artifact.create function, hence declaring them here
def _common_creation_steps(atype, cmd_id, data_type, cmd_parameters):
gen_timestamp = datetime.now()
visibility_id = qdb.util.convert_to_id("sandbox", "visibility")
diff --git a/qiita_db/environment_manager.py b/qiita_db/environment_manager.py
index 3820f2b21..95fa8c468 100644
--- a/qiita_db/environment_manager.py
+++ b/qiita_db/environment_manager.py
@@ -200,7 +200,6 @@ def make_environment(load_ontologies, download_reference, add_demo_user):
with open(SETTINGS_FP, newline=None) as f:
qdb.sql_connection.TRN.add(f.read())
qdb.sql_connection.TRN.execute()
-
# Insert the settings values to the database
sql = """INSERT INTO settings
(test, base_data_dir, base_work_dir)
@@ -211,7 +210,6 @@ def make_environment(load_ontologies, download_reference, add_demo_user):
qiita_config.working_dir])
qdb.sql_connection.TRN.execute()
create_layout(test=test, verbose=verbose)
-
patch(verbose=verbose, test=test)
if load_ontologies:
@@ -274,7 +272,16 @@ def drop_environment(ask_for_confirmation):
# Connect to the postgres server
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add("SELECT test FROM settings")
- is_test_environment = qdb.sql_connection.TRN.execute_fetchflatten()[0]
+ try:
+ is_test_environment = \
+ qdb.sql_connection.TRN.execute_fetchflatten()[0]
+ except ValueError as e:
+            # if the settings table doesn't exist, it's fine to treat this
+            # as a test environment and clean it up
+ if 'UNDEFINED_TABLE. MSG: relation "settings"' in str(e):
+ is_test_environment = True
+ else:
+ raise
qdb.sql_connection.TRN.close()
if is_test_environment:
@@ -369,15 +376,12 @@ def patch(patches_dir=PATCHES_DIR, verbose=False, test=False):
Pulls the current patch from the settings table and applies all subsequent
patches found in the patches directory.
"""
- # we are going to open and close 2 main transactions; this is a required
- # change since patch 68.sql where we transition to jsonb for all info
- # files. The 2 main transitions are: (1) get the current settings,
- # (2) each patch in their independent transaction
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add("SELECT current_patch FROM settings")
current_patch = qdb.sql_connection.TRN.execute_fetchlast()
current_sql_patch_fp = join(patches_dir, current_patch)
corresponding_py_patch = partial(join, patches_dir, 'python_patches')
+ corresponding_test_sql = partial(join, patches_dir, 'test_db_sql')
sql_glob = join(patches_dir, '*.sql')
sql_patch_files = natsorted(glob(sql_glob))
@@ -389,21 +393,17 @@ def patch(patches_dir=PATCHES_DIR, verbose=False, test=False):
else:
next_patch_index = sql_patch_files.index(current_sql_patch_fp) + 1
- patch_update_sql = "UPDATE settings SET current_patch = %s"
+ if test:
+ with qdb.sql_connection.TRN:
+ _populate_test_db()
+ patch_update_sql = "UPDATE settings SET current_patch = %s"
for sql_patch_fp in sql_patch_files[next_patch_index:]:
sql_patch_filename = basename(sql_patch_fp)
- py_patch_fp = corresponding_py_patch(
- splitext(basename(sql_patch_fp))[0] + '.py')
- py_patch_filename = basename(py_patch_fp)
-
- # patch 43.sql is when we started testing patches, then in patch
- # 68.sql is when we transitioned to jsonb for the info files; let's do
- # this in its own transition
- if sql_patch_filename == '68.sql' and test:
- with qdb.sql_connection.TRN:
- _populate_test_db()
+ patch_prefix = splitext(basename(sql_patch_fp))[0]
+ py_patch_fp = corresponding_py_patch(f'{patch_prefix}.py')
+ test_sql_fp = corresponding_test_sql(f'{patch_prefix}.sql')
with qdb.sql_connection.TRN:
with open(sql_patch_fp, newline=None) as patch_file:
@@ -413,12 +413,19 @@ def patch(patches_dir=PATCHES_DIR, verbose=False, test=False):
qdb.sql_connection.TRN.add(
patch_update_sql, [sql_patch_filename])
+ if test and exists(test_sql_fp):
+ if verbose:
+ print('\t\tApplying test SQL %s...'
+ % basename(test_sql_fp))
+ with open(test_sql_fp) as test_sql:
+ qdb.sql_connection.TRN.add(test_sql.read())
+
qdb.sql_connection.TRN.execute()
if exists(py_patch_fp):
if verbose:
print('\t\tApplying python patch %s...'
- % py_patch_filename)
+ % basename(py_patch_fp))
with open(py_patch_fp) as py_patch:
exec(py_patch.read(), globals())
@@ -427,7 +434,5 @@ def patch(patches_dir=PATCHES_DIR, verbose=False, test=False):
# for the test Study (1) so a lot of the tests actually expect this.
# Now, trying to regenerate directly in the populate_test_db might
# require too many dev hours so the easiest is just do it here
- # UPDATE 01/25/2021: moving to 81.sql as we added timestamps to
- # prep info files
- if test and sql_patch_filename == '81.sql':
- qdb.study.Study(1).sample_template.generate_files()
+ if test:
+ qdb.study.Study(1).sample_template.generate_files()
diff --git a/qiita_db/support_files/patches/0.sql b/qiita_db/support_files/patches/0.sql
deleted file mode 100644
index 1138d6ba7..000000000
--- a/qiita_db/support_files/patches/0.sql
+++ /dev/null
@@ -1,61 +0,0 @@
--- Nov 14, 2014
--- This patch initializes the controlled values for some of the tables in the database
-
--- Populate user_level table
-INSERT INTO qiita.user_level (name, description) VALUES ('admin', 'Can access and do all the things'), ('dev', 'Can access all data and info about errors'), ('superuser', 'Can see all studies, can run analyses'), ('user', 'Can see own and public data, can run analyses'), ('unverified', 'Email not verified'), ('guest', 'Can view & download public data');
-
--- Populate analysis_status table
-INSERT INTO qiita.analysis_status (status) VALUES ('in_construction'), ('queued'), ('running'), ('completed'), ('error'), ('public');
-
--- Populate job_status table
-INSERT INTO qiita.job_status (status) VALUES ('queued'), ('running'), ('completed'), ('error');
-
--- Populate data_type table
-INSERT INTO qiita.data_type (data_type) VALUES ('16S'), ('18S'), ('ITS'), ('Proteomic'), ('Metabolomic'), ('Metagenomic');
-
--- Populate filetype table
-INSERT INTO qiita.filetype (type) VALUES ('SFF'), ('FASTA-Sanger'), ('FASTQ');
-
--- Populate emp_status table
-INSERT INTO qiita.emp_status (emp_status) VALUES ('EMP'), ('EMP_Processed'), ('NOT_EMP');
-
--- Populate study_status table
-INSERT INTO qiita.study_status (status, description) VALUES ('awaiting_approval', 'Awaiting approval of metadata'), ('public', 'Anyone can see this study'), ('private', 'Only owner and shared users can see this study');
-
--- Populate timeseries_type table
-INSERT INTO qiita.timeseries_type (timeseries_type) VALUES ('NOT_TIMESERIES'), ('TIMESERIES_1'), ('TIMESERIES_2'), ('TIMESERIES_3');
-
--- Populate severity table
-INSERT INTO qiita.severity (severity) VALUES ('Warning'), ('Runtime'), ('Fatal');
-
--- Populate portal_type table
-INSERT INTO qiita.portal_type (portal, description) VALUES ('QIIME', 'QIIME portal'), ('EMP', 'EMP portal'), ('QIIME_EMP', 'QIIME and EMP portals');
-
--- Populate sample_status table
-INSERT INTO qiita.required_sample_info_status (status) VALUES ('received'), ('in_preparation'), ('running'), ('completed');
-
--- Populate filepath_type table
-INSERT INTO qiita.filepath_type (filepath_type) VALUES ('raw_forward_seqs'), ('raw_reverse_seqs'), ('raw_barcodes'), ('preprocessed_fasta'), ('preprocessed_fastq'), ('preprocessed_demux'), ('biom'), ('directory'), ('plain_text'), ('reference_seqs'), ('reference_tax'), ('reference_tree'), ('log');
-
--- Populate data_directory table
-INSERT INTO qiita.data_directory (data_type, mountpoint, subdirectory, active) VALUES ('analysis', 'analysis', '', true), ('job', 'job', '', true), ('preprocessed_data', 'preprocessed_data', '', true), ('processed_data', 'processed_data', '', true), ('raw_data', 'raw_data', '', true), ('reference', 'reference', '', true), ('uploads', 'uploads', '', true), ('working_dir', 'working_dir', '', true);
-
--- Populate checksum_algorithm table
-INSERT INTO qiita.checksum_algorithm (name) VALUES ('crc32');
-
--- Populate commands available
-INSERT INTO qiita.command (name, command, input, required, optional, output) VALUES
-('Summarize Taxa', 'summarize_taxa_through_plots.py', '{"--otu_table_fp":null}', '{}', '{"--mapping_category":null, "--mapping_fp":null,"--sort":null}', '{"--output_dir":null}'),
-('Beta Diversity', 'beta_diversity_through_plots.py', '{"--otu_table_fp":null,"--mapping_fp":null}', '{}', '{"--tree_fp":null,"--color_by_all_fields":null,"--seqs_per_sample":null}', '{"--output_dir":null}'),
-('Alpha Rarefaction', 'alpha_rarefaction.py', '{"--otu_table_fp":null,"--mapping_fp":null}', '{}', '{"--tree_fp":null,"--num_steps":null,"--min_rare_depth":null,"--max_rare_depth":null,"--retain_intermediate_files":false}', '{"--output_dir":null}');
-
--- Populate command_data_type table
-INSERT INTO qiita.command_data_type (command_id, data_type_id) VALUES (1,1), (1,2), (2,1), (2,2), (2,3), (2,4), (2,5), (2,6), (3,1), (3,2), (3,3), (3,4), (3,5), (3,6);
-
--- Set the autoincrementing study_id column to start at 10,000 so we don't overlap with existing (QIIME database) study IDs, which should be maintained
-SELECT setval('qiita.study_study_id_seq', 10000, false);
-
--- Initializing preprocessed_sequence_illumina_params to have 2 rows
--- The first row has the default values on QIIME
--- The second row has the default values on QIIME but rev_comp_mapping_barcodes is set to true
-INSERT INTO qiita.preprocessed_sequence_illumina_params (rev_comp_mapping_barcodes) VALUES (false), (true);
diff --git a/qiita_db/support_files/patches/1.sql b/qiita_db/support_files/patches/1.sql
deleted file mode 100644
index 1878589d2..000000000
--- a/qiita_db/support_files/patches/1.sql
+++ /dev/null
@@ -1,49 +0,0 @@
--- Nov 17, 2014
--- This patch adds to the database the structure needed for supporting environmental packages
-
-CREATE TABLE qiita.environmental_package (
- environmental_package_name varchar NOT NULL,
- metadata_table varchar NOT NULL,
- CONSTRAINT pk_environmental_package PRIMARY KEY ( environmental_package_name )
- ) ;
-
-COMMENT ON COLUMN qiita.environmental_package.environmental_package_name IS 'The name of the environmental package';
-
-COMMENT ON COLUMN qiita.environmental_package.metadata_table IS 'Contains the name of the table that contains the pre-defined metadata columns for the environmental package';
-
-CREATE TABLE qiita.study_environmental_package (
- study_id bigint NOT NULL,
- environmental_package_name varchar NOT NULL,
- CONSTRAINT pk_study_environmental_package PRIMARY KEY ( study_id, environmental_package_name )
- ) ;
-
-CREATE INDEX idx_study_environmental_package ON qiita.study_environmental_package ( study_id ) ;
-
-CREATE INDEX idx_study_environmental_package_0 ON qiita.study_environmental_package ( environmental_package_name ) ;
-
-COMMENT ON TABLE qiita.study_environmental_package IS 'Holds the 1 to many relationship between the study and the environmental_package';
-
-ALTER TABLE qiita.study_environmental_package ADD CONSTRAINT fk_study_environmental_package FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id ) ;
-
-ALTER TABLE qiita.study_environmental_package ADD CONSTRAINT fk_study_environmental_package_0 FOREIGN KEY ( environmental_package_name ) REFERENCES qiita.environmental_package( environmental_package_name ) ;
-
--- We insert the environmental packages that we know
-INSERT INTO qiita.environmental_package (environmental_package_name, metadata_table) VALUES
- ('air', 'ep_air'),
- ('built environment', 'ep_built_environment'),
- ('host-associated', 'ep_host_associated'),
- ('human-amniotic-fluid', 'ep_human_amniotic_fluid'),
- ('human-associated', 'ep_human_associated'),
- ('human-blood', 'ep_human_blood'),
- ('human-gut', 'ep_human_gut'),
- ('human-oral', 'ep_human_oral'),
- ('human-skin', 'ep_human_skin'),
- ('human-urine', 'ep_human_urine'),
- ('human-vaginal', 'ep_human_vaginal'),
- ('microbial mat/biofilm', 'ep_microbial_mat_biofilm'),
- ('miscellaneous natural or artificial environment', 'ep_misc_artif'),
- ('plant-associated', 'ep_plant_associated'),
- ('sediment', 'ep_sediment'),
- ('soil', 'ep_soil'),
- ('wastewater/sludge', 'ep_wastewater_sludge'),
- ('water', 'ep_water');
diff --git a/qiita_db/support_files/patches/10.sql b/qiita_db/support_files/patches/10.sql
deleted file mode 100644
index 8b0604a38..000000000
--- a/qiita_db/support_files/patches/10.sql
+++ /dev/null
@@ -1,9 +0,0 @@
--- Dec 17, 2014
--- This patch renames the columns of the processed_params_sortmerna to match
--- the parameter names in the script
-
-ALTER TABLE qiita.processed_params_sortmerna RENAME COLUMN evalue TO sortmerna_e_value;
-
-ALTER TABLE qiita.processed_params_sortmerna RENAME COLUMN max_pos TO sortmerna_max_pos;
-
-ALTER TABLE qiita.processed_params_sortmerna RENAME COLUMN coverage TO sortmerna_coverage;
diff --git a/qiita_db/support_files/patches/11.sql b/qiita_db/support_files/patches/11.sql
deleted file mode 100644
index 904eda1d8..000000000
--- a/qiita_db/support_files/patches/11.sql
+++ /dev/null
@@ -1,49 +0,0 @@
--- Dec 17, 2014
--- Adding a new filepath_type = raw_sff
--- Adding 454 Parameters
-
-INSERT INTO qiita.filepath_type (filepath_type) VALUES ('raw_sff'), ('raw_fasta'), ('raw_qual');
-INSERT INTO qiita.filetype (type) VALUES ('FASTA');
-
-DROP TABLE qiita.preprocessed_sequence_454_params;
-CREATE TABLE qiita.preprocessed_sequence_454_params (
- preprocessed_params_id bigserial NOT NULL,
- param_set_name varchar NOT NULL,
- min_seq_len integer DEFAULT 200 NOT NULL,
- max_seq_len integer DEFAULT 1000 NOT NULL,
- trim_seq_length bool DEFAULT FALSE NOT NULL,
- min_qual_score integer DEFAULT 25 NOT NULL,
- max_ambig integer DEFAULT 6 NOT NULL,
- max_homopolymer integer DEFAULT 6 NOT NULL,
- max_primer_mismatch integer DEFAULT 0 NOT NULL,
- barcode_type varchar DEFAULT 'golay_12' NOT NULL,
- max_barcode_errors real DEFAULT 1.5 NOT NULL,
- disable_bc_correction bool DEFAULT FALSE NOT NULL,
- qual_score_window integer DEFAULT 0 NOT NULL,
- disable_primers bool DEFAULT FALSE NOT NULL,
- reverse_primers varchar DEFAULT 'disable' NOT NULL,
- reverse_primer_mismatches integer DEFAULT 0 NOT NULL,
- truncate_ambig_bases bool DEFAULT FALSE NOT NULL,
- CONSTRAINT pk_preprocessed_sequence_454_params PRIMARY KEY ( preprocessed_params_id )
- );
-
-COMMENT ON TABLE qiita.preprocessed_sequence_454_params
- IS 'Parameters used for processing 454 sequence data.';
-
-INSERT INTO qiita.preprocessed_sequence_454_params (param_set_name, barcode_type)
- VALUES ('Defaults with Golay 12 barcodes', 'golay_12'),
- ('Defaults with Hamming 8 barcodes', 'hamming_8');
-
--- add param set name to illumina sequence params. We're not setting defauft
--- as we need to update the existing parameter sets and then add in the
--- default
-ALTER TABLE qiita.preprocessed_sequence_illumina_params
- ADD COLUMN param_set_name varchar;
-
-UPDATE qiita.preprocessed_sequence_illumina_params
- SET param_set_name='Defaults' WHERE preprocessed_params_id=1;
-UPDATE qiita.preprocessed_sequence_illumina_params
- SET param_set_name='Defaults with reverse complement mapping file barcodes' WHERE preprocessed_params_id=2;
-
-ALTER TABLE qiita.preprocessed_sequence_illumina_params
- ALTER COLUMN param_set_name SET NOT NULL;
diff --git a/qiita_db/support_files/patches/12.sql b/qiita_db/support_files/patches/12.sql
deleted file mode 100644
index 2e437f3eb..000000000
--- a/qiita_db/support_files/patches/12.sql
+++ /dev/null
@@ -1,8 +0,0 @@
--- Dec 26, 2014
--- Adding barcode_type for Illumina Parameters
-
-INSERT INTO qiita.preprocessed_sequence_illumina_params (param_set_name, barcode_type, rev_comp_mapping_barcodes)
- VALUES ('barcode_type 8, defaults', '8', false),
- ('barcode_type 8, reverse complement mapping file barcodes', '8', true),
- ('barcode_type 6, defaults', '6', false),
- ('barcode_type 6, reverse complement mapping file barcodes', '6', true);
\ No newline at end of file
diff --git a/qiita_db/support_files/patches/13.sql b/qiita_db/support_files/patches/13.sql
deleted file mode 100644
index df2e36b67..000000000
--- a/qiita_db/support_files/patches/13.sql
+++ /dev/null
@@ -1,3 +0,0 @@
-ALTER TABLE qiita.processed_params_sortmerna ADD param_set_name varchar(100) DEFAULT 'Default' NOT NULL;
-
-COMMENT ON COLUMN qiita.processed_params_sortmerna.param_set_name IS 'The name of the parameter set';
diff --git a/qiita_db/support_files/patches/14.sql b/qiita_db/support_files/patches/14.sql
deleted file mode 100644
index 2446e0375..000000000
--- a/qiita_db/support_files/patches/14.sql
+++ /dev/null
@@ -1,4 +0,0 @@
---Feb 11, 2015
---Placeholder for python to run
--- we don't really need an SQL patch, but having nothing here breaks the build
-select 42;
diff --git a/qiita_db/support_files/patches/15.sql b/qiita_db/support_files/patches/15.sql
deleted file mode 100644
index 2446e0375..000000000
--- a/qiita_db/support_files/patches/15.sql
+++ /dev/null
@@ -1,4 +0,0 @@
---Feb 11, 2015
---Placeholder for python to run
--- we don't really need an SQL patch, but having nothing here breaks the build
-select 42;
diff --git a/qiita_db/support_files/patches/16.sql b/qiita_db/support_files/patches/16.sql
deleted file mode 100644
index ec760a045..000000000
--- a/qiita_db/support_files/patches/16.sql
+++ /dev/null
@@ -1,100 +0,0 @@
---Feb 25, 2015
---Adds tables for analysis collection object
-CREATE TABLE qiita.collection_status (
- collection_status_id bigserial NOT NULL,
- status varchar(100) NOT NULL,
- CONSTRAINT pk_collection_status PRIMARY KEY ( collection_status_id )
- ) ;
-
-CREATE TABLE qiita.collection (
- collection_id bigserial NOT NULL,
- email varchar NOT NULL,
- name varchar(100) NOT NULL,
- description varchar ,
- collection_status_id bigint DEFAULT 1 NOT NULL,
- CONSTRAINT pk_collection PRIMARY KEY ( collection_id )
- ) ;
-
-CREATE INDEX idx_collection ON qiita.collection ( email ) ;
-
-CREATE INDEX idx_collection_0 ON qiita.collection ( collection_status_id ) ;
-
-COMMENT ON TABLE qiita.collection IS 'Tracks a group of analyses and important jobs for an overarching goal.';
-
-CREATE TABLE qiita.collection_analysis (
- collection_id bigint NOT NULL,
- analysis_id bigint NOT NULL,
- CONSTRAINT idx_collection_analysis PRIMARY KEY ( collection_id, analysis_id )
- ) ;
-
-CREATE INDEX idx_collection_analysis_0 ON qiita.collection_analysis ( collection_id ) ;
-
-CREATE INDEX idx_collection_analysis_1 ON qiita.collection_analysis ( analysis_id ) ;
-
-COMMENT ON TABLE qiita.collection_analysis IS 'Matches collection to analyses as one to many.';
-
-CREATE TABLE qiita.collection_job (
- collection_id bigint NOT NULL,
- job_id bigint NOT NULL,
- CONSTRAINT idx_collection_job_1 PRIMARY KEY ( collection_id, job_id )
- ) ;
-
-CREATE INDEX idx_collection_job ON qiita.collection_job ( collection_id ) ;
-
-CREATE INDEX idx_collection_job_0 ON qiita.collection_job ( job_id ) ;
-
-COMMENT ON TABLE qiita.collection_job IS 'Matches collection important jobs as one to many.';
-
-CREATE TABLE qiita.collection_users (
- collection_id bigint NOT NULL,
- email varchar NOT NULL,
- CONSTRAINT idx_collection_user PRIMARY KEY ( collection_id, email )
- ) ;
-
-CREATE INDEX idx_collection_user_0 ON qiita.collection_users ( collection_id ) ;
-
-CREATE INDEX idx_collection_user_1 ON qiita.collection_users ( email ) ;
-
-COMMENT ON TABLE qiita.collection_users IS 'Allows sharing of a collection';
-
-ALTER TABLE qiita.collection ADD CONSTRAINT fk_collection FOREIGN KEY ( email ) REFERENCES qiita.qiita_user( email ) ;
-
-ALTER TABLE qiita.collection ADD CONSTRAINT fk_collection_0 FOREIGN KEY ( collection_status_id ) REFERENCES qiita.collection_status( collection_status_id ) ;
-
-ALTER TABLE qiita.collection_analysis ADD CONSTRAINT fk_collection_analysis FOREIGN KEY ( collection_id ) REFERENCES qiita.collection( collection_id ) ;
-
-ALTER TABLE qiita.collection_analysis ADD CONSTRAINT fk_collection_analysis_0 FOREIGN KEY ( analysis_id ) REFERENCES qiita.analysis( analysis_id ) ;
-
-ALTER TABLE qiita.collection_job ADD CONSTRAINT fk_collection_job FOREIGN KEY ( collection_id ) REFERENCES qiita.collection( collection_id ) ;
-
-ALTER TABLE qiita.collection_job ADD CONSTRAINT fk_collection_job_0 FOREIGN KEY ( job_id ) REFERENCES qiita.job( job_id ) ;
-
-ALTER TABLE qiita.collection_users ADD CONSTRAINT fk_collection_user FOREIGN KEY ( collection_id ) REFERENCES qiita.collection( collection_id ) ;
-
-ALTER TABLE qiita.collection_users ADD CONSTRAINT fk_collection_user_email FOREIGN KEY ( email ) REFERENCES qiita.qiita_user( email ) ;
-
---Insert collection statuses
-INSERT INTO qiita.collection_status (status) VALUES ('private'), ('public');
-
---Add Trigger to make sure jobs added to the collection_job table belong to the collection
-CREATE FUNCTION qiita.check_collection_access() RETURNS TRIGGER AS $job_access$
- BEGIN
- IF NOT EXISTS (
- SELECT aj.* FROM qiita.analysis_job aj
- LEFT JOIN qiita.collection_analysis ca
- ON aj.analysis_id = ca.analysis_id
- WHERE aj.job_id = NEW.job_id and ca.collection_id = NEW.collection_id
- ) THEN
- RAISE EXCEPTION 'Jobs inserted that do not belong to collection' USING ERRCODE = 'unique_violation';
- RETURN OLD;
- ELSE
- RETURN NEW;
- END IF;
- RETURN NULL;
- END;
- $job_access$ LANGUAGE plpgsql STABLE;
-
-CREATE TRIGGER verify_job_in_collection
- BEFORE INSERT ON qiita.collection_job
- FOR EACH ROW
- EXECUTE PROCEDURE qiita.check_collection_access();
diff --git a/qiita_db/support_files/patches/17.sql b/qiita_db/support_files/patches/17.sql
deleted file mode 100644
index 0115afea5..000000000
--- a/qiita_db/support_files/patches/17.sql
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
-10 March 2015
-
-This patch removes the 100 character limit on the study_sample_columns's
-column_name column. To my knowledge, there is no reason for this limit.
-- Adam Robbins-Pianka
-*/
-alter table qiita.study_sample_columns alter column column_name type varchar;
diff --git a/qiita_db/support_files/patches/18.sql b/qiita_db/support_files/patches/18.sql
deleted file mode 100644
index cf6185912..000000000
--- a/qiita_db/support_files/patches/18.sql
+++ /dev/null
@@ -1,3 +0,0 @@
--- March 18, 2015
--- Add column to analysis table to mark shopping cart
-ALTER TABLE qiita.analysis ADD dflt bool NOT NULL DEFAULT false;
\ No newline at end of file
diff --git a/qiita_db/support_files/patches/19.sql b/qiita_db/support_files/patches/19.sql
deleted file mode 100644
index 6b6f95af9..000000000
--- a/qiita_db/support_files/patches/19.sql
+++ /dev/null
@@ -1,34 +0,0 @@
--- March 19, 2015
--- Removes the status from the study and adds it to the processed data table.
-
--- Modify the study_status_table so it becomes the processed_data_status table
-ALTER TABLE qiita.study_status RENAME TO processed_data_status;
-ALTER TABLE qiita.processed_data_status RENAME COLUMN study_status_id TO processed_data_status_id;
-ALTER TABLE qiita.processed_data_status RENAME COLUMN status TO processed_data_status;
-
--- The description of the statuses are referencing to the study, update them
--- so they refer to the processed data
-UPDATE qiita.processed_data_status SET description='Anyone can see this processed data' WHERE processed_data_status_id=2;
-UPDATE qiita.processed_data_status SET description='Only owner and shared users can see this processed data' WHERE processed_data_status_id=3;
-
--- Modify the processed_data table so it include the status column, which
--- is a FK to the processed_data_status table
-ALTER TABLE qiita.processed_data ADD processed_data_status_id bigint DEFAULT 4 NOT NULL;
-CREATE INDEX idx_processed_data_0 ON qiita.processed_data ( processed_data_status_id );
-ALTER TABLE qiita.processed_data ADD CONSTRAINT fk_processed_data_status FOREIGN KEY ( processed_data_status_id ) REFERENCES qiita.processed_data_status( processed_data_status_id );
-
--- We need to maintain the previous study status values. Those need to be
--- transferred to the processed data object.
-WITH study_values as (SELECT study_id, study_status_id FROM qiita.study)
-UPDATE qiita.processed_data as pd
- SET processed_data_status_id=sv.study_status_id
- FROM study_values sv
- WHERE pd.processed_data_id IN (
- SELECT processed_data_id FROM qiita.study_processed_data
- WHERE study_id = sv.study_id);
-
-
--- We no longer need the status column on the study table
-ALTER TABLE qiita.study DROP CONSTRAINT fk_study_study_status;
-DROP INDEX qiita.idx_study_0;
-ALTER TABLE qiita.study DROP COLUMN study_status_id;
\ No newline at end of file
diff --git a/qiita_db/support_files/patches/2.sql b/qiita_db/support_files/patches/2.sql
deleted file mode 100644
index 2e48c4094..000000000
--- a/qiita_db/support_files/patches/2.sql
+++ /dev/null
@@ -1,27 +0,0 @@
--- Nov 18, 2014
--- This patch updates the timeseries structures on the DB
-
--- We need to drop the unique constraint on timeseries_type
-ALTER TABLE qiita.timeseries_type DROP CONSTRAINT idx_timeseries_type;
-
--- We add a new column, intervention type, and we set the default to None because the column is not nullable
--- and since the database already contains some data, if we don't set a default, it fails on creation
-ALTER TABLE qiita.timeseries_type ADD intervention_type varchar DEFAULT 'None' NOT NULL;
-
--- Now the unique constraint applies to the tuple (timeseries_type, intervention_type)
-ALTER TABLE qiita.timeseries_type ADD CONSTRAINT idx_timeseries_type UNIQUE ( timeseries_type, intervention_type ) ;
-
--- We need to update the current entries on the table
-UPDATE qiita.timeseries_type SET timeseries_type='None',intervention_type='None' WHERE timeseries_type_id = 1;
-UPDATE qiita.timeseries_type SET timeseries_type='real',intervention_type='single intervention' WHERE timeseries_type_id = 2;
-UPDATE qiita.timeseries_type SET timeseries_type='real',intervention_type='multiple intervention' WHERE timeseries_type_id = 3;
-UPDATE qiita.timeseries_type SET timeseries_type='real',intervention_type='combo intervention' WHERE timeseries_type_id = 4;
-
--- Insert the rest of possible timeseries combinations
-INSERT INTO qiita.timeseries_type (timeseries_type, intervention_type) VALUES
- ('pseudo','single intervention'),
- ('pseudo','multiple intervention'),
- ('pseudo','combo intervention'),
- ('mixed','single intervention'),
- ('mixed','multiple intervention'),
- ('mixed','combo intervention');
diff --git a/qiita_db/support_files/patches/20.sql b/qiita_db/support_files/patches/20.sql
deleted file mode 100644
index 06a5f2f16..000000000
--- a/qiita_db/support_files/patches/20.sql
+++ /dev/null
@@ -1,6 +0,0 @@
--- March 19, 2015
--- Rename columns to be more descriptive and allow easier joins
-ALTER TABLE qiita.processed_data_status RENAME COLUMN description TO processed_data_status_description;
-ALTER TABLE qiita.portal_type RENAME COLUMN description TO portal_description;
-ALTER TABLE qiita.investigation RENAME COLUMN description TO investigation_description;
-ALTER TABLE qiita.investigation RENAME COLUMN name TO investigation_name;
\ No newline at end of file
diff --git a/qiita_db/support_files/patches/21.sql b/qiita_db/support_files/patches/21.sql
deleted file mode 100644
index 26f921296..000000000
--- a/qiita_db/support_files/patches/21.sql
+++ /dev/null
@@ -1,14 +0,0 @@
--- March 28, 2015
--- Add default analyses for all existing users
-DO $do$
-DECLARE
- eml varchar;
- aid bigint;
-BEGIN
-FOR eml IN
- SELECT email FROM qiita.qiita_user
-LOOP
- INSERT INTO qiita.analysis (email, name, description, dflt, analysis_status_id) VALUES (eml, eml || '-dflt', 'dflt', true, 1) RETURNING analysis_id INTO aid;
- INSERT INTO qiita.analysis_workflow (analysis_id, step) VALUES (aid, 2);
-END LOOP;
-END $do$;
\ No newline at end of file
diff --git a/qiita_db/support_files/patches/22.sql b/qiita_db/support_files/patches/22.sql
deleted file mode 100644
index 6896b175f..000000000
--- a/qiita_db/support_files/patches/22.sql
+++ /dev/null
@@ -1,14 +0,0 @@
--- April 16, 2015
--- Add primary key to analysis_sample table, first deleting the duplicates
--- http://stackoverflow.com/a/9862688
-DO $do$
-BEGIN
- CREATE TEMP TABLE temp_table
- ON COMMIT drop AS
- SELECT analysis_id, processed_data_id, sample_id
- FROM qiita.analysis_sample GROUP BY analysis_id, processed_data_id, sample_id;
- DELETE FROM qiita.analysis_sample;
- INSERT INTO qiita.analysis_sample (analysis_id, processed_data_id, sample_id) SELECT analysis_id, processed_data_id, sample_id FROM temp_table;
-
- ALTER TABLE qiita.analysis_sample ADD CONSTRAINT pk_analysis_sample PRIMARY KEY ( analysis_id, processed_data_id, sample_id );
-END $do$
\ No newline at end of file
diff --git a/qiita_db/support_files/patches/23.sql b/qiita_db/support_files/patches/23.sql
deleted file mode 100644
index cd307b26e..000000000
--- a/qiita_db/support_files/patches/23.sql
+++ /dev/null
@@ -1,150 +0,0 @@
--- Apr 16, 2015
--- This patch relaxes the sample template metadata constraints on the database,
--- so from now on they're going to be enforced by code, except
--- required_sample_info_status which is completely deprecated
-
-DO $do$
-DECLARE
- dyn_t varchar;
- dyn_table varchar;
- st_id bigint;
-BEGIN
-
--- First, sample template
-FOR dyn_t IN
- SELECT DISTINCT table_name
- FROM information_schema.columns
- WHERE SUBSTR(table_name, 1, 7) = 'sample_'
- AND table_schema = 'qiita'
- AND table_name != 'sample_template_filepath'
-LOOP
- st_id := SUBSTR(dyn_t, 8)::int;
- dyn_table := 'qiita.' || dyn_t;
-
- -- Add the new columns to the study_sample_columns table
- INSERT INTO qiita.study_sample_columns (study_id, column_name, column_type)
- VALUES (st_id, 'physical_specimen_location', 'varchar'),
- (st_id, 'physical_specimen_remaining', 'bool'),
- (st_id, 'dna_extracted', 'bool'),
- (st_id, 'sample_type', 'varchar'),
- (st_id, 'collection_timestamp', 'timestamp'),
- (st_id, 'host_subject_id', 'varchar'),
- (st_id, 'description', 'varchar'),
- (st_id, 'latitude', 'float8'),
- (st_id, 'longitude', 'float8'),
- (st_id, 'required_sample_info_status', 'varchar');
-
- -- Add the new columns to the dynamic table
- EXECUTE 'ALTER TABLE ' || dyn_table || '
- ADD COLUMN physical_specimen_location varchar,
- ADD COLUMN physical_specimen_remaining boolean,
- ADD COLUMN dna_extracted boolean,
- ADD COLUMN sample_type varchar,
- ADD COLUMN collection_timestamp timestamp,
- ADD COLUMN host_subject_id varchar,
- ADD COLUMN description varchar,
- ADD COLUMN latitude float8,
- ADD COLUMN longitude float8,
- ADD COLUMN required_sample_info_status varchar;';
-
- -- Copy the values from the required_sample_info table to the dynamic table
- EXECUTE '
- WITH sv AS (SELECT * FROM qiita.required_sample_info
- JOIN qiita.required_sample_info_status
- USING (required_sample_info_status_id)
- WHERE study_id = ' || st_id || ')
- UPDATE ' || dyn_table || '
- SET physical_specimen_location=sv.physical_location,
- physical_specimen_remaining=sv.has_physical_specimen,
- dna_extracted=sv.has_extracted_data,
- sample_type=sv.sample_type,
- collection_timestamp=sv.collection_timestamp,
- host_subject_id=sv.host_subject_id,
- description=sv.description,
- latitude=sv.latitude,
- longitude=sv.longitude,
- required_sample_info_status=sv.status
- FROM sv
- WHERE ' || dyn_table || '.sample_id = sv.sample_id;';
-
-END LOOP;
-END $do$;
-
--- We can now drop the columns in the required_sample_info_table
-ALTER TABLE qiita.required_sample_info
- DROP COLUMN physical_location,
- DROP COLUMN has_physical_specimen,
- DROP COLUMN has_extracted_data,
- DROP COLUMN sample_type,
- DROP COLUMN required_sample_info_status_id,
- DROP COLUMN collection_timestamp,
- DROP COLUMN host_subject_id,
- DROP COLUMN description,
- DROP COLUMN latitude,
- DROP COLUMN longitude;
-
--- Since that table no longer stores required metadata,
--- we will rename it
-ALTER TABLE qiita.required_sample_info RENAME TO study_sample;
-
--- The table required_sample_info_status_id is no longer needed
-DROP TABLE qiita.required_sample_info_status;
-
--- Now, let's move the data for the prep templates
-DO $do$
-DECLARE
- dyn_t varchar;
- dyn_table varchar;
- prep_id bigint;
-BEGIN
-FOR dyn_t IN
- SELECT DISTINCT table_name
- FROM information_schema.columns
- WHERE SUBSTR(table_name, 1, 5) = 'prep_'
- AND table_schema = 'qiita'
- AND table_name NOT IN ('prep_template',
- 'prep_template_preprocessed_data',
- 'prep_template_filepath',
- 'prep_columns')
-LOOP
- prep_id := SUBSTR(dyn_t, 6)::int;
- dyn_table := 'qiita.' || dyn_t;
-
- -- Add the new columns to the prep_template_columns table
- INSERT INTO qiita.prep_columns (prep_template_id, column_name, column_type)
- VALUES (prep_id, 'center_name', 'varchar'),
- (prep_id, 'center_project_name', 'varchar'),
- (prep_id, 'emp_status', 'varchar');
-
- -- Add the new columns to the dynamic table
- EXECUTE 'ALTER TABLE ' || dyn_table || '
- ADD COLUMN center_name varchar,
- ADD COLUMN center_project_name varchar,
- ADD COLUMN emp_status varchar;';
-
- -- Copy the values from the common_prep_info table to the dynamic table
- EXECUTE '
- WITH sv AS (SELECT * FROM qiita.common_prep_info
- JOIN qiita.emp_status USING (emp_status_id)
- WHERE prep_template_id = ' || prep_id || ')
- UPDATE ' || dyn_table || '
- SET center_name=sv.center_name,
- center_project_name=sv.center_project_name,
- emp_status=sv.emp_status
- FROM sv
- WHERE ' || dyn_table || '.sample_id=sv.sample_id;';
-
-END LOOP;
-END $do$;
-
--- We can now drop the columns in the required_sample_info_table
-ALTER TABLE qiita.common_prep_info
- DROP COLUMN center_name,
- DROP COLUMN center_project_name,
- DROP COLUMN emp_status_id;
-
--- Since that table no longer stores common metadata, we will rename it
-ALTER TABLE qiita.common_prep_info RENAME to prep_template_sample;
-
--- The table emp_status is no longer needed
-DROP TABLE qiita.emp_status;
\ No newline at end of file
diff --git a/qiita_db/support_files/patches/24.sql b/qiita_db/support_files/patches/24.sql
deleted file mode 100644
index afae4b226..000000000
--- a/qiita_db/support_files/patches/24.sql
+++ /dev/null
@@ -1,57 +0,0 @@
--- May 7, 2015
--- This patch adds the ON UPDATE CASCADE constraint to all the FK
--- that are referencing the sample ids
-
-DO $do$
-DECLARE
- dyn_t varchar;
- fk_vals RECORD;
-BEGIN
-
--- The dynamic tables do not have a FK set on their sample ID
--- We need to find the dynamic tables existing in the system and we add the
--- FK constraint to them.
-FOR dyn_t IN
- SELECT DISTINCT table_name
- FROM information_schema.columns
- WHERE (SUBSTR(table_name, 1, 7) = 'sample_'
- OR SUBSTR(table_name, 1, 5) = 'prep_')
- AND table_schema = 'qiita'
- AND table_name NOT IN ('prep_template',
- 'prep_template_preprocessed_data',
- 'prep_template_filepath',
- 'prep_columns',
- 'sample_template_filepath',
- 'prep_template_sample')
-LOOP
- EXECUTE 'ALTER TABLE qiita.' || dyn_t || '
- ADD CONSTRAINT fk_' || dyn_t || '
- FOREIGN KEY (sample_id)'
- 'REFERENCES qiita.study_sample (sample_id);';
-END LOOP;
-
--- Search for all the tables that are pointing to the sample_id
--- and add the FK constraint with ON UPDATE CASCADE
-FOR fk_vals IN
- SELECT r.table_name, r.column_name, fk.constraint_name
- FROM information_schema.constraint_column_usage u
- INNER JOIN information_schema.referential_constraints fk
- ON u.constraint_catalog = fk.unique_constraint_catalog
- AND u.constraint_schema = fk.unique_constraint_schema
- AND u.constraint_name = fk.unique_constraint_name
- INNER JOIN information_schema.key_column_usage r
- ON r.constraint_catalog = fk.constraint_catalog
- AND r.constraint_schema = fk.constraint_schema
- AND r.constraint_name = fk.constraint_name
- WHERE u.column_name = 'sample_id'
- AND u.table_schema = 'qiita'
- AND u.table_name = 'study_sample'
-LOOP
- EXECUTE 'ALTER TABLE qiita.' || fk_vals.table_name || '
- DROP CONSTRAINT ' || fk_vals.constraint_name || ',
- ADD CONSTRAINT ' || fk_vals.constraint_name ||'
- FOREIGN KEY (' || fk_vals.column_name ||')
- REFERENCES qiita.study_sample( sample_id )
- ON UPDATE CASCADE;';
-END LOOP;
-END $do$;
\ No newline at end of file
diff --git a/qiita_db/support_files/patches/25.sql b/qiita_db/support_files/patches/25.sql
deleted file mode 100644
index 1a1ac12be..000000000
--- a/qiita_db/support_files/patches/25.sql
+++ /dev/null
@@ -1,3 +0,0 @@
--- May 19, 2015
-
-SELECT 42;
\ No newline at end of file
diff --git a/qiita_db/support_files/patches/26.sql b/qiita_db/support_files/patches/26.sql
deleted file mode 100644
index 9d8167842..000000000
--- a/qiita_db/support_files/patches/26.sql
+++ /dev/null
@@ -1,15 +0,0 @@
--- Jun 11, 2015
-
--- Updating FASTA-Sanger -> FASTA_Sanger, needed so we can put restrictions on
--- what kind of files the user can select in the GUI
-UPDATE qiita.filetype SET type='FASTA_Sanger' WHERE type = 'FASTA-Sanger';
-
--- Adding new filetype
-INSERT INTO qiita.filetype (type) VALUES ('per_sample_FASTQ');
-
-
--- Adding new illumina processing params if they do not exists
--- adapted from: http://stackoverflow.com/a/13902402
-INSERT INTO qiita.preprocessed_sequence_illumina_params (param_set_name, barcode_type)
- SELECT DISTINCT 'per sample FASTQ defaults', 'not-barcoded' FROM qiita.preprocessed_sequence_illumina_params
- WHERE NOT EXISTS (SELECT 1 FROM qiita.preprocessed_sequence_illumina_params WHERE barcode_type = 'not-barcoded');
\ No newline at end of file
diff --git a/qiita_db/support_files/patches/27.sql b/qiita_db/support_files/patches/27.sql
deleted file mode 100644
index d74b01cb1..000000000
--- a/qiita_db/support_files/patches/27.sql
+++ /dev/null
@@ -1,76 +0,0 @@
--- June 11,, 2015
--- Adds ability to associate Analyses and Studies with multiple portals
--- and associates all existing studies + analyses with proper portal(s).
-
--- Remove existing portals and replace with more relevant ones
-UPDATE qiita.study SET portal_type_id = 2 WHERE portal_type_id = 3;
-
-DELETE FROM qiita.portal_type WHERE portal = 'QIIME_EMP';
-
-UPDATE qiita.portal_type
-SET portal = 'QIITA', portal_description = 'QIITA portal. Access to all data stored in database.'
-WHERE portal_type_id = 1;
-
--- Add analysis portal info to it's own table, as one analysis can be in two portals
-CREATE TABLE qiita.analysis_portal (
- analysis_id bigint NOT NULL,
- portal_type_id bigint NOT NULL,
- CONSTRAINT pk_analysis_portal PRIMARY KEY ( analysis_id, portal_type_id )
- );
-
-CREATE INDEX idx_analysis_portal ON qiita.analysis_portal ( analysis_id );
-
-CREATE INDEX idx_analysis_portal_0 ON qiita.analysis_portal ( portal_type_id );
-
-ALTER TABLE qiita.analysis_portal ADD CONSTRAINT fk_analysis_portal FOREIGN KEY ( analysis_id ) REFERENCES qiita.analysis( analysis_id );
-
-ALTER TABLE qiita.analysis_portal ADD CONSTRAINT fk_analysis_portal_0 FOREIGN KEY ( portal_type_id ) REFERENCES qiita.portal_type( portal_type_id );
-
-COMMENT ON TABLE qiita.analysis_portal IS 'Controls what analyses are visible on what portals';
-
--- Attach all existing analyses to the qiita portal
-INSERT INTO qiita.analysis_portal (analysis_id, portal_type_id) SELECT analysis_id, 1 FROM qiita.analysis;
-
--- Add new default analyses for other portals
-DO $do$
-DECLARE
- eml varchar;
- aid bigint;
- portal bigint;
-BEGIN
-FOR eml IN
- SELECT email FROM qiita.qiita_user
-LOOP
- FOR portal IN
- SELECT portal_type_id from qiita.portal_type WHERE portal_type_id != 1
- LOOP
- INSERT INTO qiita.analysis (email, name, description, dflt, analysis_status_id) VALUES (eml, eml || '-dflt', 'dflt', true, 1) RETURNING analysis_id INTO aid;
- INSERT INTO qiita.analysis_workflow (analysis_id, step) VALUES (aid, 2);
- INSERT INTO qiita.analysis_portal (analysis_id, portal_type_id) VALUES (aid, portal);
- END LOOP;
-END LOOP;
-END $do$;
-
--- Move study portal info to it's own table, as one study can be in multiple portals
-CREATE TABLE qiita.study_portal (
- study_id bigint NOT NULL,
- portal_type_id bigint NOT NULL,
- CONSTRAINT pk_study_portal PRIMARY KEY ( study_id, portal_type_id )
- );
-
-CREATE INDEX idx_study_portal ON qiita.study_portal ( study_id );
-
-CREATE INDEX idx_study_portal_0 ON qiita.study_portal ( portal_type_id );
-
-COMMENT ON TABLE qiita.study_portal IS 'Controls what studies are visible on what portals';
-
-ALTER TABLE qiita.study_portal ADD CONSTRAINT fk_study_portal FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id );
-
-ALTER TABLE qiita.study_portal ADD CONSTRAINT fk_study_portal_0 FOREIGN KEY ( portal_type_id ) REFERENCES qiita.portal_type( portal_type_id );
-
--- Attach all existing studies to the qiita portal and, if necessary, EMP portal
-INSERT INTO qiita.study_portal (study_id, portal_type_id) SELECT s.study_id, s.portal_type_id FROM qiita.study s;
-
-INSERT INTO qiita.study_portal (study_id, portal_type_id) SELECT s.study_id, 1 FROM qiita.study s WHERE s.portal_type_id != 1;
-
-ALTER TABLE qiita.study DROP COLUMN portal_type_id
\ No newline at end of file
diff --git a/qiita_db/support_files/patches/28.sql b/qiita_db/support_files/patches/28.sql
deleted file mode 100644
index 1771205ba..000000000
--- a/qiita_db/support_files/patches/28.sql
+++ /dev/null
@@ -1,27 +0,0 @@
--- July 10, 2015
--- Adds the connections between the default analysis from new user and
--- the portals.
-
-DO $do$
-DECLARE
- aid bigint;
- portal bigint;
- rec RECORD;
-BEGIN
-
-FOR portal IN
- SELECT portal_type_id FROM qiita.portal_type
-LOOP
- FOR aid IN
- SELECT analysis_id
- FROM qiita.analysis
- WHERE dflt = TRUE
- AND name LIKE CONCAT('%-dflt-', portal)
- AND analysis_id NOT IN (
- SELECT analysis_id FROM qiita.analysis_portal)
- LOOP
- INSERT INTO qiita.analysis_portal (analysis_id, portal_type_id)
- VALUES (aid, portal);
- END LOOP;
-END LOOP;
-END $do$;
diff --git a/qiita_db/support_files/patches/29.sql b/qiita_db/support_files/patches/29.sql
deleted file mode 100644
index ede5d8bf0..000000000
--- a/qiita_db/support_files/patches/29.sql
+++ /dev/null
@@ -1,27 +0,0 @@
--- Aug 3, 2015
--- Adds tables for storing messages and messaging information
-
-CREATE TABLE qiita.message (
- message_id bigserial NOT NULL,
- message varchar NOT NULL,
- message_time timestamp DEFAULT current_timestamp NOT NULL,
- expiration timestamp,
- CONSTRAINT pk_message PRIMARY KEY ( message_id )
- );
-
-CREATE TABLE qiita.message_user (
- email varchar NOT NULL,
- message_id bigint NOT NULL,
- read bool DEFAULT 'false' NOT NULL,
- CONSTRAINT idx_message_user PRIMARY KEY ( email, message_id )
- );
-
-CREATE INDEX idx_message_user_0 ON qiita.message_user ( message_id );
-
-CREATE INDEX idx_message_user_1 ON qiita.message_user ( email );
-
-COMMENT ON COLUMN qiita.message_user.read IS 'Whether the message has been read or not.';
-
-ALTER TABLE qiita.message_user ADD CONSTRAINT fk_message_user FOREIGN KEY ( message_id ) REFERENCES qiita.message( message_id );
-
-ALTER TABLE qiita.message_user ADD CONSTRAINT fk_message_user_0 FOREIGN KEY ( email ) REFERENCES qiita.qiita_user( email );
\ No newline at end of file
diff --git a/qiita_db/support_files/patches/3.sql b/qiita_db/support_files/patches/3.sql
deleted file mode 100644
index 76c686962..000000000
--- a/qiita_db/support_files/patches/3.sql
+++ /dev/null
@@ -1,3 +0,0 @@
-INSERT INTO qiita.study_status (status, description) VALUES ('sandbox', 'Only available to the owner. No sharing');
-
-UPDATE qiita.study SET study_status_id = 4 WHERE study_status_id = 1;
diff --git a/qiita_db/support_files/patches/30.sql b/qiita_db/support_files/patches/30.sql
deleted file mode 100644
index 1d1b383bf..000000000
--- a/qiita_db/support_files/patches/30.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- August 17, 2015
--- Make current string cols boolean if needed
--- Delete values that are now NaN
-SELECT 42;
diff --git a/qiita_db/support_files/patches/31.sql b/qiita_db/support_files/patches/31.sql
deleted file mode 100644
index 360fa6b56..000000000
--- a/qiita_db/support_files/patches/31.sql
+++ /dev/null
@@ -1,3 +0,0 @@
--- August 24, 2015
--- Delete all occurrences of '..' in the base_data_dir entry
-SELECT 42;
diff --git a/qiita_db/support_files/patches/32.sql b/qiita_db/support_files/patches/32.sql
deleted file mode 100644
index 0bf749be5..000000000
--- a/qiita_db/support_files/patches/32.sql
+++ /dev/null
@@ -1,41 +0,0 @@
--- September 22, 2015
--- Update the database schema to be able to store all the information that EBI
--- returns, and it is needed to perform further modifications/additions
--- to the information already present in EBI
-
-ALTER TABLE qiita.prep_template_sample ADD ebi_experiment_accession varchar ;
-ALTER TABLE qiita.study ADD ebi_study_accession varchar ;
-ALTER TABLE qiita.study ADD ebi_submission_status varchar NOT NULL DEFAULT 'not submitted';
-ALTER TABLE qiita.study_sample ADD ebi_sample_accession varchar ;
-ALTER TABLE qiita.study_sample ADD biosample_accession varchar ;
-
-CREATE TABLE qiita.ebi_run_accession (
- sample_id varchar NOT NULL,
- preprocessed_data_id bigint NOT NULL,
- ebi_run_accession varchar NOT NULL,
- CONSTRAINT idx_ebi_run_accession PRIMARY KEY ( sample_id, preprocessed_data_id, ebi_run_accession )
- ) ;
-CREATE INDEX idx_ebi_run_accession_sid ON qiita.ebi_run_accession ( sample_id ) ;
-CREATE INDEX idx_ebi_run_accession_ppd_id ON qiita.ebi_run_accession ( preprocessed_data_id ) ;
-ALTER TABLE qiita.ebi_run_accession ADD CONSTRAINT fk_ebi_run_accession FOREIGN KEY ( sample_id ) REFERENCES qiita.study_sample( sample_id ) ;
-ALTER TABLE qiita.ebi_run_accession ADD CONSTRAINT fk_ebi_run_accession_ppd FOREIGN KEY ( preprocessed_data_id ) REFERENCES qiita.preprocessed_data( preprocessed_data_id ) ;
-
--- Transfer the data from the old structure to the new one.
--- We currently don't have all the data, so we are just going to transfer
--- the study accession numbers and we will pull down the rest of the information
--- from EBI later
-WITH ebi_data AS (SELECT study_id, string_agg(ebi_study_accession, ', ') AS ebi_study_accessions
- FROM qiita.study_preprocessed_data
- JOIN qiita.preprocessed_data USING (preprocessed_data_id)
- WHERE ebi_study_accession IS NOT NULL
- GROUP BY study_id)
- UPDATE qiita.study AS st
- SET ebi_study_accession = ebi_data.ebi_study_accessions,
- ebi_submission_status = 'submitted'
- FROM ebi_data
- WHERE st.study_id = ebi_data.study_id;
-
--- Drop the old columns
-ALTER TABLE qiita.preprocessed_data DROP COLUMN submitted_to_insdc_status;
-ALTER TABLE qiita.preprocessed_data DROP COLUMN ebi_submission_accession;
-ALTER TABLE qiita.preprocessed_data DROP COLUMN ebi_study_accession;
diff --git a/qiita_db/support_files/patches/33.sql b/qiita_db/support_files/patches/33.sql
deleted file mode 100644
index a03703a15..000000000
--- a/qiita_db/support_files/patches/33.sql
+++ /dev/null
@@ -1,852 +0,0 @@
--- September 4, 2015
--- Change the database structure to remove the RawData, PreprocessedData and
--- ProcessedData division to unify it into the Artifact object
-
-CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-
--- Rename the id columns from the parameters tables
-ALTER TABLE qiita.processed_params_sortmerna RENAME COLUMN processed_params_id TO parameters_id;
-ALTER TABLE qiita.processed_params_uclust RENAME COLUMN processed_params_id TO parameters_id;
-ALTER TABLE qiita.preprocessed_sequence_454_params RENAME COLUMN preprocessed_params_id TO parameters_id;
-ALTER TABLE qiita.preprocessed_sequence_illumina_params RENAME COLUMN preprocessed_params_id TO parameters_id;
-ALTER TABLE qiita.preprocessed_spectra_params RENAME COLUMN preprocessed_params_id TO parameters_id;
-
--- Rename the table filetype
-ALTER TABLE qiita.filetype RENAME TO artifact_type;
-ALTER TABLE qiita.artifact_type RENAME COLUMN filetype_id TO artifact_type_id;
-ALTER TABLE qiita.artifact_type RENAME COLUMN type TO artifact_type;
-ALTER TABLE qiita.artifact_type ADD description varchar;
-
--- Rename the processed_data_status table
-ALTER TABLE qiita.processed_data_status RENAME TO visibility;
-ALTER TABLE qiita.visibility RENAME COLUMN processed_data_status_id TO visibility_id;
-ALTER TABLE qiita.visibility RENAME COLUMN processed_data_status TO visibility;
-ALTER TABLE qiita.visibility RENAME COLUMN processed_data_status_description TO visibility_description;
-UPDATE qiita.visibility
- SET visibility_description = 'Only visible to the owner and shared users'
- WHERE visibility = 'private';
-UPDATE qiita.visibility
- SET visibility_description = 'Visible to everybody'
- WHERE visibility = 'public';
-
--- Software table - holds the information of a given software package present
--- in the system and can be used to process an artifact
-CREATE TABLE qiita.software (
- software_id bigserial NOT NULL,
- name varchar NOT NULL,
- version varchar NOT NULL,
- description varchar NOT NULL,
- environment_script varchar NOT NULL,
- start_script varchar NOT NULL,
- CONSTRAINT pk_software PRIMARY KEY ( software_id )
- ) ;
-
--- software_command table - holds the information of a command in a given software
--- this table should be renamed to command once the command table in the
--- analysis table is merged with this one
-CREATE TABLE qiita.software_command (
- command_id bigserial NOT NULL,
- name varchar NOT NULL,
- software_id bigint NOT NULL,
- description varchar NOT NULL,
- CONSTRAINT pk_software_command PRIMARY KEY ( command_id )
- ) ;
-CREATE INDEX idx_software_command ON qiita.software_command ( software_id ) ;
-ALTER TABLE qiita.software_command ADD CONSTRAINT fk_software_command_software FOREIGN KEY ( software_id ) REFERENCES qiita.software( software_id );
-
--- command_parameter table - holds the parameters that a command accepts
-CREATE TABLE qiita.command_parameter (
- command_id bigint NOT NULL,
- parameter_name varchar NOT NULL,
- parameter_type varchar NOT NULL,
- required bool NOT NULL,
- default_value varchar ,
- CONSTRAINT idx_command_parameter_0 PRIMARY KEY ( command_id, parameter_name )
- ) ;
-CREATE INDEX idx_command_parameter ON qiita.command_parameter ( command_id ) ;
-ALTER TABLE qiita.command_parameter ADD CONSTRAINT fk_command_parameter FOREIGN KEY ( command_id ) REFERENCES qiita.software_command( command_id ) ;
-
--- default_parameter_set tables - holds the default parameter sets defined by
--- the system. If no arbitrary parameters are allowed in the system only the
--- ones listed here will be shown. Note that the only parameters that are listed
--- here are the ones that are not required, since the ones required do not
--- have a default
-CREATE TABLE qiita.default_parameter_set (
- default_parameter_set_id bigserial NOT NULL,
- command_id bigint NOT NULL,
- parameter_set_name varchar NOT NULL,
- parameter_set JSON NOT NULL,
- CONSTRAINT pk_default_parameter_set PRIMARY KEY ( default_parameter_set_id ),
- CONSTRAINT idx_default_parameter_set_0 UNIQUE ( command_id, parameter_set_name )
- ) ;
-CREATE INDEX idx_default_parameter_set ON qiita.default_parameter_set ( command_id ) ;
-ALTER TABLE qiita.default_parameter_set ADD CONSTRAINT fk_default_parameter_set FOREIGN KEY ( command_id ) REFERENCES qiita.software_command( command_id ) ;
-
--- Publication table - holds the minimum information for a given publication
--- It is useful to keep track of the publication of the studies and the software
--- used for processing artifacts
-CREATE TABLE qiita.publication (
- doi varchar NOT NULL,
- pubmed_id varchar ,
- CONSTRAINT pk_publication PRIMARY KEY ( doi )
- ) ;
-
--- Software publictation table - relates each software package with the list of
--- its related publciations
-CREATE TABLE qiita.software_publication (
- software_id bigint NOT NULL,
- publication_doi varchar NOT NULL,
- CONSTRAINT idx_software_publication_0 PRIMARY KEY ( software_id, publication_doi )
- ) ;
-CREATE INDEX idx_software_publication_software ON qiita.software_publication ( software_id ) ;
-CREATE INDEX idx_software_publication_publication ON qiita.software_publication ( publication_doi ) ;
-ALTER TABLE qiita.software_publication ADD CONSTRAINT fk_software_publication FOREIGN KEY ( software_id ) REFERENCES qiita.software( software_id ) ;
-ALTER TABLE qiita.software_publication ADD CONSTRAINT fk_software_publication_0 FOREIGN KEY ( publication_doi ) REFERENCES qiita.publication( doi ) ;
-
--- Study publication table - relates each study with the list of its related
--- publication
-CREATE TABLE qiita.study_publication (
- study_id bigint NOT NULL,
- publication_doi varchar NOT NULL,
- CONSTRAINT idx_study_publication_0 PRIMARY KEY ( study_id, publication_doi )
- ) ;
-CREATE INDEX idx_study_publication_study ON qiita.study_publication ( study_id ) ;
-CREATE INDEX idx_study_publication_doi ON qiita.study_publication ( publication_doi ) ;
-ALTER TABLE qiita.study_publication ADD CONSTRAINT fk_study_publication_study FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id ) ;
-ALTER TABLE qiita.study_publication ADD CONSTRAINT fk_study_publication FOREIGN KEY ( publication_doi ) REFERENCES qiita.publication( doi ) ;
-
--- Artifact table - holds an abstract data object from the system
-CREATE TABLE qiita.artifact (
- artifact_id bigserial NOT NULL,
- generated_timestamp timestamp NOT NULL,
- command_id bigint ,
- command_parameters json ,
- visibility_id bigint NOT NULL,
- artifact_type_id integer ,
- data_type_id bigint NOT NULL,
- can_be_submitted_to_ebi bool DEFAULT 'FALSE' NOT NULL,
- can_be_submitted_to_vamps bool DEFAULT 'FALSE' NOT NULL,
- submitted_to_vamps bool DEFAULT 'FALSE' NOT NULL,
- CONSTRAINT pk_artifact PRIMARY KEY ( artifact_id )
- ) ;
-CREATE INDEX idx_artifact_0 ON qiita.artifact ( visibility_id ) ;
-CREATE INDEX idx_artifact_1 ON qiita.artifact ( artifact_type_id ) ;
-CREATE INDEX idx_artifact_2 ON qiita.artifact ( data_type_id ) ;
-CREATE INDEX idx_artifact ON qiita.artifact ( command_id ) ;
-COMMENT ON TABLE qiita.artifact IS 'Represents data in the system';
-COMMENT ON COLUMN qiita.artifact.visibility_id IS 'If the artifact is sandbox, awaiting_for_approval, private or public';
-ALTER TABLE qiita.artifact ADD CONSTRAINT fk_artifact_type FOREIGN KEY ( artifact_type_id ) REFERENCES qiita.artifact_type( artifact_type_id ) ;
-ALTER TABLE qiita.artifact ADD CONSTRAINT fk_artifact_visibility FOREIGN KEY ( visibility_id ) REFERENCES qiita.visibility( visibility_id ) ;
-ALTER TABLE qiita.artifact ADD CONSTRAINT fk_artifact_software_command FOREIGN KEY ( command_id ) REFERENCES qiita.software_command( command_id ) ;
-ALTER TABLE qiita.artifact ADD CONSTRAINT fk_artifact_data_type FOREIGN KEY ( data_type_id ) REFERENCES qiita.data_type( data_type_id ) ;
-
--- We need to keep the old preprocessed data id for the artifact id due
--- to EBI. In order to make sure that none of the raw data or processed
--- data that we are going to transfer to the artifact table gets an id needed
--- by the preprocessed data, we will set the autoincrementing
--- artifact_id column to start at 2,000
-SELECT setval('qiita.artifact_artifact_id_seq', 2000, false);
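-
--- Illustration (an editor's sketch, not part of the original patch): with
--- the third argument of setval() set to false, the sequence is positioned
--- *before* 2000, so the next nextval() call returns 2000 itself:
-SELECT nextval('qiita.artifact_artifact_id_seq');             -- returns 2000
-SELECT setval('qiita.artifact_artifact_id_seq', 2000, false); -- rewind so the patch behaves as before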
-
-
--- Artifact filepath table - relates an artifact with its files
-CREATE TABLE qiita.artifact_filepath (
- artifact_id bigint NOT NULL,
- filepath_id bigint NOT NULL,
- CONSTRAINT idx_artifact_filepath PRIMARY KEY ( artifact_id, filepath_id )
- ) ;
-CREATE INDEX idx_artifact_filepath_artifact ON qiita.artifact_filepath ( artifact_id ) ;
-CREATE INDEX idx_artifact_filepath_filepath ON qiita.artifact_filepath ( filepath_id ) ;
-ALTER TABLE qiita.artifact_filepath ADD CONSTRAINT fk_artifact_filepath_artifact FOREIGN KEY ( artifact_id ) REFERENCES qiita.artifact( artifact_id ) ;
-ALTER TABLE qiita.artifact_filepath ADD CONSTRAINT fk_artifact_filepath_filepath FOREIGN KEY ( filepath_id ) REFERENCES qiita.filepath( filepath_id ) ;
-
--- Parent artifact table - keeps track of the provenance of a given artifact.
--- If an artifact doesn't have a parent it means that it was uploaded by the user.
-CREATE TABLE qiita.parent_artifact (
- artifact_id bigint NOT NULL,
- parent_id bigint NOT NULL,
- CONSTRAINT idx_parent_artifact PRIMARY KEY ( artifact_id, parent_id )
- ) ;
-CREATE INDEX idx_parent_artifact_artifact ON qiita.parent_artifact ( artifact_id ) ;
-CREATE INDEX idx_parent_artifact_parent ON qiita.parent_artifact ( parent_id ) ;
-ALTER TABLE qiita.parent_artifact ADD CONSTRAINT fk_parent_artifact_artifact FOREIGN KEY ( artifact_id ) REFERENCES qiita.artifact( artifact_id ) ;
-ALTER TABLE qiita.parent_artifact ADD CONSTRAINT fk_parent_artifact_parent FOREIGN KEY ( parent_id ) REFERENCES qiita.artifact( artifact_id ) ;
-
--- Study artifact table - relates each artifact with its study
-CREATE TABLE qiita.study_artifact (
- study_id bigint NOT NULL,
- artifact_id bigint NOT NULL,
- CONSTRAINT idx_study_artifact PRIMARY KEY ( study_id, artifact_id )
- ) ;
-CREATE INDEX idx_study_artifact_study ON qiita.study_artifact ( study_id ) ;
-CREATE INDEX idx_study_artifact_artifact ON qiita.study_artifact ( artifact_id ) ;
-ALTER TABLE qiita.study_artifact ADD CONSTRAINT fk_study_artifact_study FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id ) ;
-ALTER TABLE qiita.study_artifact ADD CONSTRAINT fk_study_artifact_artifact FOREIGN KEY ( artifact_id ) REFERENCES qiita.artifact( artifact_id ) ;
-
--- Create a function to infer the visibility of the artifact from the
--- raw data
-CREATE FUNCTION infer_rd_status(rd_id bigint, st_id bigint) RETURNS bigint AS $$
- DECLARE
- result bigint;
- BEGIN
- CREATE TEMP TABLE irds_temp
- ON COMMIT DROP AS
- SELECT DISTINCT processed_data_status_id
- FROM qiita.processed_data
- JOIN qiita.preprocessed_processed_data USING (processed_data_id)
- JOIN qiita.prep_template_preprocessed_data USING (preprocessed_data_id)
- JOIN qiita.prep_template USING (prep_template_id)
- JOIN qiita.study_processed_data USING (processed_data_id)
- WHERE raw_data_id = rd_id AND study_id = st_id;
- IF EXISTS(SELECT * FROM irds_temp WHERE processed_data_status_id = 2) THEN
- result := 2;
- ELSIF EXISTS(SELECT * FROM irds_temp WHERE processed_data_status_id = 3) THEN
- result := 3;
- ELSIF EXISTS(SELECT * FROM irds_temp WHERE processed_data_status_id = 1) THEN
- result := 1;
- ELSE
- result := 4;
- END IF;
- DROP TABLE irds_temp;
- RETURN result;
- END;
-$$ LANGUAGE plpgsql;
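-
--- Usage sketch (illustrative only; the ids are hypothetical): infer the
--- visibility for raw data 1 in study 1, checking the derived processed data
--- statuses in the precedence order 2, 3, 1 and defaulting to 4:
-SELECT infer_rd_status(1, 1);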
-
--- Create a function to infer the visibility of the artifact from the
--- preprocessed data
-CREATE FUNCTION infer_ppd_status(ppd_id bigint) RETURNS bigint AS $$
- DECLARE
- result bigint;
- BEGIN
- CREATE TEMP TABLE ippds_temp
- ON COMMIT DROP AS
- SELECT DISTINCT processed_data_status_id
- FROM qiita.processed_data
- JOIN qiita.preprocessed_processed_data USING (processed_data_id)
- WHERE preprocessed_data_id = ppd_id;
- IF EXISTS(SELECT * FROM ippds_temp WHERE processed_data_status_id = 2) THEN
- result := 2;
- ELSIF EXISTS(SELECT * FROM ippds_temp WHERE processed_data_status_id = 3) THEN
- result := 3;
- ELSIF EXISTS(SELECT * FROM ippds_temp WHERE processed_data_status_id = 1) THEN
- result := 1;
- ELSE
- result := 4;
- END IF;
- DROP TABLE ippds_temp;
- RETURN result;
- END;
-$$ LANGUAGE plpgsql;
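-
--- Analogous usage sketch for preprocessed data (illustrative; id hypothetical):
-SELECT infer_ppd_status(1);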
-
--- Populate the software and software_command tables so we can assign the
--- correct values to the preprocessed and processed tables
-INSERT INTO qiita.software (name, version, description, environment_script, start_script) VALUES
- ('QIIME', '1.9.1', 'Quantitative Insights Into Microbial Ecology (QIIME) is an open-source bioinformatics pipeline for performing microbiome analysis from raw DNA sequencing data', 'source activate qiita', 'start_target_gene');
-INSERT INTO qiita.publication (doi, pubmed_id) VALUES ('10.1038/nmeth.f.303', '20383131');
-INSERT INTO qiita.software_publication (software_id, publication_doi) VALUES (1, '10.1038/nmeth.f.303');
--- Magic number 1: we just created the software table and inserted the QIIME
--- software, which will receive the ID 1
-INSERT INTO qiita.software_command (software_id, name, description) VALUES
- (1, 'Split libraries FASTQ', 'Demultiplexes and applies quality control to FASTQ data'),
- (1, 'Split libraries', 'Demultiplexes and applies quality control to FASTA data'),
- (1, 'Pick closed-reference OTUs', 'OTU picking using a closed reference approach');
--- Populate the command_parameter table
--- Magic numbers: we just created the software command table and inserted 3 commands, so we know their ids
--- 1: Split libraries FASTQ - preprocessed_sequence_illumina_params
--- 2: Split libraries - preprocessed_sequence_454_params
--- 3: Pick closed-reference OTUs - processed_params_sortmerna
-INSERT INTO qiita.command_parameter (command_id, parameter_name, parameter_type, required, default_value) VALUES
- (1, 'input_data', 'artifact', True, NULL),
- (1, 'max_bad_run_length', 'integer', False, '3'),
- (1, 'min_per_read_length_fraction', 'float', False, '0.75'),
- (1, 'sequence_max_n', 'integer', False, '0'),
- (1, 'rev_comp_barcode', 'bool', False, 'False'),
- (1, 'rev_comp_mapping_barcodes', 'bool', False, 'False'),
- (1, 'rev_comp', 'bool', False, 'False'),
- (1, 'phred_quality_threshold', 'integer', False, '3'),
- (1, 'barcode_type', 'string', False, 'golay_12'),
- (1, 'max_barcode_errors', 'float', False, '1.5'),
- (2, 'input_data', 'artifact', True, NULL),
- (2, 'min_seq_len', 'integer', False, '200'),
- (2, 'max_seq_len', 'integer', False, '1000'),
- (2, 'trim_seq_length', 'bool', False, 'False'),
- (2, 'min_qual_score', 'integer', False, '25'),
- (2, 'max_ambig', 'integer', False, '6'),
- (2, 'max_homopolymer', 'integer', False, '6'),
- (2, 'max_primer_mismatch', 'integer', False, '0'),
- (2, 'barcode_type', 'string', False, 'golay_12'),
- (2, 'max_barcode_errors', 'float', False, '1.5'),
- (2, 'disable_bc_correction', 'bool', False, 'False'),
- (2, 'qual_score_window', 'integer', False, '0'),
- (2, 'disable_primers', 'bool', False, 'False'),
- (2, 'reverse_primers', 'choice:["disable", "truncate_only", "truncate_remove"]', False, 'disable'),
- (2, 'reverse_primer_mismatches', 'integer', False, '0'),
- (2, 'truncate_ambi_bases', 'bool', False, 'False'),
- (3, 'input_data', 'artifact', True, NULL),
- (3, 'reference', 'reference', False, '1'),
- (3, 'sortmerna_e_value', 'float', False, '1'),
- (3, 'sortmerna_max_pos', 'integer', False, '10000'),
- (3, 'similarity', 'float', False, '0.97'),
- (3, 'sortmerna_coverage', 'float', False, '0.97'),
- (3, 'threads', 'integer', False, '1');
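-
--- Illustrative query (an editor's sketch, not part of the original patch):
--- list the optional parameters and their defaults for the OTU picking
--- command inserted above:
-SELECT parameter_name, parameter_type, default_value
-    FROM qiita.command_parameter
-    WHERE command_id = 3 AND required = FALSE
-    ORDER BY parameter_name;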
-
--- Populate the default_parameter_set table
-DO $do$
-DECLARE
- rec RECORD;
- val JSON;
-BEGIN
- -- Transfer the default parameters from the preprocessed_sequence_illumina_params table
- IF EXISTS(SELECT * FROM qiita.preprocessed_sequence_illumina_params) THEN
- FOR rec IN
- SELECT *
- FROM qiita.preprocessed_sequence_illumina_params
- ORDER BY parameters_id
- LOOP
- val := ('{"max_bad_run_length":' || rec.max_bad_run_length || ','
- '"min_per_read_length_fraction":' || rec.min_per_read_length_fraction || ','
- '"sequence_max_n":' || rec.sequence_max_n || ','
- '"rev_comp_barcode":' || rec.rev_comp_barcode || ','
- '"rev_comp_mapping_barcodes":' || rec.rev_comp_mapping_barcodes || ','
- '"rev_comp":' || rec.rev_comp || ','
- '"phred_quality_threshold":' || rec.phred_quality_threshold || ','
- '"barcode_type":"' || rec.barcode_type || '",'
- '"max_barcode_errors":' || rec.max_barcode_errors || '}')::json;
- INSERT INTO qiita.default_parameter_set (command_id, parameter_set_name, parameter_set)
- VALUES (1, rec.param_set_name, val);
- END LOOP;
- ELSE
- INSERT INTO qiita.default_parameter_set (command_id, parameter_set_name, parameter_set)
- VALUES (1, 'Defaults', '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5}'::json);
- END IF;
-
- -- Transfer the default parameters from the preprocessed_sequence_454_params table
- IF EXISTS(SELECT * FROM qiita.preprocessed_sequence_454_params) THEN
- FOR rec IN
- SELECT *
- FROM qiita.preprocessed_sequence_454_params
- ORDER BY parameters_id
- LOOP
- val := ('{"min_seq_len":' || rec.min_seq_len || ','
- '"max_seq_len":' || rec.max_seq_len || ','
- '"trim_seq_length":' || rec.trim_seq_length || ','
- '"min_qual_score":' || rec.min_qual_score || ','
- '"max_ambig":' || rec.max_ambig || ','
- '"max_homopolymer":' || rec.max_homopolymer || ','
- '"max_primer_mismatch":' || rec.max_primer_mismatch || ','
- '"barcode_type":"' || rec.barcode_type || '",'
- '"max_barcode_errors":' || rec.max_barcode_errors || ','
- '"disable_bc_correction":' || rec.disable_bc_correction || ','
- '"qual_score_window":' || rec.qual_score_window || ','
- '"disable_primers":' || rec.disable_primers || ','
- '"reverse_primers":"' || rec.reverse_primers || '",'
- '"reverse_primer_mismatches":' || rec.reverse_primer_mismatches || ','
- '"truncate_ambi_bases":' || rec.truncate_ambig_bases || '}')::json;
- INSERT INTO qiita.default_parameter_set (command_id, parameter_set_name, parameter_set)
- VALUES (2, rec.param_set_name, val);
- END LOOP;
- ELSE
- INSERT INTO qiita.default_parameter_set (command_id, parameter_set_name, parameter_set)
- VALUES (2, 'Defaults', '{"min_seq_len":200,"max_seq_len":1000,"trim_seq_length":false,"min_qual_score":25,"max_ambig":6,"max_homopolymer":6,"max_primer_mismatch":0,"barcode_type":"golay_12","max_barcode_errors":1.5,"disable_bc_correction":false,"qual_score_window":0,"disable_primers":false,"reverse_primers":"disable","reverse_primer_mismatches":0,"truncate_ambi_bases":false}'::json);
- END IF;
-
- -- Transfer the default parameters from the processed_params_sortmerna table
- IF EXISTS(SELECT * FROM qiita.processed_params_sortmerna) THEN
- FOR rec IN
- SELECT *
- FROM qiita.processed_params_sortmerna
- ORDER BY parameters_id
- LOOP
- val := ('{"reference":' || rec.reference_id || ','
- '"sortmerna_e_value":' || rec.sortmerna_e_value || ','
- '"sortmerna_max_pos":' || rec.sortmerna_max_pos || ','
- '"similarity":' || rec.similarity || ','
- '"sortmerna_coverage":' || rec.sortmerna_coverage || ','
- '"threads":' || rec.threads || '}')::json;
- INSERT INTO qiita.default_parameter_set (command_id, parameter_set_name, parameter_set)
- VALUES (3, rec.param_set_name, val);
- END LOOP;
- ELSE
- INSERT INTO qiita.default_parameter_set (command_id, parameter_set_name, parameter_set)
- VALUES (3, 'Defaults', '{"reference":1,"sortmerna_e_value":1,"sortmerna_max_pos":10000,"similarity":0.97,"sortmerna_coverage":0.97,"threads":1}'::json);
- END IF;
-END $do$;
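-
--- Illustrative query (an editor's sketch, not part of the original patch):
--- the JSON parameter sets can be inspected with the ->> operator, e.g. to
--- find the default sets that use golay_12 barcodes for the two
--- demultiplexing commands:
-SELECT command_id, parameter_set_name
-    FROM qiita.default_parameter_set
-    WHERE command_id IN (1, 2)
-        AND parameter_set->>'barcode_type' = 'golay_12';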
-
--- Create tables to keep track of the processing jobs
-CREATE TABLE qiita.processing_job_status (
- processing_job_status_id bigserial NOT NULL,
- processing_job_status varchar NOT NULL,
- processing_job_status_description varchar NOT NULL,
- CONSTRAINT pk_processing_job_status PRIMARY KEY ( processing_job_status_id )
- ) ;
-
-INSERT INTO qiita.processing_job_status
- (processing_job_status, processing_job_status_description)
- VALUES ('queued', 'The job is waiting to be run'),
- ('running', 'The job is running'),
- ('success', 'The job completed successfully'),
- ('error', 'The job failed');
-
-CREATE TABLE qiita.processing_job (
- processing_job_id UUID DEFAULT uuid_generate_v4(),
- email varchar NOT NULL,
- command_id bigint NOT NULL,
- command_parameters json NOT NULL,
- processing_job_status_id bigint NOT NULL,
- logging_id bigint ,
- heartbeat timestamp ,
- step varchar ,
- CONSTRAINT pk_processing_job PRIMARY KEY ( processing_job_id )
- ) ;
-CREATE INDEX idx_processing_job_email ON qiita.processing_job ( email ) ;
-CREATE INDEX idx_processing_job_command_id ON qiita.processing_job ( command_id ) ;
-CREATE INDEX idx_processing_job_status_id ON qiita.processing_job ( processing_job_status_id ) ;
-CREATE INDEX idx_processing_job_logging ON qiita.processing_job ( logging_id ) ;
-COMMENT ON COLUMN qiita.processing_job.email IS 'The user that launched the job';
-COMMENT ON COLUMN qiita.processing_job.command_id IS 'The command launched';
-COMMENT ON COLUMN qiita.processing_job.command_parameters IS 'The parameters used in the command';
-COMMENT ON COLUMN qiita.processing_job.logging_id IS 'In case of failure, point to the log entry that holds more information about the error';
-COMMENT ON COLUMN qiita.processing_job.heartbeat IS 'The last heartbeat received by this job';
-ALTER TABLE qiita.processing_job ADD CONSTRAINT fk_processing_job_qiita_user FOREIGN KEY ( email ) REFERENCES qiita.qiita_user( email ) ;
-ALTER TABLE qiita.processing_job ADD CONSTRAINT fk_processing_job FOREIGN KEY ( command_id ) REFERENCES qiita.software_command( command_id ) ;
-ALTER TABLE qiita.processing_job ADD CONSTRAINT fk_processing_job_status FOREIGN KEY ( processing_job_status_id ) REFERENCES qiita.processing_job_status( processing_job_status_id ) ;
-ALTER TABLE qiita.processing_job ADD CONSTRAINT fk_processing_job_logging FOREIGN KEY ( logging_id ) REFERENCES qiita.logging( logging_id ) ;
-
-CREATE TABLE qiita.artifact_processing_job (
- artifact_id bigint NOT NULL,
- processing_job_id UUID NOT NULL,
- CONSTRAINT idx_artifact_processing_job PRIMARY KEY ( artifact_id, processing_job_id )
- ) ;
-CREATE INDEX idx_artifact_processing_job_artifact ON qiita.artifact_processing_job ( artifact_id ) ;
-CREATE INDEX idx_artifact_processing_job_job ON qiita.artifact_processing_job ( processing_job_id ) ;
-ALTER TABLE qiita.artifact_processing_job ADD CONSTRAINT fk_artifact_processing_job FOREIGN KEY ( artifact_id ) REFERENCES qiita.artifact( artifact_id ) ;
-ALTER TABLE qiita.artifact_processing_job ADD CONSTRAINT fk_artifact_processing_job_0 FOREIGN KEY ( processing_job_id ) REFERENCES qiita.processing_job( processing_job_id ) ;
-
--- Create a function to correctly choose the command id for the preprocessed
--- data
-CREATE FUNCTION choose_command_id(ppd_params_table varchar) RETURNS bigint AS $$
- BEGIN
- IF ppd_params_table = 'preprocessed_sequence_illumina_params' THEN
- RETURN 1;
- ELSE
- RETURN 2;
- END IF;
- END;
-$$ LANGUAGE plpgsql;
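-
--- Usage sketch (illustrative): any table name other than the Illumina one
--- falls through to the 454 command:
-SELECT choose_command_id('preprocessed_sequence_illumina_params'); -- returns 1
-SELECT choose_command_id('preprocessed_sequence_454_params');      -- returns 2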
-
--- Create a function to correctly generate the parameters used to generate the artifact
-CREATE FUNCTION generate_params(command_id bigint, params_id bigint, parent_id bigint) RETURNS json AS $$
- DECLARE
- c1_rec qiita.preprocessed_sequence_illumina_params%ROWTYPE;
- c2_rec qiita.preprocessed_sequence_454_params%ROWTYPE;
- c3_rec qiita.processed_params_sortmerna%ROWTYPE;
- val json;
- BEGIN
- IF command_id = 1 THEN
- SELECT * INTO c1_rec
- FROM qiita.preprocessed_sequence_illumina_params
- WHERE parameters_id = params_id;
- val := ('{"max_bad_run_length":' || c1_rec.max_bad_run_length || ','
- '"min_per_read_length_fraction":' || c1_rec.min_per_read_length_fraction || ','
- '"sequence_max_n":' || c1_rec.sequence_max_n || ','
- '"rev_comp_barcode":' || c1_rec.rev_comp_barcode || ','
- '"rev_comp_mapping_barcodes":' || c1_rec.rev_comp_mapping_barcodes || ','
- '"rev_comp":' || c1_rec.rev_comp || ','
- '"phred_quality_threshold":' || c1_rec.phred_quality_threshold || ','
- '"barcode_type":"' || c1_rec.barcode_type || '",'
- '"max_barcode_errors":' || c1_rec.max_barcode_errors || ','
- '"input_data":' || parent_id || '}')::json;
- ELSIF command_id = 2 THEN
- SELECT * INTO c2_rec
- FROM qiita.preprocessed_sequence_454_params
- WHERE parameters_id = params_id;
- val := ('{"min_seq_len":' || c2_rec.min_seq_len || ','
- '"max_seq_len":' || c2_rec.max_seq_len || ','
- '"trim_seq_length":' || c2_rec.trim_seq_length || ','
- '"min_qual_score":' || c2_rec.min_qual_score || ','
- '"max_ambig":' || c2_rec.max_ambig || ','
- '"max_homopolymer":' || c2_rec.max_homopolymer || ','
- '"max_primer_mismatch":' || c2_rec.max_primer_mismatch || ','
- '"barcode_type":"' || c2_rec.barcode_type || '",'
- '"max_barcode_errors":' || c2_rec.max_barcode_errors || ','
- '"disable_bc_correction":' || c2_rec.disable_bc_correction || ','
- '"qual_score_window":' || c2_rec.qual_score_window || ','
- '"disable_primers":' || c2_rec.disable_primers || ','
- '"reverse_primers":"' || c2_rec.reverse_primers || '",'
- '"reverse_primer_mismatches":' || c2_rec.reverse_primer_mismatches || ','
- '"truncate_ambi_bases":' || c2_rec.truncate_ambig_bases || ','
- '"input_data":' || parent_id || '}')::json;
- ELSE
- SELECT * INTO c3_rec
- FROM qiita.processed_params_sortmerna
- WHERE parameters_id = params_id;
- val := ('{"reference":' || c3_rec.reference_id || ','
- '"sortmerna_e_value":' || c3_rec.sortmerna_e_value || ','
- '"sortmerna_max_pos":' || c3_rec.sortmerna_max_pos || ','
- '"similarity":' || c3_rec.similarity || ','
- '"sortmerna_coverage":' || c3_rec.sortmerna_coverage || ','
- '"threads":' || c3_rec.threads || ','
- '"input_data":' || parent_id || '}')::json;
- END IF;
- RETURN val;
- END;
-$$ LANGUAGE plpgsql;
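-
--- Usage sketch (illustrative; the ids are hypothetical): rebuild the JSON
--- parameters of command 3 from row 1 of processed_params_sortmerna, wiring
--- artifact 42 in as the 'input_data' parent:
-SELECT generate_params(3, 1, 42);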
-
--- We need to modify the prep template table to point to the artifact table
--- rather than to the raw data table
-ALTER TABLE qiita.prep_template ADD artifact_id bigint;
-CREATE INDEX idx_prep_template_artifact_id ON qiita.prep_template (artifact_id);
-ALTER TABLE qiita.prep_template ADD CONSTRAINT fk_prep_template_artifact
- FOREIGN KEY ( artifact_id ) REFERENCES qiita.artifact(artifact_id);
-
--- We need to modify the ebi run accession table to point to the artifact table
--- rather than to the preprocessed data table
-ALTER TABLE qiita.ebi_run_accession ADD artifact_id bigint;
-CREATE INDEX idx_ebi_run_accession_artifact_id ON qiita.ebi_run_accession (artifact_id);
-ALTER TABLE qiita.ebi_run_accession ADD CONSTRAINT fk_ebi_run_accesion_artifact
- FOREIGN KEY ( artifact_id ) REFERENCES qiita.artifact(artifact_id);
-
--- We need to modify the analysis_sample table to point to the artifact table
--- rather than to the processed data table
-ALTER TABLE qiita.analysis_sample ADD artifact_id bigint;
-CREATE INDEX idx_analysis_sample_artifact_id ON qiita.analysis_sample ( artifact_id ) ;
-ALTER TABLE qiita.analysis_sample ADD CONSTRAINT fk_analysis_sample_artifact FOREIGN KEY ( artifact_id ) REFERENCES qiita.artifact( artifact_id ) ;
-
--- Move the data!
-DO $do$
-DECLARE
- pt_vals RECORD;
- ppd_vals RECORD;
- pd_vals RECORD;
- rd_fp_vals RECORD;
- ppd_fp_vals RECORD;
- pd_fp_vals RECORD;
- study_pmids RECORD;
- a_type RECORD;
- rd_vis_id bigint;
- ppd_vis_id bigint;
- rd_a_id bigint;
- ppd_a_id bigint;
- pd_a_id bigint;
- demux_type_id bigint;
- biom_type_id bigint;
- ppd_cmd_id bigint;
- job_id UUID;
- params json;
-BEGIN
- -- We need new artifact types for representing demultiplexed data (the
- -- only type of preprocessed data that we have at this point) and
- -- OTU tables (the only type of processed data that we have at this point)
- INSERT INTO qiita.artifact_type (artifact_type, description)
- VALUES ('Demultiplexed', 'Demultiplexed and QC sequences')
- RETURNING artifact_type_id INTO demux_type_id;
- INSERT INTO qiita.artifact_type (artifact_type, description)
- VALUES ('BIOM', 'BIOM table')
- RETURNING artifact_type_id INTO biom_type_id;
-
- -- Loop through all the prep templates. We are going to transfer all the data
- -- using the following schema (->* means 1 to N relationship)
- -- prep_template -> raw_data ->* preprocessed_data ->* processed_data ->* analysis_sample
- -- Using this approach will duplicate the raw data objects. However, this is
- -- intentional as the raw data sharing should be done at filepath level rather
- -- than at raw data level. See issue #1459.
- FOR pt_vals IN
- SELECT prep_template_id, raw_data_id, filetype_id, study_id, data_type_id, email
- FROM qiita.prep_template
- JOIN qiita.raw_data USING (raw_data_id)
- JOIN qiita.study_prep_template USING (prep_template_id)
- JOIN qiita.study USING (study_id)
- WHERE raw_data_id IS NOT NULL
- LOOP
- -- Move the raw_data
- -- Get the visibility of the current raw data
- SELECT infer_rd_status(pt_vals.raw_data_id, pt_vals.study_id) INTO rd_vis_id;
-
- -- Insert the raw data in the artifact table
- INSERT INTO qiita.artifact (generated_timestamp, visibility_id, artifact_type_id, data_type_id)
- VALUES (now(), rd_vis_id, pt_vals.filetype_id, pt_vals.data_type_id)
- RETURNING artifact_id INTO rd_a_id;
-
- -- Relate the artifact with their studies
- INSERT INTO qiita.study_artifact (study_id, artifact_id)
- VALUES (pt_vals.study_id, rd_a_id);
-
- -- Relate the artifact with their filepaths
- FOR rd_fp_vals IN
- SELECT filepath_id
- FROM qiita.raw_filepath
- WHERE raw_data_id = pt_vals.raw_data_id
- LOOP
- INSERT INTO qiita.artifact_filepath (filepath_id, artifact_id)
- VALUES (rd_fp_vals.filepath_id, rd_a_id);
- END LOOP;
-
- -- Update the prep template table to point to the newly created artifact
- UPDATE qiita.prep_template
- SET artifact_id = rd_a_id
- WHERE prep_template_id = pt_vals.prep_template_id;
-
- -- Move the preprocessed data that has been generated from this prep template
- -- and, by extension, by the current raw data
- FOR ppd_vals IN
- SELECT preprocessed_data_id, preprocessed_params_table, preprocessed_params_id,
- data_type_id, submitted_to_vamps_status, processing_status, data_type_id
- FROM qiita.preprocessed_data
- JOIN qiita.prep_template_preprocessed_data USING (preprocessed_data_id)
- WHERE prep_template_id = pt_vals.prep_template_id
- LOOP
- -- Get the visibility of the current raw data
- SELECT infer_ppd_status(ppd_vals.preprocessed_data_id) INTO ppd_vis_id;
-
- -- Get the correct command id
- SELECT choose_command_id(ppd_vals.preprocessed_params_table) INTO ppd_cmd_id;
-
- -- Get the correct parameters
- SELECT generate_params(ppd_cmd_id, ppd_vals.preprocessed_params_id, rd_a_id) INTO params;
-
- -- Insert the preprocessed data in the artifact table
- INSERT INTO qiita.artifact (artifact_id, generated_timestamp, visibility_id,
- artifact_type_id, data_type_id, command_id,
- command_parameters, can_be_submitted_to_ebi,
- can_be_submitted_to_vamps)
- VALUES (ppd_vals.preprocessed_data_id, now(), ppd_vis_id,
- demux_type_id, ppd_vals.data_type_id, ppd_cmd_id,
- params, TRUE, TRUE)
- RETURNING artifact_id INTO ppd_a_id;
-
- -- Insert the job that created this preprocessed data
- -- Magic number 3: success status - if we have an artifact,
- -- it is because the job completed successfully
- INSERT INTO qiita.processing_job (email, command_id, command_parameters,
- processing_job_status_id)
- VALUES (pt_vals.email, ppd_cmd_id, params, 3)
- RETURNING processing_job_id INTO job_id;
-
- -- Link the parent with the job
- INSERT INTO qiita.artifact_processing_job (artifact_id, processing_job_id)
- VALUES (rd_a_id, job_id);
-
- -- Relate the artifact with the study
- INSERT INTO qiita.study_artifact (study_id, artifact_id)
- VALUES (pt_vals.study_id, ppd_a_id);
-
- -- Relate the artifact with their filepaths
- FOR ppd_fp_vals IN
- SELECT filepath_id
- FROM qiita.preprocessed_filepath
- WHERE preprocessed_data_id = ppd_vals.preprocessed_data_id
- LOOP
- INSERT INTO qiita.artifact_filepath (filepath_id, artifact_id)
- VALUES (ppd_fp_vals.filepath_id, ppd_a_id);
- END LOOP;
-
- -- Relate the artifact with its parent
- INSERT INTO qiita.parent_artifact (artifact_id, parent_id)
- VALUES (ppd_a_id, rd_a_id);
-
- -- Update the ebi run accession table so it points to the correct
- -- artifact rather than the preprocessed data
- UPDATE qiita.ebi_run_accession
- SET artifact_id = ppd_a_id
- WHERE preprocessed_data_id = ppd_vals.preprocessed_data_id;
-
- -- Update VAMPS value in case that it was submitted to VAMPS
- IF ppd_vals.submitted_to_vamps_status = 'submitted' THEN
- UPDATE qiita.artifact
- SET submitted_to_vamps = TRUE
- WHERE artifact_id = ppd_a_id;
- END IF;
-
- -- Move the processed data that has been generated from this
- -- preprocessed data
- FOR pd_vals IN
- SELECT processed_data_id, processed_params_table, processed_params_id,
- processed_date, data_type_id, processed_data_status_id, data_type_id
- FROM qiita.processed_data
- JOIN qiita.preprocessed_processed_data USING (processed_data_id)
- WHERE preprocessed_data_id = ppd_vals.preprocessed_data_id
- LOOP
- -- Get the correct parameters
- SELECT generate_params(3, pd_vals.processed_params_id, ppd_a_id) INTO params;
-
- -- Insert the processed data in the artifact table
- -- Magic number 3: we've created the software_command table here
- -- and we know the order in which we inserted the commands. The
- -- OTU picking command is number 3
- INSERT INTO qiita.artifact (generated_timestamp, visibility_id,
- artifact_type_id, data_type_id, command_id,
- command_parameters)
- VALUES (pd_vals.processed_date, pd_vals.processed_data_status_id,
- biom_type_id, ppd_vals.data_type_id, 3, params)
- RETURNING artifact_id into pd_a_id;
-
- -- Insert the job that created this processed data
- -- Magic number 3: success status - if we have an artifact,
- -- it is because the job completed successfully
- INSERT INTO qiita.processing_job (email, command_id, command_parameters,
- processing_job_status_id)
- VALUES (pt_vals.email, 3, params, 3)
- RETURNING processing_job_id INTO job_id;
-
- -- Link the parent with the job
- INSERT INTO qiita.artifact_processing_job (artifact_id, processing_job_id)
- VALUES (ppd_a_id, job_id);
-
- -- Relate the artifact with the study
- INSERT INTO qiita.study_artifact (study_id, artifact_id)
- VALUES (pt_vals.study_id, pd_a_id);
-
- -- Relate the artifact with their filepaths
- FOR pd_fp_vals IN
- SELECT filepath_id
- FROM qiita.processed_filepath
- WHERE processed_data_id = pd_vals.processed_data_id
- LOOP
- INSERT INTO qiita.artifact_filepath (filepath_id, artifact_id)
- VALUES (pd_fp_vals.filepath_id, pd_a_id);
- END LOOP;
-
- -- Relate the artifact with its parent
- INSERT INTO qiita.parent_artifact (artifact_id, parent_id)
- VALUES (pd_a_id, ppd_a_id);
-
- -- Update the analysis sample table so it points to the correct
- -- artifact
- UPDATE qiita.analysis_sample
- SET artifact_id = pd_a_id
- WHERE processed_data_id = pd_vals.processed_data_id;
- END LOOP;
- END LOOP;
- END LOOP;
-
- -- Move the study_pmid information to the publication and study_publication
- -- tables
- FOR study_pmids IN
- SELECT study_id, pmid
- FROM qiita.study_pmid
- LOOP
- INSERT INTO qiita.publication (doi, pubmed_id)
- SELECT study_pmids.pmid, study_pmids.pmid
- WHERE NOT EXISTS(
- SELECT doi FROM qiita.publication WHERE doi = study_pmids.pmid);
-
- INSERT INTO qiita.study_publication (study_id, publication_doi)
- VALUES (study_pmids.study_id, study_pmids.pmid);
- END LOOP;
-
- -- The column subdirectory in the data_directory table was unused.
- -- We are going to "recycle" it so we can indicate which mountpoints use the
- -- new file structure, in which a subdirectory for the artifact is created and
- -- the files are stored under that subdirectory, rather than just prefixing
- -- the files with the artifact_id
- ALTER TABLE qiita.data_directory ALTER COLUMN subdirectory SET DATA TYPE bool USING FALSE;
- ALTER TABLE qiita.data_directory ALTER COLUMN subdirectory SET DEFAULT FALSE;
- ALTER TABLE qiita.data_directory ALTER COLUMN subdirectory SET NOT NULL;
-
- -- The artifacts will be stored now based on the artifact type
- -- Add the new mountpoints to the qiita.data_directory table
- FOR a_type IN
- SELECT artifact_type
- FROM qiita.artifact_type
- LOOP
- INSERT INTO qiita.data_directory (data_type, mountpoint, subdirectory, active)
- VALUES (a_type.artifact_type, a_type.artifact_type, true, true);
- END LOOP;
-END $do$;
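-
--- Illustrative sanity check (an editor's sketch, not part of the original
--- patch): after the migration every transferred object should appear in the
--- artifact table; e.g. count the artifacts per type:
-SELECT t.artifact_type, COUNT(a.artifact_id) AS n_artifacts
-    FROM qiita.artifact a
-    JOIN qiita.artifact_type t ON t.artifact_type_id = a.artifact_type_id
-    GROUP BY t.artifact_type;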
-
--- Set the NOT NULL constraints that we couldn't set before because we were
--- transferring the data from the old structure
-ALTER TABLE qiita.ebi_run_accession ALTER COLUMN artifact_id SET NOT NULL;
-ALTER TABLE qiita.analysis_sample ALTER COLUMN artifact_id SET NOT NULL;
-
-ALTER TABLE qiita.analysis_sample DROP CONSTRAINT pk_analysis_sample;
-ALTER TABLE qiita.analysis_sample ADD CONSTRAINT pk_analysis_sample PRIMARY KEY ( analysis_id, artifact_id, sample_id ) ;
-
--- Drop the function that we use to infer the status of the raw data and
--- preprocessed artifact, as well as the function to get the correct parameter
-DROP FUNCTION infer_rd_status(bigint, bigint);
-DROP FUNCTION infer_ppd_status(bigint);
-DROP FUNCTION choose_command_id(varchar);
-DROP FUNCTION generate_params(bigint, bigint, bigint);
-
--- Drop the old SQL structure from the schema
-ALTER TABLE qiita.prep_template DROP COLUMN raw_data_id;
-ALTER TABLE qiita.ebi_run_accession DROP COLUMN preprocessed_data_id;
-ALTER TABLE qiita.analysis_sample DROP COLUMN processed_data_id;
-DROP TABLE qiita.preprocessed_processed_data;
-DROP TABLE qiita.study_processed_data;
-DROP TABLE qiita.processed_filepath;
-DROP TABLE qiita.processed_data;
-DROP TABLE qiita.preprocessed_filepath;
-DROP TABLE qiita.study_preprocessed_data;
-DROP TABLE qiita.prep_template_preprocessed_data;
-DROP TABLE qiita.preprocessed_data;
-DROP TABLE qiita.raw_filepath;
-DROP TABLE qiita.raw_data;
-DROP TABLE qiita.study_pmid;
-DROP TABLE qiita.processed_params_uclust;
-DROP TABLE qiita.processed_params_sortmerna;
-DROP TABLE qiita.preprocessed_sequence_454_params;
-DROP TABLE qiita.preprocessed_sequence_illumina_params;
-DROP TABLE qiita.preprocessed_spectra_params;
-
--- Create a function to return the roots of an artifact, i.e. the source artifacts
-CREATE FUNCTION qiita.find_artifact_roots(a_id bigint) RETURNS SETOF bigint AS $$
-BEGIN
- IF EXISTS(SELECT * FROM qiita.parent_artifact WHERE artifact_id = a_id) THEN
- RETURN QUERY WITH RECURSIVE root AS (
- SELECT artifact_id, parent_id
- FROM qiita.parent_artifact
- WHERE artifact_id = a_id
- UNION
- SELECT p.artifact_id, p.parent_id
- FROM qiita.parent_artifact p
- JOIN root r ON (r.parent_id = p.artifact_id)
- )
- SELECT DISTINCT parent_id
- FROM root
- WHERE parent_id NOT IN (SELECT artifact_id
- FROM qiita.parent_artifact);
- ELSE
- RETURN QUERY SELECT a_id;
- END IF;
-END
-$$ LANGUAGE plpgsql;
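-
--- Usage sketch (illustrative; the artifact id is hypothetical): walk up the
--- provenance graph from artifact 3 to its root (uploaded) artifacts:
-SELECT qiita.find_artifact_roots(3);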
-
-
--- Create a function to return the ancestors of an artifact
-CREATE FUNCTION qiita.artifact_ancestry(a_id bigint) RETURNS SETOF qiita.parent_artifact AS $$
-BEGIN
- IF EXISTS(SELECT * FROM qiita.parent_artifact WHERE artifact_id = a_id) THEN
- RETURN QUERY WITH RECURSIVE root AS (
- SELECT artifact_id, parent_id
- FROM qiita.parent_artifact
- WHERE artifact_id = a_id
- UNION
- SELECT p.artifact_id, p.parent_id
- FROM qiita.parent_artifact p
- JOIN root r ON (r.parent_id = p.artifact_id)
- )
- SELECT DISTINCT artifact_id, parent_id
- FROM root;
- END IF;
-END
-$$ LANGUAGE plpgsql;
-
--- Create a function to return the descendants of an artifact
-CREATE FUNCTION qiita.artifact_descendants(a_id bigint) RETURNS SETOF qiita.parent_artifact AS $$
-BEGIN
- IF EXISTS(SELECT * FROM qiita.parent_artifact WHERE parent_id = a_id) THEN
- RETURN QUERY WITH RECURSIVE root AS (
- SELECT artifact_id, parent_id
- FROM qiita.parent_artifact
- WHERE parent_id = a_id
- UNION
- SELECT p.artifact_id, p.parent_id
- FROM qiita.parent_artifact p
- JOIN root r ON (r.artifact_id = p.parent_id)
- )
- SELECT DISTINCT artifact_id, parent_id
- FROM root;
- END IF;
-END
-$$ LANGUAGE plpgsql;
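-
--- Usage sketch (illustrative; the artifact id is hypothetical): both
--- functions return (artifact_id, parent_id) edges, so the full processing
--- subtree below artifact 1 can be listed with:
-SELECT * FROM qiita.artifact_descendants(1);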
diff --git a/qiita_db/support_files/patches/34.sql b/qiita_db/support_files/patches/34.sql
deleted file mode 100644
index 3dcc3e1ed..000000000
--- a/qiita_db/support_files/patches/34.sql
+++ /dev/null
@@ -1,18 +0,0 @@
--- Dec 5, 2015
--- Adds table needed for oauth2 authentication
-
-CREATE TABLE qiita.oauth_identifiers (
- client_id varchar(50) NOT NULL,
- client_secret varchar(255),
- CONSTRAINT pk_oauth2 PRIMARY KEY ( client_id )
- );
-
- CREATE TABLE qiita.oauth_software (
- software_id bigint NOT NULL,
- client_id varchar NOT NULL,
- CONSTRAINT idx_oauth_software PRIMARY KEY ( software_id, client_id )
- ) ;
-CREATE INDEX idx_oauth_software_software ON qiita.oauth_software ( software_id ) ;
-CREATE INDEX idx_oauth_software_client ON qiita.oauth_software ( client_id ) ;
-ALTER TABLE qiita.oauth_software ADD CONSTRAINT fk_oauth_software_software FOREIGN KEY ( software_id ) REFERENCES qiita.software( software_id ) ;
-ALTER TABLE qiita.oauth_software ADD CONSTRAINT fk_oauth_software FOREIGN KEY ( client_id ) REFERENCES qiita.oauth_identifiers( client_id ) ;
diff --git a/qiita_db/support_files/patches/35.sql b/qiita_db/support_files/patches/35.sql
deleted file mode 100644
index a6ec2f82c..000000000
--- a/qiita_db/support_files/patches/35.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- Dec 5, 2015
--- Adds names to the artifacts
-
-ALTER TABLE qiita.artifact ADD name varchar(35) NOT NULL DEFAULT 'noname';
diff --git a/qiita_db/support_files/patches/36.sql b/qiita_db/support_files/patches/36.sql
deleted file mode 100644
index 2a59c291e..000000000
--- a/qiita_db/support_files/patches/36.sql
+++ /dev/null
@@ -1,485 +0,0 @@
--- Jan 25, 2016
--- Move the can_be_submitted_to_XX columns to the artifact type
-
-ALTER TABLE qiita.artifact DROP COLUMN can_be_submitted_to_ebi;
-ALTER TABLE qiita.artifact DROP COLUMN can_be_submitted_to_vamps;
-ALTER TABLE qiita.artifact_type ADD can_be_submitted_to_ebi bool DEFAULT 'FALSE' NOT NULL;
-ALTER TABLE qiita.artifact_type ADD can_be_submitted_to_vamps bool DEFAULT 'FALSE' NOT NULL;
-
-UPDATE qiita.artifact_type SET can_be_submitted_to_ebi = TRUE, can_be_submitted_to_vamps = TRUE
- WHERE artifact_type = 'Demultiplexed';
-
--- Jan 26, 2016
--- Relate the artifact types with the filepath types that they support
-
-CREATE TABLE qiita.artifact_type_filepath_type (
- artifact_type_id bigint NOT NULL,
- filepath_type_id bigint NOT NULL,
- required bool DEFAULT 'TRUE' NOT NULL,
- CONSTRAINT idx_artifact_type_filepath_type PRIMARY KEY ( artifact_type_id, filepath_type_id )
- ) ;
-
-CREATE INDEX idx_artifact_type_filepath_type_at ON qiita.artifact_type_filepath_type ( artifact_type_id ) ;
-CREATE INDEX idx_artifact_type_filepath_type_ft ON qiita.artifact_type_filepath_type ( filepath_type_id ) ;
-ALTER TABLE qiita.artifact_type_filepath_type ADD CONSTRAINT fk_artifact_type_filepath_type_at FOREIGN KEY ( artifact_type_id ) REFERENCES qiita.artifact_type( artifact_type_id ) ;
-ALTER TABLE qiita.artifact_type_filepath_type ADD CONSTRAINT fk_artifact_type_filepath_type_ft FOREIGN KEY ( filepath_type_id ) REFERENCES qiita.filepath_type( filepath_type_id ) ;
-
-INSERT INTO qiita.artifact_type_filepath_type (artifact_type_id, filepath_type_id, required) VALUES
- -- Artifact Type: SFF - Filepath Types: raw_sff (required)
- (1, 17, TRUE),
- -- Artifact Type: FASTA_Sanger - Filepath Types: raw_fasta (required)
- (2, 18, TRUE),
- -- Artifact Type: FASTQ - Filepath Types: raw_forward_seqs (required), raw_reverse_seqs (optional), raw_barcodes (required)
- (3, 1, TRUE), (3, 2, FALSE), (3, 3, TRUE),
- -- Artifact Type: FASTA - Filepath Types: raw_fasta (required), raw_qual (required)
- (4, 18, TRUE), (4, 19, TRUE),
- -- Artifact Type: per_sample_FASTQ - Filepath Types: raw_forward_seqs (required), raw_reverse_seqs (optional)
- (5, 1, TRUE), (5, 2, FALSE),
- -- Artifact Type: Demultiplexed - Filepath Types: preprocessed_fasta (required), preprocessed_fastq (required), preprocessed_demux (optional), log (optional)
- (6, 4, TRUE), (6, 5, TRUE), (6, 6, FALSE), (6, 13, FALSE),
- -- Artifact Type: BIOM - Filepath Types: biom (required), directory (optional), log (optional)
- (7, 7, TRUE), (7, 8, FALSE), (7, 13, FALSE);
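-
--- Illustrative query (an editor's sketch, not part of the original patch):
--- resolve the magic numbers above by listing the filepath types that each
--- artifact type accepts:
-SELECT t.artifact_type, f.filepath_type, x.required
-    FROM qiita.artifact_type_filepath_type x
-    JOIN qiita.artifact_type t USING (artifact_type_id)
-    JOIN qiita.filepath_type f USING (filepath_type_id)
-    ORDER BY t.artifact_type, x.required DESC, f.filepath_type;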
-
--- Feb 3, 2016
--- Add default workflows and store user workflows in the DB
-
--- The table command_parameter had as primary key the tuple (command_id, parameter_name).
--- This was enough previously, but now we need to reference this table so we can
--- link the parameter with the artifact types in case the type of the
--- parameter is "artifact". Thus, we change the primary key to a single bigserial
--- for simplicity
-ALTER TABLE qiita.command_parameter DROP CONSTRAINT idx_command_parameter_0;
-ALTER TABLE qiita.command_parameter ADD command_parameter_id bigserial NOT NULL;
-ALTER TABLE qiita.command_parameter ADD CONSTRAINT pk_command_parameter PRIMARY KEY ( command_parameter_id ) ;
-ALTER TABLE qiita.command_parameter ADD CONSTRAINT idx_command_parameter_0 UNIQUE ( command_id, parameter_name ) ;
-
--- In case the parameter is of type "artifact", this table holds which
--- specific set of artifact types the command accepts
-CREATE TABLE qiita.parameter_artifact_type (
- command_parameter_id bigserial NOT NULL,
- artifact_type_id bigint NOT NULL,
- CONSTRAINT idx_parameter_artifact_type PRIMARY KEY ( command_parameter_id, artifact_type_id )
- ) ;
-CREATE INDEX idx_parameter_artifact_type_param_id ON qiita.parameter_artifact_type ( command_parameter_id ) ;
-CREATE INDEX idx_parameter_artifact_type_type_id ON qiita.parameter_artifact_type ( artifact_type_id ) ;
-ALTER TABLE qiita.parameter_artifact_type ADD CONSTRAINT fk_parameter_artifact_type FOREIGN KEY ( command_parameter_id ) REFERENCES qiita.command_parameter( command_parameter_id ) ;
-ALTER TABLE qiita.parameter_artifact_type ADD CONSTRAINT fk_parameter_artifact_type_0 FOREIGN KEY ( artifact_type_id ) REFERENCES qiita.artifact_type( artifact_type_id ) ;
-
--- In case the command outputs a set of artifacts (including len(set) = 1),
--- this table holds the types of those artifacts
-CREATE TABLE qiita.command_output (
- command_output_id bigserial NOT NULL,
- name varchar NOT NULL,
- command_id bigint NOT NULL,
- artifact_type_id bigint NOT NULL,
- CONSTRAINT pk_command_output PRIMARY KEY ( command_output_id ),
- CONSTRAINT idx_command_output UNIQUE ( name, command_id )
- ) ;
-CREATE INDEX idx_command_output_cmd_id ON qiita.command_output ( command_id ) ;
-CREATE INDEX idx_command_output_type_id ON qiita.command_output ( artifact_type_id ) ;
-ALTER TABLE qiita.command_output ADD CONSTRAINT fk_command_output FOREIGN KEY ( command_id ) REFERENCES qiita.software_command( command_id ) ;
-ALTER TABLE qiita.command_output ADD CONSTRAINT fk_command_output_0 FOREIGN KEY ( artifact_type_id ) REFERENCES qiita.artifact_type( artifact_type_id ) ;
-
--- The default workflows of a software (plugin) are represented using a graph in
--- which the nodes are the commands and the default parameter sets used,
--- and the edges represent the job dependencies.
-
--- The table default_workflow links each software with its set of default workflows
-CREATE TABLE qiita.default_workflow (
- default_workflow_id bigserial NOT NULL,
- software_id bigint NOT NULL,
- name varchar NOT NULL,
- CONSTRAINT pk_default_workflow PRIMARY KEY ( default_workflow_id ),
- CONSTRAINT idx_default_workflow UNIQUE ( software_id, name )
- ) ;
-CREATE INDEX idx_default_workflow_software ON qiita.default_workflow ( software_id ) ;
-ALTER TABLE qiita.default_workflow ADD CONSTRAINT fk_default_workflow_software FOREIGN KEY ( software_id ) REFERENCES qiita.software( software_id ) ;
-
--- The table default_workflow_node stores the nodes information from the
--- workflow graph
-CREATE TABLE qiita.default_workflow_node (
- default_workflow_node_id bigserial NOT NULL,
- default_workflow_id bigint NOT NULL,
- command_id bigint NOT NULL,
- default_parameter_set_id bigint NOT NULL,
- CONSTRAINT pk_default_workflow_command PRIMARY KEY ( default_workflow_node_id )
- ) ;
-CREATE INDEX idx_default_workflow_command_cmd_id ON qiita.default_workflow_node ( command_id ) ;
-CREATE INDEX idx_default_workflow_command_dflt_param_id ON qiita.default_workflow_node ( default_parameter_set_id ) ;
-CREATE INDEX idx_default_workflow_command_dflt_wf_id ON qiita.default_workflow_node ( default_workflow_id ) ;
-ALTER TABLE qiita.default_workflow_node ADD CONSTRAINT fk_default_workflow_command FOREIGN KEY ( command_id ) REFERENCES qiita.software_command( command_id ) ;
-ALTER TABLE qiita.default_workflow_node ADD CONSTRAINT fk_default_workflow_command_0 FOREIGN KEY ( default_parameter_set_id ) REFERENCES qiita.default_parameter_set( default_parameter_set_id ) ;
-ALTER TABLE qiita.default_workflow_node ADD CONSTRAINT fk_default_workflow_command_1 FOREIGN KEY ( default_workflow_id ) REFERENCES qiita.default_workflow( default_workflow_id ) ;
-
--- The table default_workflow_edge stores the edge of the workflow graph
-CREATE TABLE qiita.default_workflow_edge (
- default_workflow_edge_id bigserial NOT NULL,
- parent_id bigint NOT NULL,
- child_id bigint NOT NULL,
- CONSTRAINT pk_default_workflow_edge PRIMARY KEY ( default_workflow_edge_id )
- ) ;
-CREATE INDEX idx_default_workflow_edge_parent ON qiita.default_workflow_edge ( parent_id ) ;
-CREATE INDEX idx_default_workflow_edge_child ON qiita.default_workflow_edge ( child_id ) ;
-ALTER TABLE qiita.default_workflow_edge ADD CONSTRAINT fk_default_workflow_edge FOREIGN KEY ( parent_id ) REFERENCES qiita.default_workflow_node( default_workflow_node_id ) ;
-ALTER TABLE qiita.default_workflow_edge ADD CONSTRAINT fk_default_workflow_edge_0 FOREIGN KEY ( child_id ) REFERENCES qiita.default_workflow_node( default_workflow_node_id ) ;
-
--- The table default_workflow_edge_connections stores the metadata information
--- about the edges. Specifically, it stores which outputs are connected to
--- which inputs across commands in the default workflow command.
-CREATE TABLE qiita.default_workflow_edge_connections (
- default_workflow_edge_id bigint NOT NULL,
- parent_output_id bigint NOT NULL,
- child_input_id bigint NOT NULL,
- CONSTRAINT idx_default_workflow_edge_connections PRIMARY KEY ( default_workflow_edge_id, parent_output_id, child_input_id )
- ) ;
-CREATE INDEX idx_default_workflow_edge_connections_parent ON qiita.default_workflow_edge_connections ( parent_output_id ) ;
-CREATE INDEX idx_default_workflow_edge_connections_child ON qiita.default_workflow_edge_connections ( child_input_id ) ;
-CREATE INDEX idx_default_workflow_edge_connections_edge ON qiita.default_workflow_edge_connections ( default_workflow_edge_id ) ;
-ALTER TABLE qiita.default_workflow_edge_connections ADD CONSTRAINT fk_default_workflow_edge_connections FOREIGN KEY ( parent_output_id ) REFERENCES qiita.command_output( command_output_id ) ;
-ALTER TABLE qiita.default_workflow_edge_connections ADD CONSTRAINT fk_default_workflow_edge_connections_0 FOREIGN KEY ( child_input_id ) REFERENCES qiita.command_parameter( command_parameter_id ) ;
-ALTER TABLE qiita.default_workflow_edge_connections ADD CONSTRAINT fk_default_workflow_edge_connections_1 FOREIGN KEY ( default_workflow_edge_id ) REFERENCES qiita.default_workflow_edge( default_workflow_edge_id ) ;
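-
--- Illustrative query (an editor's sketch, not part of the original patch):
--- render each default workflow edge in human-readable form, i.e. which named
--- output of the parent command feeds which input parameter of the child:
-SELECT e.default_workflow_edge_id, co.name AS parent_output, cp.parameter_name AS child_input
-    FROM qiita.default_workflow_edge e
-    JOIN qiita.default_workflow_edge_connections ec USING (default_workflow_edge_id)
-    JOIN qiita.command_output co ON co.command_output_id = ec.parent_output_id
-    JOIN qiita.command_parameter cp ON cp.command_parameter_id = ec.child_input_id;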
-
--- The table qiita.processing_job_workflow holds the workflow actually executed
--- by the user. We allow the user to name the workflow for easier reference
-CREATE TABLE qiita.processing_job_workflow (
- processing_job_workflow_id bigserial NOT NULL,
- email varchar NOT NULL,
- name varchar ,
- CONSTRAINT pk_processing_job_workflow PRIMARY KEY ( processing_job_workflow_id )
- ) ;
-CREATE INDEX idx_processing_job_workflow ON qiita.processing_job_workflow ( email ) ;
-ALTER TABLE qiita.processing_job_workflow ADD CONSTRAINT fk_processing_job_workflow FOREIGN KEY ( email ) REFERENCES qiita.qiita_user( email ) ;
-
--- The processing_job_workflow_root table connects the processing_job_workflow with
--- its initial set of jobs. From these jobs, we can trace down the rest of the
--- workflow
-CREATE TABLE qiita.processing_job_workflow_root (
- processing_job_workflow_id bigint NOT NULL,
- processing_job_id uuid NOT NULL,
- CONSTRAINT idx_processing_job_workflow_root_0 PRIMARY KEY ( processing_job_workflow_id, processing_job_id )
- ) ;
-CREATE INDEX idx_processing_job_workflow_root_wf ON qiita.processing_job_workflow_root ( processing_job_workflow_id ) ;
-CREATE INDEX idx_processing_job_workflow_root_job ON qiita.processing_job_workflow_root ( processing_job_id ) ;
-ALTER TABLE qiita.processing_job_workflow_root ADD CONSTRAINT fk_processing_job_workflow_root_job FOREIGN KEY ( processing_job_workflow_id ) REFERENCES qiita.processing_job_workflow( processing_job_workflow_id ) ;
-ALTER TABLE qiita.processing_job_workflow_root ADD CONSTRAINT fk_processing_job_workflow_root_wf FOREIGN KEY ( processing_job_id ) REFERENCES qiita.processing_job( processing_job_id ) ;
-
--- the table parent_processing_job stores the edges between the
--- different processing jobs. The specific connections are encoded in the
--- processing_job's command_parameters attribute (JSON)
-CREATE TABLE qiita.parent_processing_job (
- parent_id uuid NOT NULL,
- child_id uuid NOT NULL,
- CONSTRAINT idx_parent_processing_job PRIMARY KEY ( parent_id, child_id )
- ) ;
-CREATE INDEX idx_parent_processing_job_parent ON qiita.parent_processing_job ( parent_id ) ;
-CREATE INDEX idx_parent_processing_job_child ON qiita.parent_processing_job ( child_id ) ;
-ALTER TABLE qiita.parent_processing_job ADD CONSTRAINT fk_parent_processing_job_parent FOREIGN KEY ( parent_id ) REFERENCES qiita.processing_job( processing_job_id ) ;
-ALTER TABLE qiita.parent_processing_job ADD CONSTRAINT fk_parent_processing_job_child FOREIGN KEY ( child_id ) REFERENCES qiita.processing_job( processing_job_id ) ;
-
--- The workflows need to connect the different outputs of a processing job with
--- the inputs of the next processing job. The following table holds which
--- artifact was generated in each named output, so we can backtrack and perform
--- the correct connections when executing the workflow. Note that this information
--- is only needed for the workflows, so there is no need to populate the
--- table with all the artifacts that have already been generated. Furthermore,
--- there is no way to retrieve this information once the job has been executed
--- and be 100% sure that we are connecting the jobs and the artifacts correctly
-CREATE TABLE qiita.artifact_output_processing_job (
- artifact_id bigint NOT NULL,
- processing_job_id uuid NOT NULL,
- command_output_id bigint NOT NULL
- ) ;
-CREATE INDEX idx_artifact_output_processing_job_artifact ON qiita.artifact_output_processing_job ( artifact_id ) ;
-CREATE INDEX idx_artifact_output_processing_job_job ON qiita.artifact_output_processing_job ( processing_job_id ) ;
-CREATE INDEX idx_artifact_output_processing_job_cmd ON qiita.artifact_output_processing_job ( command_output_id ) ;
-ALTER TABLE qiita.artifact_output_processing_job ADD CONSTRAINT fk_artifact_output_processing_job_artifact FOREIGN KEY ( artifact_id ) REFERENCES qiita.artifact( artifact_id ) ;
-ALTER TABLE qiita.artifact_output_processing_job ADD CONSTRAINT fk_artifact_output_processing_job_job FOREIGN KEY ( processing_job_id ) REFERENCES qiita.processing_job( processing_job_id ) ;
-ALTER TABLE qiita.artifact_output_processing_job ADD CONSTRAINT fk_artifact_output_processing_job_cmd FOREIGN KEY ( command_output_id ) REFERENCES qiita.command_output( command_output_id ) ;
-
--- In order to successfully represent the current status of a job,
--- we need to identify whether the job is part of a workflow under construction
--- and whether the job is waiting for a previous job to finish in order to be executed
-INSERT INTO qiita.processing_job_status (processing_job_status, processing_job_status_description)
- VALUES ('in_construction', 'The job is one of the source nodes of a workflow that is in construction'),
- ('waiting', 'The job is waiting for a previous job in the workflow to be completed in order to be executed.');
-
--- In order to keep better track of the jobs that we are waiting for,
--- we add another JSON column to the processing_job table
-ALTER TABLE qiita.processing_job ADD pending json ;
-
--- Populate the newly created tables
-DO $do$
-DECLARE
- in_slq_param_id bigint;
- in_sl_param_id bigint;
- in_po_param_id bigint;
- dflt_slq_id bigint;
- dflt_sl_id bigint;
- dflt_per_sample_id bigint;
- dflt_po_id bigint;
-BEGIN
- -- Add the artifact type information for the input parameters for the commands
- -- command_id = 1 -> Split libraries FASTQ
- SELECT command_parameter_id FROM qiita.command_parameter
- WHERE command_id = 1 AND parameter_name = 'input_data'
- INTO in_slq_param_id;
-
- -- Split libraries FASTQ supports FASTQ (3) and per_sample_FASTQ (5)
- INSERT INTO qiita.parameter_artifact_type (command_parameter_id, artifact_type_id)
- VALUES (in_slq_param_id, 3), (in_slq_param_id, 5);
-
- -- command_id = 2 -> Split libraries
- SELECT command_parameter_id FROM qiita.command_parameter
- WHERE command_id = 2 AND parameter_name = 'input_data'
- INTO in_sl_param_id;
-
- -- Split libraries supports SFF (1), FASTA_Sanger (2), FASTA (4)
- INSERT INTO qiita.parameter_artifact_type (command_parameter_id, artifact_type_id)
- VALUES (in_sl_param_id, 1), (in_sl_param_id, 2), (in_sl_param_id, 4);
-
- -- command_id = 3 -> Pick closed-reference OTUs
- SELECT command_parameter_id FROM qiita.command_parameter
- WHERE command_id = 3 AND parameter_name = 'input_data'
- INTO in_po_param_id;
-
- -- Pick closed-reference OTUs supports Demultiplexed (6)
- INSERT INTO qiita.parameter_artifact_type (command_parameter_id, artifact_type_id)
- VALUES (in_po_param_id, 6);
-
-
- -- Add the output information for each command
- INSERT INTO qiita.command_output (name, command_id, artifact_type_id)
- VALUES ('demultiplexed', 1, 6), ('demultiplexed', 2, 6), ('OTU table', 3, 7);
-
-
- -- Add the default workflow for the target gene pipeline.
- -- We are going to create three different default workflows for the
- -- target gene plugin:
- -- 1) FASTQ upstream workflow: split_libraries_fastq.py + OTU picking
- -- 2) FASTA upstream workflow: split_libraries.py + OTU picking
- -- 3) Per sample FASTQ upstream workflow:
- -- split_libraries_fastq.py + OTU picking using per sample fastq parameters
- -- In order to choose the default parameter set, we are going to choose
- -- the one with the minimum id. The reason for this is that in the live system
- -- there are default parameter sets that were added manually, so we don't
- -- know the ids of those parameter sets. Note that we do know
- -- the command ids because they're inserted in patch 33.sql, and that is
- -- the only way of adding commands at this point.
-
- -- Insert default workflow
- INSERT INTO qiita.default_workflow (software_id, name)
- VALUES (1, 'FASTQ upstream workflow'),
- (1, 'FASTA upstream workflow'),
- (1, 'Per sample FASTQ upstream workflow');
-
- -- Retrieve all the ids of the default parameter set that we need
- SELECT min(default_parameter_set_id)
- FROM qiita.default_parameter_set
- WHERE command_id = 1
- INTO dflt_slq_id;
-
- SELECT min(default_parameter_set_id)
- FROM qiita.default_parameter_set
- WHERE command_id = 2
- INTO dflt_sl_id;
-
- SELECT min(default_parameter_set_id)
- FROM qiita.default_parameter_set
- WHERE command_id = 1 AND parameter_set->>'barcode_type' = 'not-barcoded'
- INTO dflt_per_sample_id;
-
- SELECT min(default_parameter_set_id)
- FROM qiita.default_parameter_set
- WHERE command_id = 3
- INTO dflt_po_id;
-
- -- We need 2 nodes per workflow -> 6 nodes
- INSERT INTO qiita.default_workflow_node (default_workflow_id, command_id, default_parameter_set_id)
- VALUES (1, 1, dflt_slq_id), (1, 3, dflt_po_id),
- (2, 2, dflt_sl_id), (2, 3, dflt_po_id),
- (3, 1, dflt_per_sample_id), (3, 3, dflt_po_id);
-
- -- We need 1 edge per workflow -> 3 edges
- INSERT INTO qiita.default_workflow_edge (parent_id, child_id)
- VALUES (1, 2), (3, 4), (5, 6);
-
- INSERT INTO qiita.default_workflow_edge_connections (default_workflow_edge_id, parent_output_id, child_input_id)
- VALUES (1, 1, in_po_param_id),
- (2, 2, in_po_param_id),
- (3, 1, in_po_param_id);
-
-END $do$;
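-
--- Illustrative check (an editor's sketch, not part of the original patch):
--- each of the three default workflows created above should now have exactly
--- two nodes and one edge:
-SELECT default_workflow_id, COUNT(*) AS n_nodes
-    FROM qiita.default_workflow_node
-    GROUP BY default_workflow_id
-    ORDER BY default_workflow_id;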
-
--- Create a function to return all the edges of a processing_job_workflow
-CREATE FUNCTION qiita.get_processing_workflow_edges(wf_id bigint) RETURNS SETOF qiita.parent_processing_job AS $$
-BEGIN
- RETURN QUERY WITH RECURSIVE edges AS (
- SELECT parent_id, child_id
- FROM qiita.parent_processing_job
- WHERE parent_id IN (SELECT processing_job_id
- FROM qiita.processing_job_workflow_root
- WHERE processing_job_workflow_id = wf_id)
- UNION
- SELECT p.parent_id, p.child_id
- FROM qiita.parent_processing_job p
- JOIN edges e ON (e.child_id = p.parent_id)
- )
- SELECT DISTINCT parent_id, child_id
- FROM edges;
-END
-$$ LANGUAGE plpgsql;
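-
--- Usage sketch (illustrative; the workflow id is hypothetical): list every
--- (parent_id, child_id) job edge reachable from the roots of workflow 1:
-SELECT * FROM qiita.get_processing_workflow_edges(1);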
-
--- Mar 7, 2016
--- Add reference_id and command_id of the input file to jobs
-
--- Changes to tables
-
-ALTER TABLE qiita.job ADD input_file_reference_id bigint;
-
-ALTER TABLE qiita.job ADD input_file_software_command_id bigint;
-
-CREATE INDEX idx_job_0 ON qiita.job ( input_file_reference_id ) ;
-
-CREATE INDEX idx_job_1 ON qiita.job ( input_file_software_command_id ) ;
-
-ALTER TABLE qiita.job ADD CONSTRAINT fk_job_reference FOREIGN KEY ( input_file_reference_id ) REFERENCES qiita.reference( reference_id );
-
-ALTER TABLE qiita.job ADD CONSTRAINT fk_job_software_command FOREIGN KEY ( input_file_software_command_id ) REFERENCES qiita.software_command( command_id );
-
--- Change values:
--- input_file_reference_id can be = 1 as it's only needed for job processing
--- input_file_software_command_id = 3 as it's closed-reference picking.
-
-UPDATE qiita.job SET input_file_reference_id = 1;
-ALTER TABLE qiita.job ALTER COLUMN input_file_reference_id SET NOT NULL;
-
-UPDATE qiita.job SET input_file_software_command_id = 3;
-ALTER TABLE qiita.job ALTER COLUMN input_file_software_command_id SET NOT NULL;
-
--- Mar 12, 2016
--- Add software_type table. This new table allows us to have different types of
--- plugins. Here, we will introduce a new type, the "type plugin", and define
--- the previous type as the "processing plugin" type. The new "type plugin" group
--- defines plugins that do not perform any processing on the artifacts but
--- are able to validate that they're correct and generate their summary page.
--- These new plugins are special. They are not directly visible to the end Qiita
--- user, but they are useful to plugin developers so they do not need to re-define
--- types that already exist. This way, multiple plugins can share the same
--- type of artifacts without depending on another "processing" plugin.
-
--- Add the type HTML summary to the list of supported filepath types
--- Note that we are not linking this filepath type with any specific artifact
--- type. The reason is that all artifacts should have it and users are not
--- allowed to upload this file, since it is internally generated
-INSERT INTO qiita.filepath_type (filepath_type) VALUES ('html_summary');
-
--- Create the new table to hold the software types
-CREATE TABLE qiita.software_type (
- software_type_id bigserial NOT NULL,
- software_type varchar NOT NULL,
- description varchar NOT NULL,
- CONSTRAINT pk_software_type PRIMARY KEY ( software_type_id )
- ) ;
-
--- Add the FK to the software table
-ALTER TABLE qiita.software ADD software_type_id bigint;
-CREATE INDEX idx_software_type ON qiita.software ( software_type_id ) ;
-ALTER TABLE qiita.software ADD CONSTRAINT fk_software_software_type FOREIGN KEY ( software_type_id ) REFERENCES qiita.software_type( software_type_id ) ;
-
--- The software (plugins) of type "type plugin" need to record which types they define
-CREATE TABLE qiita.software_artifact_type (
- software_id bigint NOT NULL,
- artifact_type_id bigint NOT NULL,
- CONSTRAINT idx_software_artifact_type PRIMARY KEY ( software_id, artifact_type_id )
- ) ;
-CREATE INDEX idx_software_artifact_type_artifact ON qiita.software_artifact_type ( artifact_type_id ) ;
-CREATE INDEX idx_software_artifact_type_software ON qiita.software_artifact_type ( software_id ) ;
-COMMENT ON TABLE qiita.software_artifact_type IS 'In case that the software is of type "type plugin", it holds the artifact types that such software can validate and generate the summary.';
-ALTER TABLE qiita.software_artifact_type ADD CONSTRAINT fk_software_artifact_type_at FOREIGN KEY ( artifact_type_id ) REFERENCES qiita.artifact_type( artifact_type_id ) ;
-ALTER TABLE qiita.software_artifact_type ADD CONSTRAINT fk_software_artifact_type_sw FOREIGN KEY ( software_id ) REFERENCES qiita.software( software_id ) ;
-
--- The new type of plugins has a new command, create-artifact, that, given
--- a prep template, the artifact type and the files to be added, validates the
--- files and performs any needed corrections to add the files to the system. An
--- example of this processing is adding new BIOM tables. With the information
--- in the prep template, the plugin can rename the samples in the biom table
--- to match the sample names in the prep template.
-CREATE TABLE qiita.prep_template_processing_job (
- prep_template_id bigint NOT NULL,
- processing_job_id uuid NOT NULL,
- CONSTRAINT idx_prep_template_processing_job PRIMARY KEY ( prep_template_id, processing_job_id )
- ) ;
-CREATE INDEX idx_prep_template_processing_job_pt_id ON qiita.prep_template_processing_job ( prep_template_id ) ;
-CREATE INDEX idx_prep_template_processing_job_job ON qiita.prep_template_processing_job ( processing_job_id ) ;
-ALTER TABLE qiita.prep_template_processing_job ADD CONSTRAINT fk_prep_template_processing_job_pt FOREIGN KEY ( prep_template_id ) REFERENCES qiita.prep_template( prep_template_id ) ;
-ALTER TABLE qiita.prep_template_processing_job ADD CONSTRAINT fk_prep_template_processing_job_job FOREIGN KEY ( processing_job_id ) REFERENCES qiita.processing_job( processing_job_id ) ;
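Once validation jobs are registered through this table, looking up the jobs attached to a prep is a single query; a minimal sketch, assuming a prep template with ID 1 exists:

    -- illustrative only
    SELECT processing_job_id
      FROM qiita.prep_template_processing_job
     WHERE prep_template_id = 1;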
-
--- Populate the table software_type with the 2 types of plugins
-INSERT INTO qiita.software_type (software_type, description)
- VALUES ('artifact transformation', 'A plugin that performs some kind of processing/transformation/manipulation over an artifact.'),
- ('artifact definition', 'A plugin that defines new artifact types.');
-
--- All the software present in the system belongs to type 1 (artifact transformation)
-UPDATE qiita.software SET software_type_id = 1;
--- Setting the NOT NULL attribute here since the column was null until the previous statement
-ALTER TABLE qiita.software ALTER COLUMN software_type_id SET NOT NULL;
-
--- We are going to create 2 new type plugins.
--- The first one will define the type BIOM, while the other one will define
--- all the types needed for TARGET_GENE. This separation is better since BIOM
--- is a special type that even Qiita understands, as it is the input for
--- analyses and almost all plugins will need it. However, the rest of the types
--- are specific to the target gene plugin.
-INSERT INTO qiita.software (name, version, description, environment_script, start_script, software_type_id)
- VALUES ('BIOM type', '2.1.4 - Qiime2', 'The Biological Observation Matrix format', 'source ~/virtualenv/python2.7/bin/activate; export PATH=$HOME/miniconda3/bin/:$PATH; . activate qtp-biom', 'start_biom', 2),
- ('Target Gene type', '0.1.0', 'Target gene artifact types plugin', 'source ~/virtualenv/python2.7/bin/activate; export PATH=$HOME/miniconda3/bin/:$PATH; source activate qiita', 'start_target_gene_types', 2);
--- Add BIOM publication
-INSERT INTO qiita.publication (doi, pubmed_id) VALUES ('10.1186/2047-217X-1-7', '23587224');
-INSERT INTO qiita.software_publication (software_id, publication_doi) VALUES (2, '10.1186/2047-217X-1-7');
-
--- Add the commands - these will not be visible to the user as they're used internally
-INSERT INTO qiita.software_command (software_id, name, description) VALUES
- -- This will have the ID 4
- (2, 'Validate', 'Validates a new artifact of type BIOM'),
- -- This will have the ID 5
- (2, 'Generate HTML summary', 'Generates the HTML summary of a BIOM artifact'),
- -- This will have the ID 6
- (3, 'Validate', 'Validates a new artifact of the given target gene type'),
- -- This will have the ID 7
- (3, 'Generate HTML summary', 'Generates the HTML summary of a given target gene type artifact');
-
--- Add the parameters - in this case all are required and they will be filled
--- internally by the system, so there is no need to populate the default_parameter_set
--- table, because there are no optional parameters.
-INSERT INTO qiita.command_parameter (command_id, parameter_name, parameter_type, required)
- VALUES (4, 'template', 'prep_template', True),
- (4, 'files', 'string', True),
- (4, 'artifact_type', 'string', True),
- (5, 'input_data', 'artifact', True),
- (6, 'template', 'prep_template', True),
- (6, 'files', 'string', True),
- (6, 'artifact_type', 'string', True),
- (7, 'input_data', 'artifact', True);
-
--- Relate the artifact_type with the software that defines it
-DO $do$
-DECLARE
- biom_id bigint;
- at_id bigint;
-BEGIN
- -- First the BIOM
- SELECT artifact_type_id FROM qiita.artifact_type WHERE artifact_type = 'BIOM' INTO biom_id;
- INSERT INTO qiita.software_artifact_type (software_id, artifact_type_id)
- VALUES (2, biom_id);
-
- -- Then the rest
- FOR at_id IN
- SELECT artifact_type_id FROM qiita.artifact_type WHERE artifact_type <> 'BIOM'
- LOOP
- INSERT INTO qiita.software_artifact_type (software_id, artifact_type_id)
- VALUES (3, at_id);
- END LOOP;
-END $do$;
-
--- Mar 28, 2016
-INSERT INTO qiita.filepath_type (filepath_type) VALUES ('tgz');
diff --git a/qiita_db/support_files/patches/37.sql b/qiita_db/support_files/patches/37.sql
deleted file mode 100644
index 19380436a..000000000
--- a/qiita_db/support_files/patches/37.sql
+++ /dev/null
@@ -1,36 +0,0 @@
--- Apr 18, 2016
--- Adding phred_offset to split libraries
-
-DO $do$
-DECLARE
- cmd_id bigint;
-BEGIN
- -- selecting command_id of interest
- SELECT command_id FROM qiita.software_command WHERE name = 'Split libraries FASTQ' INTO cmd_id;
-
- -- adding new parameter
- INSERT INTO qiita.command_parameter (command_id, parameter_name, parameter_type, required, default_value)
- VALUES (cmd_id, 'phred_offset', 'string', False, '');
-
- -- updating all current artifacts that were generated with this command
- UPDATE qiita.artifact
- SET command_parameters = (
- substring(command_parameters::text FROM 0 FOR char_length(command_parameters::text)) || ',"phred_offset":""}'
- )::json
- WHERE command_id=cmd_id;
-
- -- updating the default_parameter_set
- UPDATE qiita.default_parameter_set
- SET parameter_set = (
- substring(parameter_set::text FROM 0 FOR char_length(parameter_set::text)) || ',"phred_offset":""}'
- )::json
- WHERE command_id=cmd_id;
-
- -- inserting new possible default_parameter_sets
- INSERT INTO qiita.default_parameter_set (command_id, parameter_set_name, parameter_set)
- VALUES (cmd_id, 'per sample FASTQ defaults, phred_offset 33',
- '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"not-barcoded","max_barcode_errors":1.5,"phred_offset":"33"}'::json);
- INSERT INTO qiita.default_parameter_set (command_id, parameter_set_name, parameter_set)
- VALUES (cmd_id, 'per sample FASTQ defaults, phred_offset 64',
- '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"not-barcoded","max_barcode_errors":1.5,"phred_offset":"64"}'::json);
-END $do$;
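The substring/char_length concatenation above is a text-surgery way of appending a key to a json column (predating jsonb's || operator): it strips the trailing '}' and splices in the new key/value pair. A self-contained sketch with a made-up parameter object:

    -- illustrative only: append "phred_offset":"" to an existing json object
    SELECT (substring(s FROM 0 FOR char_length(s)) || ',"phred_offset":""}')::json
      FROM (SELECT '{"barcode_type":"golay_12"}'::text AS s) t;
    -- returns {"barcode_type":"golay_12","phred_offset":""}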
diff --git a/qiita_db/support_files/patches/38.sql b/qiita_db/support_files/patches/38.sql
deleted file mode 100644
index 03c888955..000000000
--- a/qiita_db/support_files/patches/38.sql
+++ /dev/null
@@ -1,5 +0,0 @@
--- Jun 3, 2016
--- Removing intermediary tables for the sample and prep info files
-
-DROP TABLE qiita.study_sample_columns;
-DROP TABLE qiita.prep_columns;
diff --git a/qiita_db/support_files/patches/39.sql b/qiita_db/support_files/patches/39.sql
deleted file mode 100644
index 1650cee44..000000000
--- a/qiita_db/support_files/patches/39.sql
+++ /dev/null
@@ -1,15 +0,0 @@
--- Sep 7, 2016
--- Adding an isnumeric function to avoid problems with text values
--- http://stackoverflow.com/a/16206123
-
-CREATE OR REPLACE FUNCTION isnumeric(text) RETURNS BOOLEAN AS $$
-DECLARE x NUMERIC;
-BEGIN
- x = $1::NUMERIC;
- RETURN TRUE;
-EXCEPTION WHEN others THEN
- RETURN FALSE;
-END;
-$$
-STRICT
-LANGUAGE plpgsql IMMUTABLE;
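A quick illustration of the helper's behavior (note that, because it is declared STRICT, a NULL input yields NULL rather than FALSE):

    SELECT isnumeric('3.14');  -- TRUE
    SELECT isnumeric('abc');   -- FALSE (the cast raises, the handler returns FALSE)
    SELECT isnumeric(NULL);    -- NULL, due to STRICT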
diff --git a/qiita_db/support_files/patches/4.sql b/qiita_db/support_files/patches/4.sql
deleted file mode 100644
index 43754a9c1..000000000
--- a/qiita_db/support_files/patches/4.sql
+++ /dev/null
@@ -1 +0,0 @@
-INSERT INTO qiita.data_directory (data_type, mountpoint, subdirectory, active) VALUES ('templates', 'templates', '', true);
diff --git a/qiita_db/support_files/patches/40.sql b/qiita_db/support_files/patches/40.sql
deleted file mode 100644
index 1d96bbece..000000000
--- a/qiita_db/support_files/patches/40.sql
+++ /dev/null
@@ -1,76 +0,0 @@
--- Sep 21, 2016
--- Adding active column to the software and software command table to be able
--- to disallow plugins and/or individual software commands
-
-ALTER TABLE qiita.software ADD active bool DEFAULT 'False' NOT NULL;
-
-ALTER TABLE qiita.software_command ADD active bool DEFAULT 'True' NOT NULL;
-
--- Add function to set a key in a JSON value
--- Adapted from http://stackoverflow.com/a/23500670/3746629
-CREATE OR REPLACE FUNCTION qiita.json_object_set_key(
- "json" json,
- "key_to_set" TEXT,
- "value_to_set" anyelement
-)
- RETURNS json
- LANGUAGE sql
- IMMUTABLE
- STRICT
-AS $function$
-SELECT concat('{', string_agg(to_json("key") || ':' || "value", ','), '}')::json
- FROM (SELECT *
- FROM json_each("json")
- WHERE "key" <> "key_to_set"
- UNION ALL
- SELECT "key_to_set", to_json("value_to_set")) AS "fields"
-$function$;
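A hedged usage sketch: the function rebuilds the object with every key except key_to_set, then appends the new pair, so it both overwrites existing keys and adds missing ones (key order in the output is not guaranteed):

    SELECT qiita.json_object_set_key('{"a":1,"b":2}'::json, 'b', 3);
    -- e.g. {"a":1,"b":3}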
-
--- Change the phred_offset from string to choice
-DO $do$
-DECLARE
- cmd_id bigint;
- dflt_p RECORD;
- a_vals RECORD;
- j_vals RECORD;
-BEGIN
- -- select command id of interest
- SELECT command_id FROM qiita.software_command WHERE name = 'Split libraries FASTQ' INTO cmd_id;
-
- -- Update the phred_offset parameter type
- UPDATE qiita.command_parameter SET parameter_type = 'choice:["auto", "33", "64"]', default_value = 'auto'
- WHERE parameter_name = 'phred_offset' AND command_id = cmd_id;
-
- -- Update all the default parameter sets to use "auto" instead of ""
- FOR dflt_p IN
- SELECT *
- FROM qiita.default_parameter_set
- WHERE command_id = cmd_id AND parameter_set->>'phred_offset' = ''
- LOOP
- UPDATE qiita.default_parameter_set
- SET parameter_set = qiita.json_object_set_key(dflt_p.parameter_set, 'phred_offset', 'auto'::varchar)
- WHERE default_parameter_set_id = dflt_p.default_parameter_set_id;
- END LOOP;
-
- -- Update all current artifacts that have been generated with this command
- FOR a_vals IN
- SELECT *
- FROM qiita.artifact
- WHERE command_id = cmd_id AND command_parameters->>'phred_offset' = ''
- LOOP
- UPDATE qiita.artifact
- SET command_parameters = qiita.json_object_set_key(a_vals.command_parameters, 'phred_offset', 'auto'::varchar)
- WHERE artifact_id = a_vals.artifact_id;
- END LOOP;
-
- -- Update all the jobs that have been using this parameter set
- FOR j_vals IN
- SELECT *
- FROM qiita.processing_job
- WHERE command_id = cmd_id AND command_parameters->>'phred_offset' = ''
- LOOP
- UPDATE qiita.processing_job
- SET command_parameters = qiita.json_object_set_key(j_vals.command_parameters, 'phred_offset', 'auto'::varchar)
- WHERE processing_job_id = j_vals.processing_job_id;
- END LOOP;
-END $do$;
diff --git a/qiita_db/support_files/patches/41.sql b/qiita_db/support_files/patches/41.sql
deleted file mode 100644
index ab781ee24..000000000
--- a/qiita_db/support_files/patches/41.sql
+++ /dev/null
@@ -1,6 +0,0 @@
--- Oct 29, 2016
--- Dropping command and reference NOT NULL constraints from jobs so bioms
--- without them can be analyzed
-
-ALTER TABLE qiita.job ALTER COLUMN input_file_reference_id DROP NOT NULL;
-ALTER TABLE qiita.job ALTER COLUMN input_file_software_command_id DROP NOT NULL;
diff --git a/qiita_db/support_files/patches/42.sql b/qiita_db/support_files/patches/42.sql
deleted file mode 100644
index a3c8fb24f..000000000
--- a/qiita_db/support_files/patches/42.sql
+++ /dev/null
@@ -1,15 +0,0 @@
--- Nov 19, 2016
--- Adding provenance parameter to validate commands
-
-DO $do$
-DECLARE
- cmd RECORD;
-BEGIN
- FOR cmd IN
- SELECT command_id FROM qiita.software_command WHERE name = 'Validate'
- LOOP
- INSERT INTO qiita.command_parameter (command_id, parameter_name, parameter_type, required, default_value)
- VALUES (cmd.command_id, 'provenance', 'string', 'False', NULL);
-
- END LOOP;
-END $do$
diff --git a/qiita_db/support_files/patches/43.sql b/qiita_db/support_files/patches/43.sql
deleted file mode 100644
index 0eb1beb16..000000000
--- a/qiita_db/support_files/patches/43.sql
+++ /dev/null
@@ -1,29 +0,0 @@
--- Nov 30, 2016
--- Adding jobs to parent/child artifacts without them
-
--- returns all the artifacts that are descendants of a_id, and the jobs that created them
-CREATE OR REPLACE FUNCTION qiita.artifact_descendants_with_jobs(a_id bigint) RETURNS TABLE (processing_job_id UUID, input_id bigint, output_id bigint) AS $$
-BEGIN
- IF EXISTS(SELECT * FROM qiita.artifact WHERE artifact_id = a_id) THEN
- RETURN QUERY WITH RECURSIVE root AS (
- SELECT qiita.artifact_processing_job.processing_job_id AS processing_job_id,
- qiita.artifact_processing_job.artifact_id AS input_id,
- qiita.artifact_output_processing_job.artifact_id AS output_id
- FROM qiita.artifact_processing_job
- LEFT JOIN qiita.artifact_output_processing_job USING (processing_job_id)
- WHERE qiita.artifact_processing_job.artifact_id = a_id
- UNION
- SELECT apj.processing_job_id AS processing_job_id,
- apj.artifact_id AS input_id,
- aopj.artifact_id AS output_id
- FROM qiita.artifact_processing_job apj
- LEFT JOIN qiita.artifact_output_processing_job aopj USING (processing_job_id)
- JOIN root r ON (r.output_id = apj.artifact_id)
- )
- SELECT DISTINCT root.processing_job_id, root.input_id, root.output_id
- FROM root
- WHERE root.output_id IS NOT NULL
- ORDER BY root.input_id ASC, root.output_id ASC;
- END IF;
-END
-$$ LANGUAGE plpgsql;
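A hedged usage sketch, assuming an artifact with ID 1 exists; the recursive CTE walks from the given artifact through every job that consumed it or one of its products:

    -- illustrative only
    SELECT processing_job_id, input_id, output_id
      FROM qiita.artifact_descendants_with_jobs(1);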
diff --git a/qiita_db/support_files/patches/44.sql b/qiita_db/support_files/patches/44.sql
deleted file mode 100644
index 6ba1cc835..000000000
--- a/qiita_db/support_files/patches/44.sql
+++ /dev/null
@@ -1,13 +0,0 @@
--- Dec 14, 2016
--- Keeping track of the validator jobs
-
-CREATE TABLE qiita.processing_job_validator (
- processing_job_id UUID NOT NULL,
- validator_id UUID NOT NULL,
- artifact_info json,
- CONSTRAINT idx_processing_job_validator PRIMARY KEY ( processing_job_id, validator_id )
- ) ;
-CREATE INDEX idx_processing_job_validator_0 ON qiita.processing_job_validator ( processing_job_id ) ;
-CREATE INDEX idx_processing_job_validator_1 ON qiita.processing_job_validator ( validator_id ) ;
-ALTER TABLE qiita.processing_job_validator ADD CONSTRAINT fk_processing_job_validator_p FOREIGN KEY ( processing_job_id ) REFERENCES qiita.processing_job( processing_job_id ) ;
-ALTER TABLE qiita.processing_job_validator ADD CONSTRAINT fk_processing_job_validator_c FOREIGN KEY ( validator_id ) REFERENCES qiita.processing_job( processing_job_id ) ;
diff --git a/qiita_db/support_files/patches/45.sql b/qiita_db/support_files/patches/45.sql
deleted file mode 100644
index 18efd4b25..000000000
--- a/qiita_db/support_files/patches/45.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- Dec 15, 2016
--- Making sure there are no duplicated columns, much easier via python
-
-SELECT 42;
diff --git a/qiita_db/support_files/patches/46.sql b/qiita_db/support_files/patches/46.sql
deleted file mode 100644
index 0ccbf8edd..000000000
--- a/qiita_db/support_files/patches/46.sql
+++ /dev/null
@@ -1,19 +0,0 @@
--- Dec 3, 2016
--- Modify qiita.study_publication so studies can have string
--- dois and pubmed ids
-
-
--- dropping PRIMARY KEY ( study_id, publication_doi )
-ALTER TABLE qiita.study_publication DROP CONSTRAINT idx_study_publication_0;
-
--- dropping FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id )
-ALTER TABLE qiita.study_publication DROP CONSTRAINT fk_study_publication_study;
-
--- dropping FOREIGN KEY ( publication_doi ) REFERENCES qiita.publication( doi )
-ALTER TABLE qiita.study_publication DROP CONSTRAINT fk_study_publication;
-
--- renaming publication_doi to publication
-ALTER TABLE qiita.study_publication RENAME publication_doi TO publication;
-
--- adding a new column so we know if the publication is doi or pubmedid
-ALTER TABLE qiita.study_publication ADD COLUMN is_doi boolean;
diff --git a/qiita_db/support_files/patches/47.sql b/qiita_db/support_files/patches/47.sql
deleted file mode 100644
index 077bb6690..000000000
--- a/qiita_db/support_files/patches/47.sql
+++ /dev/null
@@ -1,5 +0,0 @@
--- Jan 15, 2017
--- Propagate the status of the study to all its artifacts.
--- This code is much easier in python, so check that patch
-
-SELECT 1;
diff --git a/qiita_db/support_files/patches/48.sql b/qiita_db/support_files/patches/48.sql
deleted file mode 100644
index f18e28868..000000000
--- a/qiita_db/support_files/patches/48.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- Jan 20, 2017
--- see py file
-
-SELECT 1;
diff --git a/qiita_db/support_files/patches/49.sql b/qiita_db/support_files/patches/49.sql
deleted file mode 100644
index 4b2b3c42a..000000000
--- a/qiita_db/support_files/patches/49.sql
+++ /dev/null
@@ -1,6 +0,0 @@
--- Jan 27, 2017
--- sequeneces -> sequences
-
-UPDATE qiita.artifact_type SET description = 'Demultiplexed and QC sequences'
- WHERE artifact_type = 'Demultiplexed'
- AND description = 'Demultiplexed and QC sequeneces';
diff --git a/qiita_db/support_files/patches/5.sql b/qiita_db/support_files/patches/5.sql
deleted file mode 100644
index 84cc17198..000000000
--- a/qiita_db/support_files/patches/5.sql
+++ /dev/null
@@ -1,26 +0,0 @@
--- Nov 21, 2014
--- adding new tables to support multiple filepaths for sample/prep templates
-
-CREATE TABLE qiita.prep_template_filepath (
- prep_template_id bigint NOT NULL,
- filepath_id bigint NOT NULL,
- CONSTRAINT idx_prep_template_filepath PRIMARY KEY ( prep_template_id, filepath_id )
- ) ;
-
-CREATE TABLE qiita.sample_template_filepath (
- study_id bigint NOT NULL,
- filepath_id bigint NOT NULL,
- CONSTRAINT idx_sample_template_filepath PRIMARY KEY ( study_id, filepath_id )
- ) ;
-
-ALTER TABLE qiita.prep_template_filepath ADD CONSTRAINT fk_filepath_id FOREIGN KEY ( filepath_id ) REFERENCES qiita.filepath( filepath_id ) ;
-
-ALTER TABLE qiita.prep_template_filepath ADD CONSTRAINT fk_prep_template_id FOREIGN KEY ( prep_template_id ) REFERENCES qiita.prep_template( prep_template_id ) ;
-
-ALTER TABLE qiita.sample_template_filepath ADD CONSTRAINT fk_study_id FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id ) ;
-
-ALTER TABLE qiita.sample_template_filepath ADD CONSTRAINT fk_filepath_id FOREIGN KEY ( filepath_id ) REFERENCES qiita.filepath( filepath_id ) ;
-
--- inserting the new filepath types
-
-INSERT INTO qiita.filepath_type (filepath_type) VALUES ('sample_template'), ('prep_template');
diff --git a/qiita_db/support_files/patches/50.sql b/qiita_db/support_files/patches/50.sql
deleted file mode 100644
index f732ef7b5..000000000
--- a/qiita_db/support_files/patches/50.sql
+++ /dev/null
@@ -1,19 +0,0 @@
--- Feb 3, 2017
--- adding study tagging system
-
-CREATE TABLE qiita.study_tags (
- study_tag_id bigserial NOT NULL,
- email varchar NOT NULL,
- study_tag varchar NOT NULL,
- CONSTRAINT pk_study_tag UNIQUE ( study_tag ),
- CONSTRAINT pk_study_tag_id PRIMARY KEY ( study_tag_id )
-) ;
-
-CREATE INDEX idx_study_tag_id ON qiita.study_tags ( study_tag_id ) ;
-ALTER TABLE qiita.study_tags ADD CONSTRAINT fk_study_tags FOREIGN KEY ( email ) REFERENCES qiita.qiita_user( email );
-
-CREATE TABLE qiita.per_study_tags (
- study_tag_id bigint NOT NULL,
- study_id bigint NOT NULL,
- CONSTRAINT pk_per_study_tags PRIMARY KEY ( study_tag_id, study_id )
-) ;
diff --git a/qiita_db/support_files/patches/51.sql b/qiita_db/support_files/patches/51.sql
deleted file mode 100644
index 8ad54977e..000000000
--- a/qiita_db/support_files/patches/51.sql
+++ /dev/null
@@ -1,5 +0,0 @@
--- Feb 9, 2017
--- changing format of stored timestamps
--- see python patch
-
-SELECT 1;
diff --git a/qiita_db/support_files/patches/52.sql b/qiita_db/support_files/patches/52.sql
deleted file mode 100644
index 2f32707c5..000000000
--- a/qiita_db/support_files/patches/52.sql
+++ /dev/null
@@ -1,30 +0,0 @@
--- Mar 16, 2017
--- Changing tagging system structure, now study_tag will be the index
-
--- dropping all the constraints, indexes and columns that are no longer required
-ALTER TABLE qiita.study_tags DROP CONSTRAINT fk_study_tags;
-DROP INDEX qiita.idx_study_tag_id;
-ALTER TABLE qiita.study_tags DROP CONSTRAINT pk_study_tag;
-ALTER TABLE qiita.study_tags DROP CONSTRAINT pk_study_tag_id;
-ALTER TABLE qiita.study_tags DROP COLUMN study_tag_id;
-ALTER TABLE qiita.per_study_tags ADD COLUMN study_tag varchar NOT NULL;
-ALTER TABLE qiita.per_study_tags DROP CONSTRAINT pk_per_study_tags;
-ALTER TABLE qiita.per_study_tags DROP COLUMN study_tag_id;
-
--- adding new restrictions
-ALTER TABLE qiita.study_tags ADD CONSTRAINT pk_study_tags PRIMARY KEY ( study_tag );
-ALTER TABLE qiita.study_tags ADD CONSTRAINT fk_email FOREIGN KEY ( email ) REFERENCES qiita.qiita_user( email );
-ALTER TABLE qiita.per_study_tags ADD CONSTRAINT fk_study_tags FOREIGN KEY ( study_tag ) REFERENCES qiita.study_tags( study_tag );
-ALTER TABLE qiita.per_study_tags ADD CONSTRAINT fk_study_id FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id );
-ALTER TABLE qiita.per_study_tags ADD CONSTRAINT pk_per_study_tags PRIMARY KEY ( study_tag, study_id);
-
--- New structure:
--- CREATE TABLE qiita.study_tags (
--- email varchar NOT NULL,
--- study_tag varchar NOT NULL,
--- ) ;
---
--- CREATE TABLE qiita.per_study_tags (
--- study_tag varchar NOT NULL,
--- study_id bigint NOT NULL,
--- ) ;
diff --git a/qiita_db/support_files/patches/53.sql b/qiita_db/support_files/patches/53.sql
deleted file mode 100644
index 0ae05a48c..000000000
--- a/qiita_db/support_files/patches/53.sql
+++ /dev/null
@@ -1,5 +0,0 @@
--- Apr 1, 2017
--- setting visibility of all artifacts to be the most open of the full
--- processing tree
-
-SELECT 42;
diff --git a/qiita_db/support_files/patches/54.sql b/qiita_db/support_files/patches/54.sql
deleted file mode 100644
index 6b78ef0c4..000000000
--- a/qiita_db/support_files/patches/54.sql
+++ /dev/null
@@ -1,123 +0,0 @@
--- Jan 5, 2017
--- Move the analysis to the plugin system. This is a major rewrite of the
--- database backend that supports the analysis pipeline.
--- After exploring the data in the database, we realized that
--- there are a lot of inconsistencies in the data. Unfortunately, this
--- makes the process of transferring the data from the old structure
--- to the new one a bit more challenging, as we will need to handle
--- different special cases. Furthermore, not all the information needed is
--- present in the database, since it requires checking BIOM files. For these
--- reasons, the vast majority of the data transfer is done in the python patch
--- 51.py
-
--- In this file we are just creating the new data structures. The old
--- data structures will be dropped in the python patch once all data has been
--- transferred.
-
--- Create the new data structures
-
--- Table that links the analysis with the initial set of artifacts
-CREATE TABLE qiita.analysis_artifact (
- analysis_id bigint NOT NULL,
- artifact_id bigint NOT NULL,
- CONSTRAINT idx_analysis_artifact_0 PRIMARY KEY (analysis_id, artifact_id)
-);
-CREATE INDEX idx_analysis_artifact_analysis ON qiita.analysis_artifact (analysis_id);
-CREATE INDEX idx_analysis_artifact_artifact ON qiita.analysis_artifact (artifact_id);
-ALTER TABLE qiita.analysis_artifact ADD CONSTRAINT fk_analysis_artifact_analysis FOREIGN KEY ( analysis_id ) REFERENCES qiita.analysis( analysis_id );
-ALTER TABLE qiita.analysis_artifact ADD CONSTRAINT fk_analysis_artifact_artifact FOREIGN KEY ( artifact_id ) REFERENCES qiita.artifact( artifact_id );
-
--- Dropping the analysis status column because it now depends on the artifacts'
--- status, like the study's does.
-ALTER TABLE qiita.analysis DROP COLUMN analysis_status_id;
-
--- Create a table to link the analysis with the jobs that create the initial
--- artifacts
-CREATE TABLE qiita.analysis_processing_job (
- analysis_id bigint NOT NULL,
- processing_job_id uuid NOT NULL,
- CONSTRAINT idx_analysis_processing_job PRIMARY KEY ( analysis_id, processing_job_id )
- ) ;
-
-CREATE INDEX idx_analysis_processing_job_analysis ON qiita.analysis_processing_job ( analysis_id ) ;
-CREATE INDEX idx_analysis_processing_job_pj ON qiita.analysis_processing_job ( processing_job_id ) ;
-ALTER TABLE qiita.analysis_processing_job ADD CONSTRAINT fk_analysis_processing_job FOREIGN KEY ( analysis_id ) REFERENCES qiita.analysis( analysis_id ) ;
-ALTER TABLE qiita.analysis_processing_job ADD CONSTRAINT fk_analysis_processing_job_pj FOREIGN KEY ( processing_job_id ) REFERENCES qiita.processing_job( processing_job_id ) ;
-
--- Add a logging column in the analysis
-ALTER TABLE qiita.analysis ADD logging_id bigint ;
-CREATE INDEX idx_analysis_0 ON qiita.analysis ( logging_id ) ;
-ALTER TABLE qiita.analysis ADD CONSTRAINT fk_analysis_logging FOREIGN KEY ( logging_id ) REFERENCES qiita.logging( logging_id ) ;
-
--- Alter the software command table to differentiate between commands that
--- apply to the analysis pipeline or commands that apply on the study
--- processing pipeline
-ALTER TABLE qiita.software_command ADD is_analysis bool DEFAULT 'False' NOT NULL;
-
--- We can handle some of the special cases here, so we simplify the work in the
--- python patch
-
--- Special case 1: there are jobs in the database that do not contain
--- any information about the options used when they were run.
--- However, these jobs do not have any results and all are marked either
--- as queued or error, although no error log has been saved. Since these
--- jobs are mainly useless, we are going to remove them from the system
-DELETE FROM qiita.analysis_job
- WHERE job_id IN (SELECT job_id FROM qiita.job WHERE options = '{}');
-DELETE FROM qiita.job WHERE options = '{}';
-
--- Special case 2: there are a fair number of jobs (719 last time I
--- checked) that are not attached to any analysis. Not sure how this
--- can happen, but these orphan jobs can't be accessed from anywhere
--- in the interface. Remove them from the system. Note that we are
--- unlinking the files but we are not removing them from the filepath
--- table. We will do that in the patch 47.py using the
--- purge_filepaths function, as it will make sure that those files are
--- not used anywhere else
-DELETE FROM qiita.job_results_filepath WHERE job_id IN (
- SELECT job_id FROM qiita.job J WHERE NOT EXISTS (
- SELECT * FROM qiita.analysis_job AJ WHERE J.job_id = AJ.job_id));
-DELETE FROM qiita.job J WHERE NOT EXISTS (
- SELECT * FROM qiita.analysis_job AJ WHERE J.job_id = AJ.job_id);
-
--- In the analysis pipeline, an artifact can have multiple data types
--- (e.g. procrustes). Allow this by creating a new data_type, "Multiomic"
-INSERT INTO qiita.data_type (data_type) VALUES ('Multiomic');
-
-
--- The validate command from BIOM will have an extra parameter, analysis.
--- Magic number: 4 is the BIOM Validate command_id, known for sure since it
--- was added in patch 36.sql
-INSERT INTO qiita.command_parameter (command_id, parameter_name, parameter_type, required)
- VALUES (4, 'analysis', 'analysis', FALSE);
--- The template parameter now becomes optional, since the artifact can be added
--- either to an analysis or to a prep template. command_parameter_id known from
--- patch 36.sql
-UPDATE qiita.command_parameter SET required = FALSE WHERE command_parameter_id = 34;
-
--- We are going to add a new special software type, and a new software entry.
--- This is going to be used internally by Qiita to submit the private jobs.
--- This is needed for the analysis.
-INSERT INTO qiita.software_type (software_type, description)
- VALUES ('private', 'Internal Qiita jobs');
-
-DO $do$
-DECLARE
- qiita_sw_id bigint;
- baf_cmd_id bigint;
-BEGIN
- INSERT INTO qiita.software (name, version, description, environment_script, start_script, software_type_id, active)
- VALUES ('Qiita', 'alpha', 'Internal Qiita jobs', 'export PATH="/home/travis/miniconda3/bin:$PATH"; source activate qiita', 'qiita-private-plugin', 3, True)
- RETURNING software_id INTO qiita_sw_id;
-
- INSERT INTO qiita.software_command (software_id, name, description)
- VALUES (qiita_sw_id, 'build_analysis_files', 'Builds the files needed for the analysis')
- RETURNING command_id INTO baf_cmd_id;
-
- INSERT INTO qiita.command_parameter (command_id, parameter_name, parameter_type, required, default_value)
- VALUES (baf_cmd_id, 'analysis', 'analysis', True, NULL),
- (baf_cmd_id, 'merge_dup_sample_ids', 'bool', False, 'False');
-END $do$;
-
--- Add new filepath types
-INSERT INTO qiita.filepath_type (filepath_type) VALUES ('html_summary_dir'), ('qzv');
diff --git a/qiita_db/support_files/patches/55.sql b/qiita_db/support_files/patches/55.sql
deleted file mode 100644
index 65bafd459..000000000
--- a/qiita_db/support_files/patches/55.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- Jul 6, 2017
--- DELETE all sample/prep CONSTRAINTs
-
-SELECT 42;
diff --git a/qiita_db/support_files/patches/56.sql b/qiita_db/support_files/patches/56.sql
deleted file mode 100644
index 84ab6bcb0..000000000
--- a/qiita_db/support_files/patches/56.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- Aug 4, 2017
--- DROP qiita.study_experimental_factor
-
-DROP TABLE IF EXISTS qiita.study_experimental_factor;
diff --git a/qiita_db/support_files/patches/57.sql b/qiita_db/support_files/patches/57.sql
deleted file mode 100644
index ac5970659..000000000
--- a/qiita_db/support_files/patches/57.sql
+++ /dev/null
@@ -1,19 +0,0 @@
--- Aug 8, 2017
--- Add release validators internal Qiita command
-
-DO $do$
-DECLARE
- qiita_sw_id bigint;
- rv_cmd_id bigint;
-BEGIN
- SELECT software_id INTO qiita_sw_id
- FROM qiita.software
- WHERE name = 'Qiita' AND version = 'alpha';
-
- INSERT INTO qiita.software_command (software_id, name, description)
- VALUES (qiita_sw_id, 'release_validators', 'Releases the job validators')
- RETURNING command_id INTO rv_cmd_id;
-
- INSERT INTO qiita.command_parameter (command_id, parameter_name, parameter_type, required, default_value)
- VALUES (rv_cmd_id, 'job', 'string', True, NULL);
-END $do$;
diff --git a/qiita_db/support_files/patches/58.sql b/qiita_db/support_files/patches/58.sql
deleted file mode 100644
index 461b1f869..000000000
--- a/qiita_db/support_files/patches/58.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- Aug 31, 2017
--- Remove MOI and transfer all jobs to internal QIITA plugin
-
-SELECT 42;
diff --git a/qiita_db/support_files/patches/59.sql b/qiita_db/support_files/patches/59.sql
deleted file mode 100644
index 2c3f2598f..000000000
--- a/qiita_db/support_files/patches/59.sql
+++ /dev/null
@@ -1,14 +0,0 @@
--- Sep 15, 2017
--- Adding "name" parameter to validate commands
-
-DO $do$
-DECLARE
- cmd RECORD;
-BEGIN
- FOR cmd IN
- SELECT command_id FROM qiita.software_command WHERE name = 'Validate'
- LOOP
- INSERT INTO qiita.command_parameter (command_id, parameter_name, parameter_type, required, default_value)
- VALUES (cmd.command_id, 'name', 'string', 'False', 'default_name');
- END LOOP;
-END $do$
diff --git a/qiita_db/support_files/patches/6.sql b/qiita_db/support_files/patches/6.sql
deleted file mode 100644
index 6417bd525..000000000
--- a/qiita_db/support_files/patches/6.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- This file should have been empty as we only need 6.py, but patching will
--- fail if it's actually empty, so we do a simple query
-
-SELECT * FROM settings;
diff --git a/qiita_db/support_files/patches/60.sql b/qiita_db/support_files/patches/60.sql
deleted file mode 100644
index 1ce744064..000000000
--- a/qiita_db/support_files/patches/60.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- Sep 20, 2017
--- Allowing per_sample_FASTQ to be submitted to EBI
-
-UPDATE qiita.artifact_type SET can_be_submitted_to_ebi = true WHERE artifact_type='per_sample_FASTQ';
diff --git a/qiita_db/support_files/patches/61.sql b/qiita_db/support_files/patches/61.sql
deleted file mode 100644
index acf115265..000000000
--- a/qiita_db/support_files/patches/61.sql
+++ /dev/null
@@ -1,2 +0,0 @@
--- October 30th, 2017
-SELECT 42;
diff --git a/qiita_db/support_files/patches/62.sql b/qiita_db/support_files/patches/62.sql
deleted file mode 100644
index 3cce49da5..000000000
--- a/qiita_db/support_files/patches/62.sql
+++ /dev/null
@@ -1,59 +0,0 @@
--- November 15th, 2017
-ALTER TABLE qiita.command_output ADD check_biom_merge bool DEFAULT 'False' NOT NULL;
-ALTER TABLE qiita.command_parameter ADD name_order integer ;
-ALTER TABLE qiita.command_parameter ADD check_biom_merge bool DEFAULT 'False' NOT NULL;
-
--- Nov 22, 2017
--- Adding a hidden column to the qiita.processing_job table
-
-ALTER TABLE qiita.processing_job ADD COLUMN hidden boolean DEFAULT FALSE;
-
-UPDATE qiita.processing_job
-SET hidden = TRUE
-WHERE processing_job_id IN (
- SELECT processing_job_id
- FROM qiita.processing_job
- LEFT JOIN qiita.processing_job_status USING (processing_job_status_id)
- WHERE processing_job_status != 'success');
-
--- Nov 28, 2017 (only in py file)
--- Adding a new command into Qiita/Alpha: delete_analysis
-
--- Nov 30, 2017 (only in py file)
--- Expand artifact name size
-
-ALTER TABLE qiita.artifact ALTER COLUMN name TYPE VARCHAR;
-
--- Dec 3, 2017
--- Adding a function to retrieve the workflow roots of any job
-CREATE OR REPLACE FUNCTION qiita.get_processing_workflow_roots(job_id UUID) RETURNS SETOF UUID AS $$
-BEGIN
- IF EXISTS(SELECT * FROM qiita.processing_job_workflow_root WHERE processing_job_id = job_id) THEN
- RETURN QUERY SELECT job_id;
- ELSE
- RETURN QUERY WITH RECURSIVE root AS (
- SELECT child_id, parent_id
- FROM qiita.parent_processing_job
- WHERE child_id = job_id
- UNION
- SELECT p.child_id, p.parent_id
- FROM qiita.parent_processing_job p
- JOIN root r ON (r.parent_id = p.child_id)
- )
- SELECT DISTINCT parent_id
- FROM root
- WHERE parent_id NOT IN (SELECT child_id FROM qiita.parent_processing_job);
- END IF;
-END
-$$ LANGUAGE plpgsql;
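A hedged usage sketch (the job UUID below is hypothetical); starting from any job, the function walks parent_processing_job upward and returns the IDs of the workflow's root jobs:

    -- illustrative only
    SELECT qiita.get_processing_workflow_roots(
        'e5609746-b862-45f4-b64e-a44d3fbd0a11'::uuid);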
-
--- Dec 7, 2017
--- Adding new data types to qiita
-INSERT INTO qiita.data_type (data_type) VALUES ('Metatranscriptomics'), ('Viromics'), ('Genomics'), ('Transcriptomics');
-
--- Dec 7, 2017
--- Adding the name column to the prep template
-ALTER TABLE qiita.prep_template ADD name varchar DEFAULT 'Default Name' NOT NULL;
-
--- Set the default name to be the previous name that was shown
-UPDATE qiita.prep_template SET name = 'Prep information ' || prep_template_id::varchar;
diff --git a/qiita_db/support_files/patches/63.sql b/qiita_db/support_files/patches/63.sql
deleted file mode 100644
index df14181bf..000000000
--- a/qiita_db/support_files/patches/63.sql
+++ /dev/null
@@ -1,49 +0,0 @@
--- December 27th, 2017
--- Creating archive feature tables
-
-CREATE TABLE qiita.archive_merging_scheme (
- archive_merging_scheme_id bigserial NOT NULL,
- archive_merging_scheme varchar NOT NULL,
- CONSTRAINT pk_merging_scheme PRIMARY KEY ( archive_merging_scheme_id )
- ) ;
-
-CREATE TABLE qiita.archive_feature_value (
- archive_merging_scheme_id bigint NOT NULL,
- archive_feature varchar NOT NULL,
- archive_feature_value varchar NOT NULL,
- CONSTRAINT idx_archive_feature_value PRIMARY KEY ( archive_merging_scheme_id, archive_feature )
- ) ;
-
-CREATE INDEX idx_archive_feature_value_0 ON qiita.archive_feature_value ( archive_merging_scheme_id ) ;
-
-ALTER TABLE qiita.archive_feature_value ADD CONSTRAINT fk_archive_feature_value FOREIGN KEY ( archive_merging_scheme_id ) REFERENCES qiita.archive_merging_scheme( archive_merging_scheme_id );
-
--- taken from https://goo.gl/YtSvz2
-CREATE OR REPLACE FUNCTION archive_upsert(amsi INT, af VARCHAR, afv VARCHAR) RETURNS VOID AS $$
-BEGIN
- LOOP
- -- first try to update the key
- UPDATE qiita.archive_feature_value SET archive_feature_value = afv WHERE archive_merging_scheme_id = amsi AND archive_feature = af;
- IF found THEN
- RETURN;
- END IF;
- -- not there, so try to insert the key
- -- if someone else inserts the same key concurrently,
- -- we could get a unique-key failure
- BEGIN
- INSERT INTO qiita.archive_feature_value (archive_merging_scheme_id, archive_feature, archive_feature_value) VALUES (amsi, af, afv);
- RETURN;
- EXCEPTION WHEN unique_violation THEN
- -- Do nothing, and loop to try the UPDATE again.
- END;
- END LOOP;
-END;
-$$
-LANGUAGE plpgsql;
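A hedged illustration of the upsert semantics, assuming an archive_merging_scheme row with ID 1 already exists (the feature and value strings are made up):

    SELECT archive_upsert(1, 'feature-A', 'v1');  -- first call inserts
    SELECT archive_upsert(1, 'feature-A', 'v2');  -- second call updates in place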
-
--- January 25th, 2018
--- Adding is_user_uploadable to artifact_type
--- Note that at the time of creation we will need to update the following qiita-spots: qtp-biom, qtp-visualization, qtp-diversity, qtp-target-gene & qtp-template-cookiecutter
-
-ALTER TABLE qiita.artifact_type ADD is_user_uploadable BOOL DEFAULT FALSE;
-UPDATE qiita.artifact_type SET is_user_uploadable=TRUE WHERE artifact_type IN ('FASTQ', 'BIOM', 'per_sample_FASTQ');
diff --git a/qiita_db/support_files/patches/64.sql b/qiita_db/support_files/patches/64.sql
deleted file mode 100644
index c293f0a5f..000000000
--- a/qiita_db/support_files/patches/64.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- March 7, 2018
--- delete ebi_submission_status from study
-
-ALTER TABLE qiita.study DROP ebi_submission_status;
diff --git a/qiita_db/support_files/patches/65.sql b/qiita_db/support_files/patches/65.sql
deleted file mode 100644
index c4e90d141..000000000
--- a/qiita_db/support_files/patches/65.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- Jul 5, 2018
--- add ignore_parent_command to software_command
-
-ALTER TABLE qiita.software_command ADD ignore_parent_command BOOL DEFAULT FALSE NOT NULL;
diff --git a/qiita_db/support_files/patches/66.sql b/qiita_db/support_files/patches/66.sql
deleted file mode 100644
index 9847befcd..000000000
--- a/qiita_db/support_files/patches/66.sql
+++ /dev/null
@@ -1,15 +0,0 @@
--- August 6, 2018
-
-SELECT 42;
-
--- August 22, 2018
--- add specimen_id_column to study table (needed to plate samples in labman)
-
-ALTER TABLE qiita.study ADD specimen_id_column varchar(256);
-
-COMMENT ON COLUMN qiita.study.specimen_id_column IS 'The name of the column that describes the specimen identifiers (such as what is written on the tubes).';
-
--- September 12, 2018
--- add deprecated to software table
-
-ALTER TABLE qiita.software ADD deprecated bool default False;
diff --git a/qiita_db/support_files/patches/67.sql b/qiita_db/support_files/patches/67.sql
deleted file mode 100644
index ead39135c..000000000
--- a/qiita_db/support_files/patches/67.sql
+++ /dev/null
@@ -1,11 +0,0 @@
--- October 6, 2018
--- add post_processing_cmd column to record additional information required to merge some BIOMs.
-
-ALTER TABLE qiita.software_command ADD post_processing_cmd varchar;
-COMMENT ON COLUMN qiita.software_command.post_processing_cmd IS 'Store information on additional post-processing steps for merged BIOMs, if any.';
-
--- October 25, 2018
--- add public_raw_download to study
-
-ALTER TABLE qiita.study ADD public_raw_download bool default False;
-
diff --git a/qiita_db/support_files/patches/68.sql b/qiita_db/support_files/patches/68.sql
deleted file mode 100644
index b65cffbae..000000000
--- a/qiita_db/support_files/patches/68.sql
+++ /dev/null
@@ -1,51 +0,0 @@
--- December 21, 2018
--- Strip non-printable-ASCII characters from study_person.name
-UPDATE qiita.study_person SET name = regexp_replace(name, '[^\x20-\x7E]+', '', 'g');
-
--- November 21, 2018
--- moving sample and prep info files to jsonb
-
--- Due to error:
--- ValueError: Error running SQL: OUT_OF_MEMORY. MSG: out of shared memory
--- HINT: You might need to increase max_locks_per_transaction.
--- we need to split the full patch in 4 so the continuation is 69.sql,
--- 70.sql and 71.sql
-
--- 1/4 Sample template
-DO $do$
-DECLARE
- dyn_t varchar;
- dyn_table varchar;
- dyn_table_bk varchar;
- sid varchar;
-BEGIN
- FOR dyn_t IN
- SELECT DISTINCT table_name
- FROM information_schema.columns
- WHERE SUBSTR(table_name, 1, 7) = 'sample_'
- AND table_schema = 'qiita'
- AND table_name != 'sample_template_filepath'
- LOOP
- dyn_table := 'qiita.' || dyn_t;
- dyn_table_bk := dyn_t || '_bk';
-
- -- rename the tables so we can move the data later
- EXECUTE format('ALTER TABLE %1$s RENAME TO %2$s', dyn_table, dyn_table_bk);
-
- -- create the new table, note that there are no constraints so the
- -- inserts go fast but we will add them later
- EXECUTE format('CREATE TABLE %1$s (sample_id VARCHAR NOT NULL, sample_values JSONB)', dyn_table);
-
- -- inserting our helper row qiita_sample_column_names, which is going to keep all our column names; this is much easier than trying to keep all rows with the same values
- EXECUTE 'INSERT INTO ' || dyn_table || ' (sample_id, sample_values) VALUES (''qiita_sample_column_names'', (''{"columns":'' || (SELECT json_agg(column_name::text) FROM information_schema.columns WHERE table_name=''' || dyn_table_bk || ''' AND table_schema=''qiita'' AND column_name != ''sample_id'')::text || ''}'')::json);';
- -- inserting the table row by row; this might take forever
- FOR sid IN
- EXECUTE 'SELECT sample_id FROM qiita.' || dyn_table_bk
- LOOP
- EXECUTE 'INSERT INTO ' || dyn_table || ' (sample_id, sample_values) VALUES (''' || sid || ''', (SELECT row_to_json(t)::jsonb - ''sample_id'' FROM (SELECT * FROM qiita.' || dyn_table_bk || ' WHERE sample_id = ''' || sid || ''') t));';
- END LOOP;
-
- -- adding index
- EXECUTE 'ALTER TABLE ' || dyn_table || ' ADD CONSTRAINT pk_jsonb_' || dyn_t || ' PRIMARY KEY ( sample_id );';
- END LOOP;
-END $do$;
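The net effect of the loop above: each wide, one-column-per-field sample table becomes a two-column (sample_id, sample_values JSONB) table plus a qiita_sample_column_names bookkeeping row. A hedged sketch of querying the new shape (table and field names hypothetical):

    -- before: qiita.sample_1(sample_id, host_age, env_biome, ...)
    -- after:  qiita.sample_1(sample_id, sample_values JSONB)
    SELECT sample_id, sample_values->>'host_age' AS host_age
      FROM qiita.sample_1
     WHERE sample_id != 'qiita_sample_column_names';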
diff --git a/qiita_db/support_files/patches/69.sql b/qiita_db/support_files/patches/69.sql
deleted file mode 100644
index 50cca0937..000000000
--- a/qiita_db/support_files/patches/69.sql
+++ /dev/null
@@ -1,49 +0,0 @@
--- November 21, 2018
--- moving sample and prep info files to jsonb
-
--- 2/4 This is the continuation of 68.sql, let's move the data for the
--- prep templates but only for prep ids < 3500
-DO $do$
-DECLARE
- dyn_t varchar;
- dyn_table varchar;
- dyn_table_bk varchar;
- sid varchar;
-BEGIN
- FOR dyn_t IN
- SELECT DISTINCT table_name
- FROM information_schema.columns
- WHERE SUBSTR(table_name, 1, 5) = 'prep_'
- AND table_schema = 'qiita'
- AND table_name NOT IN ('prep_template',
- 'prep_template_preprocessed_data',
- 'prep_template_filepath',
- 'prep_columns',
- 'prep_template_processing_job',
- 'prep_template_sample')
- AND SUBSTR(table_name, 6)::INT < 3500
- LOOP
- dyn_table := 'qiita.' || dyn_t;
- dyn_table_bk := dyn_t || '_bk';
-
- -- rename the tables so we can move the data later
- EXECUTE format('ALTER TABLE %1$s RENAME TO %2$s', dyn_table, dyn_table_bk);
-
- -- create the new table, note that there are no constraints so the
- -- inserts go fast but we will add them later
- EXECUTE format('CREATE TABLE %1$s (sample_id VARCHAR NOT NULL, sample_values JSONB)', dyn_table);
-
- -- inserting our helper row qiita_sample_column_names, which is going to keep all our column names; this is much easier than trying to keep all rows with the same values
- EXECUTE 'INSERT INTO ' || dyn_table || ' (sample_id, sample_values) VALUES (''qiita_sample_column_names'', (''{"columns":'' || (SELECT json_agg(column_name::text) FROM information_schema.columns WHERE table_name=''' || dyn_table_bk || ''' AND table_schema=''qiita'' AND column_name != ''sample_id'')::text || ''}'')::json);';
-
- -- inserting the table row by row; this might take forever
- FOR sid IN
- EXECUTE 'SELECT sample_id FROM qiita.' || dyn_table_bk
- LOOP
- EXECUTE 'INSERT INTO ' || dyn_table || ' (sample_id, sample_values) VALUES (''' || sid || ''', (SELECT row_to_json(t)::jsonb - ''sample_id'' FROM (SELECT * FROM qiita.' || dyn_table_bk || ' WHERE sample_id = ''' || sid || ''') t));';
- END LOOP;
-
- -- adding index
- EXECUTE 'ALTER TABLE ' || dyn_table || ' ADD CONSTRAINT pk_jsonb_' || dyn_t || ' PRIMARY KEY ( sample_id );';
- END LOOP;
-END $do$;
diff --git a/qiita_db/support_files/patches/7.sql b/qiita_db/support_files/patches/7.sql
deleted file mode 100644
index 3c8d1b881..000000000
--- a/qiita_db/support_files/patches/7.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- Nov 23, 2014
--- Adding a new filepath_type = qiime_map
-
-INSERT INTO qiita.filepath_type (filepath_type) VALUES ('qiime_map');
diff --git a/qiita_db/support_files/patches/70.sql b/qiita_db/support_files/patches/70.sql
deleted file mode 100644
index 30542712f..000000000
--- a/qiita_db/support_files/patches/70.sql
+++ /dev/null
@@ -1,50 +0,0 @@
--- November 21, 2018
--- moving sample and prep info files to jsonb
-
--- 3/4 This is the continuation of the patching that started in 68.sql, let's
--- move the data for the prep templates but now for prep ids >= 3500
-DO $do$
-DECLARE
- dyn_t varchar;
- dyn_table varchar;
- dyn_table_bk varchar;
- sid varchar;
-BEGIN
- FOR dyn_t IN
- SELECT DISTINCT table_name
- FROM information_schema.columns
- WHERE SUBSTR(table_name, 1, 5) = 'prep_'
- AND table_schema = 'qiita'
- AND table_name NOT IN ('prep_template',
- 'prep_template_preprocessed_data',
- 'prep_template_filepath',
- 'prep_columns',
- 'prep_template_processing_job',
- 'prep_template_sample')
- AND table_name NOT LIKE '%_bk'
- AND SUBSTR(table_name, 6)::INT >= 3500
- LOOP
- dyn_table := 'qiita.' || dyn_t;
- dyn_table_bk := dyn_t || '_bk';
-
- -- rename the tables so we can move the data later
- EXECUTE format('ALTER TABLE %1$s RENAME TO %2$s', dyn_table, dyn_table_bk);
-
- -- create the new table, note that there are no constraints so the
- -- inserts go fast but we will add them later
- EXECUTE format('CREATE TABLE %1$s (sample_id VARCHAR NOT NULL, sample_values JSONB)', dyn_table);
-
- -- inserting our helper row qiita_sample_column_names, which is going to keep all our column names; this is much easier than trying to keep all rows with the same values
- EXECUTE 'INSERT INTO ' || dyn_table || ' (sample_id, sample_values) VALUES (''qiita_sample_column_names'', (''{"columns":'' || (SELECT json_agg(column_name::text) FROM information_schema.columns WHERE table_name=''' || dyn_table_bk || ''' AND table_schema=''qiita'' AND column_name != ''sample_id'')::text || ''}'')::json);';
-
- -- inserting the table row by row; this might take forever
- FOR sid IN
- EXECUTE 'SELECT sample_id FROM qiita.' || dyn_table_bk
- LOOP
- EXECUTE 'INSERT INTO ' || dyn_table || ' (sample_id, sample_values) VALUES (''' || sid || ''', (SELECT row_to_json(t)::jsonb - ''sample_id'' FROM (SELECT * FROM qiita.' || dyn_table_bk || ' WHERE sample_id = ''' || sid || ''') t));';
- END LOOP;
-
- -- adding index
- EXECUTE 'ALTER TABLE ' || dyn_table || ' ADD CONSTRAINT pk_jsonb_' || dyn_t || ' PRIMARY KEY ( sample_id );';
- END LOOP;
-END $do$;
diff --git a/qiita_db/support_files/patches/71.sql b/qiita_db/support_files/patches/71.sql
deleted file mode 100644
index 56a5540c0..000000000
--- a/qiita_db/support_files/patches/71.sql
+++ /dev/null
@@ -1,23 +0,0 @@
--- November 21, 2018
--- moving sample and prep info files to jsonb
-
--- 4/4 This is the continuation of the patching that started in 68.sql, let's
--- remove all the temp (_bk) tables we created
-
--- On January 17, 2019, while releasing the release candidate, we realized that
--- we were hitting the max memory allocated for the machine, so we needed to
--- move this code to python; original code:
--- DO $do$
--- DECLARE
--- dyn_table varchar;
--- BEGIN
--- FOR dyn_table IN
--- SELECT DISTINCT table_name
--- FROM information_schema.columns
--- WHERE table_name LIKE '%_bk' LIMIT 2100
--- LOOP
--- EXECUTE 'DROP TABLE qiita.' || dyn_table;
--- END LOOP;
--- END $do$;
-
-SELECT 42;
diff --git a/qiita_db/support_files/patches/72.sql b/qiita_db/support_files/patches/72.sql
deleted file mode 100644
index a02c3ae86..000000000
--- a/qiita_db/support_files/patches/72.sql
+++ /dev/null
@@ -1,22 +0,0 @@
--- November 21, 2018
--- moving sample and prep info files to jsonb
-
--- 4/4 This is the continuation of the patching that started in 68.sql, let's
--- remove all the temp (_bk) tables we created
-
--- On January 17, 2019, while releasing the release candidate, we realized that
--- we were hitting the max memory allocated for the machine, so we needed to
--- split this into two
--- Dropping all the _bk tables 2/2
-DO $do$
-DECLARE
- dyn_table varchar;
-BEGIN
- FOR dyn_table IN
- SELECT DISTINCT table_name
- FROM information_schema.columns
- WHERE table_name LIKE '%_bk'
- LOOP
- EXECUTE 'DROP TABLE qiita.' || dyn_table;
- END LOOP;
-END $do$;
diff --git a/qiita_db/support_files/patches/73.sql b/qiita_db/support_files/patches/73.sql
deleted file mode 100644
index 3b5b937b4..000000000
--- a/qiita_db/support_files/patches/73.sql
+++ /dev/null
@@ -1,62 +0,0 @@
--- January 4, 2019
--- add external_job_id column to record mapping of Torque Job IDs to Qiita Job IDs.
--- COMMENT ON COLUMN qiita.processing_job IS 'Store an external job ID (e.g. Torque job ID) associated with this Qiita job.';
-
-ALTER TABLE qiita.processing_job ADD external_job_id varchar;
-COMMENT ON COLUMN qiita.processing_job.external_job_id IS 'Store an external job ID (e.g. Torque job ID) associated with this Qiita job.';
-
-CREATE TABLE qiita.processing_job_resource_allocation
-(
- name varchar,
- description varchar,
- job_type varchar,
- allocation varchar,
- PRIMARY KEY(name, job_type)
-);
-
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('REGISTER', 'single-core-8gb', 'REGISTER', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('default', 'single-core-8gb', 'RELEASE_VALIDATORS_RESOURCE_PARAM', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('default', 'single-core-8gb', 'COMPLETE_JOBS_RESOURCE_PARAM', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('default', 'multi-core-vlow', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=5 -l pmem=8gb -l walltime=168:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('delete_analysis', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Calculate beta correlation', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('delete_sample_template', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('delete_study', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('delete_sample_or_column', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('create_sample_template', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('update_prep_template', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('copy_artifact', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('delete_artifact', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('download_remote_files', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('list_remote_files', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('submit_to_EBI', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Generate HTML summary', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l pmem=8gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('update_sample_template', 'single-core-16gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=16gb -l walltime=10:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('build_analysis_files', 'single-core-16gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=16gb -l walltime=10:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Custom-axis Emperor plot', 'single-core-16gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=16gb -l walltime=10:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Calculate alpha correlation', 'single-core-16gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=16gb -l walltime=10:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Summarize taxa', 'single-core-16gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=16gb -l walltime=10:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Perform Principal Coordinates Analysis (PCoA)', 'single-core-16gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=16gb -l walltime=10:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Split libraries', 'single-core-56gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=60gb -l walltime=25:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Calculate alpha diversity', 'single-core-56gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=60gb -l walltime=25:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Calculate beta diversity', 'single-core-56gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=60gb -l walltime=25:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Calculate beta group significance', 'single-core-56gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=60gb -l walltime=25:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Filter samples by metadata', 'single-core-56gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=60gb -l walltime=25:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Rarefy features', 'single-core-56gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=60gb -l walltime=25:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Validate', 'single-core-56gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=60gb -l walltime=25:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Trimming', 'single-core-120gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=120gb -l walltime=80:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Split libraries FASTQ', 'single-core-120gb', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=1 -l mem=120gb -l walltime=80:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Deblur', 'multi-core-low', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=5 -l mem=96gb -l walltime=130:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Shogun', 'multi-core-low', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=5 -l mem=96gb -l walltime=130:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Pick closed-reference OTUs', 'multi-core-high', 'RESOURCE_PARAMS_COMMAND', '-q qiita -l nodes=1:ppn=5 -l mem=120gb -l walltime=130:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Pick closed-reference OTUs', 'single-core-24gb', 'RELEASE_VALIDATORS_RESOURCE_PARAM', '-q qiita -l nodes=1:ppn=1 -l mem=24gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Trimming', 'single-core-24gb', 'RELEASE_VALIDATORS_RESOURCE_PARAM', '-q qiita -l nodes=1:ppn=1 -l mem=24gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Filter samples by metadata', 'single-core-24gb', 'RELEASE_VALIDATORS_RESOURCE_PARAM', '-q qiita -l nodes=1:ppn=1 -l mem=24gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Rarefy features', 'single-core-24gb', 'RELEASE_VALIDATORS_RESOURCE_PARAM', '-q qiita -l nodes=1:ppn=1 -l mem=24gb -l walltime=50:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('BIOM', 'single-core-16gb', 'COMPLETE_JOBS_RESOURCE_PARAM', '-q qiita -l nodes=1:ppn=1 -l mem=16gb -l walltime=10:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('alpha_vector', 'single-core-16gb', 'COMPLETE_JOBS_RESOURCE_PARAM', '-q qiita -l nodes=1:ppn=1 -l mem=16gb -l walltime=10:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('distance_matrix', 'single-core-16gb', 'COMPLETE_JOBS_RESOURCE_PARAM', '-q qiita -l nodes=1:ppn=1 -l mem=16gb -l walltime=10:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('Demultiplexed', 'single-core-16gb', 'COMPLETE_JOBS_RESOURCE_PARAM', '-q qiita -l nodes=1:ppn=1 -l mem=16gb -l walltime=10:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('ordination_results', 'single-core-16gb', 'COMPLETE_JOBS_RESOURCE_PARAM', '-q qiita -l nodes=1:ppn=1 -l mem=16gb -l walltime=10:00:00');
-insert into qiita.processing_job_resource_allocation (name, description, job_type, allocation) values ('q2_visualization', 'single-core-16gb', 'COMPLETE_JOBS_RESOURCE_PARAM', '-q qiita -l nodes=1:ppn=1 -l mem=16gb -l walltime=10:00:00');
-
diff --git a/qiita_db/support_files/patches/74.sql b/qiita_db/support_files/patches/74.sql
deleted file mode 100644
index 9e035ec2e..000000000
--- a/qiita_db/support_files/patches/74.sql
+++ /dev/null
@@ -1,54 +0,0 @@
--- Apr 2nd, 2019
--- Add a new filepath type
-INSERT INTO qiita.filepath_type (filepath_type) VALUES ('qza');
-
--- Apr 16th, 2019
--- Removing emp_person_id from Qiita
-DROP INDEX qiita.idx_study_1;
-ALTER TABLE qiita.study DROP CONSTRAINT fk_study_study_emp_person;
-ALTER TABLE qiita.study DROP COLUMN emp_person_id;
-
--- Apr 18th, 2019
--- adding fp_size to filepaths to store the filepath size
-ALTER TABLE qiita.filepath ADD fp_size BIGINT NOT NULL DEFAULT 0;
-
--- Apr 25th, 2019
--- adding restriction tables for sample/prep info files
--- values taken from ftp://ftp.sra.ebi.ac.uk/meta/xsd/sra_1_5/SRA.common.xsd
-CREATE TABLE qiita.restrictions (
- table_name varchar,
- name varchar,
- valid_values varchar[]
-);
-INSERT INTO qiita.restrictions (table_name, name, valid_values) VALUES
- -- inserting the sample info file restrictions
- ('study_sample', 'env_package', ARRAY[
- 'air', 'built environment', 'host-associated', 'human-associated',
- 'human-skin', 'human-oral', 'human-gut', 'human-vaginal',
- 'microbial mat/biofilm', 'misc environment', 'plant-associated',
- 'sediment', 'soil', 'wastewater/sludge', 'water']),
- -- inserting the prep info file restrictions
- ('prep_template_sample', 'target_gene', ARRAY[
- '16S rRNA', '18S rRNA', 'ITS1/2', 'LSU']),
- ('prep_template_sample', 'platform', ARRAY[
- 'FASTA', 'Illumina', 'Ion Torrent', 'LS454', 'Oxford Nanopore']),
- ('prep_template_sample', 'target_subfragment', ARRAY[
- 'V3', 'V4', 'V6', 'V9', 'ITS1/2']),
- ('prep_template_sample', 'instrument_model', ARRAY[
- -- LS454
- '454 GS', '454 GS 20', '454 GS FLX', '454 GS FLX+', '454 GS FLX Titanium',
- '454 GS Junior',
- -- Illumina
- 'Illumina Genome Analyzer', 'Illumina Genome Analyzer II',
- 'Illumina Genome Analyzer IIx', 'Illumina HiScanSQ',
- 'Illumina HiSeq 1000', 'Illumina HiSeq 1500', 'Illumina HiSeq 2000',
- 'Illumina HiSeq 2500', 'Illumina HiSeq 3000', 'Illumina HiSeq 4000',
- 'Illumina MiSeq', 'Illumina MiniSeq', 'Illumina NovaSeq 6000',
- 'NextSeq 500', 'NextSeq 550',
-        -- Ion Torrent
- 'Ion Torrent PGM', 'Ion Torrent Proton', 'Ion Torrent S5',
- 'Ion Torrent S5 XL',
- -- Oxford Nanopore
- 'MinION', 'GridION', 'PromethION',
- -- all
- 'unspecified']);
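A minimal sketch (not part of the original patch) of how the restrictions table can be consulted to validate a metadata value; the 'Illumina' literal is just an illustrative input:

SELECT 'Illumina' = ANY(valid_values) AS is_valid
  FROM qiita.restrictions
 WHERE table_name = 'prep_template_sample' AND name = 'platform';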
diff --git a/qiita_db/support_files/patches/75.sql b/qiita_db/support_files/patches/75.sql
deleted file mode 100644
index 6cfa8da03..000000000
--- a/qiita_db/support_files/patches/75.sql
+++ /dev/null
@@ -1,3 +0,0 @@
--- Jul 1st, 2019
--- fix #2901, no DB changes, all in py
-SELECT 42;
diff --git a/qiita_db/support_files/patches/76.sql b/qiita_db/support_files/patches/76.sql
deleted file mode 100644
index 14d33e051..000000000
--- a/qiita_db/support_files/patches/76.sql
+++ /dev/null
@@ -1,6 +0,0 @@
--- Aug 28th, 2019
--- fix #2933
-CREATE TABLE qiita.stats_daily (
- stats JSONB NOT NULL,
- stats_timestamp TIMESTAMP NOT NULL
-);
diff --git a/qiita_db/support_files/patches/77.sql b/qiita_db/support_files/patches/77.sql
deleted file mode 100644
index 1a9abaa2b..000000000
--- a/qiita_db/support_files/patches/77.sql
+++ /dev/null
@@ -1,9 +0,0 @@
--- Nov 27, 2019
--- Adds download_link table for allowing jwt secured downloads of artifacts from shortened links
-CREATE TABLE qiita.download_link (
- jti VARCHAR(32) PRIMARY KEY NOT NULL,
- jwt TEXT NOT NULL,
- exp TIMESTAMP NOT NULL
-);
-
-CREATE INDEX idx_download_link_exp ON qiita.download_link ( exp ) ;
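Because exp is indexed, expired short links can be pruned cheaply; a hedged housekeeping sketch (not part of the patch):

DELETE FROM qiita.download_link WHERE exp < NOW();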
diff --git a/qiita_db/support_files/patches/78.sql b/qiita_db/support_files/patches/78.sql
deleted file mode 100644
index e6535d9bc..000000000
--- a/qiita_db/support_files/patches/78.sql
+++ /dev/null
@@ -1,33 +0,0 @@
--- Nov 27, 2019
--- Adds a deprecated flag to prep templates, a notes column to studies, and the preparation_artifact linking table
-ALTER TABLE qiita.prep_template ADD deprecated BOOL DEFAULT FALSE;
-
-ALTER TABLE qiita.study ADD notes TEXT NOT NULL DEFAULT '';
-
-CREATE TABLE qiita.preparation_artifact (
- prep_template_id BIGINT,
- artifact_id BIGINT,
- CONSTRAINT fk_prep_template_id FOREIGN KEY ( prep_template_id ) REFERENCES qiita.prep_template( prep_template_id ),
- CONSTRAINT fk_artifact_id FOREIGN KEY ( artifact_id ) REFERENCES qiita.artifact( artifact_id )
-);
-
-INSERT INTO qiita.preparation_artifact (artifact_id, prep_template_id)
- SELECT a.artifact_id, prep_template_id FROM qiita.artifact a, qiita.find_artifact_roots(artifact_id) root_id
- JOIN qiita.prep_template pt ON (root_id = pt.artifact_id);
-ALTER TABLE qiita.preparation_artifact ADD PRIMARY KEY (prep_template_id, artifact_id);
-CREATE INDEX idx_preparation_artifact_prep_template_id ON qiita.preparation_artifact ( prep_template_id );
-
-CREATE OR REPLACE FUNCTION qiita.bioms_from_preparation_artifacts(prep_id bigint) RETURNS TEXT AS $$
-DECLARE
- artifacts TEXT := NULL;
-BEGIN
- SELECT array_to_string(array_agg(artifact_id), ',') INTO artifacts
- FROM qiita.preparation_artifact
- LEFT JOIN qiita.artifact USING (artifact_id)
- LEFT JOIN qiita.artifact_type USING (artifact_type_id)
- LEFT JOIN qiita.software_command USING (command_id)
- LEFT JOIN qiita.software USING (software_id)
- WHERE prep_template_id = prep_id AND artifact_type = 'BIOM' AND NOT deprecated;
- RETURN artifacts;
-END
-$$ LANGUAGE plpgsql;
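For reference, the function returns a comma-separated list of the non-deprecated BIOM artifact ids of a preparation; a usage sketch (the prep template id 1 is hypothetical):

SELECT qiita.bioms_from_preparation_artifacts(1);
-- e.g. '4,5,6', or NULL if the preparation has no qualifying BIOMs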
diff --git a/qiita_db/support_files/patches/79.sql b/qiita_db/support_files/patches/79.sql
deleted file mode 100644
index ad5575724..000000000
--- a/qiita_db/support_files/patches/79.sql
+++ /dev/null
@@ -1,16 +0,0 @@
--- Jun 23, 2020
--- Adds a new job_type VALIDATOR to processing_job_resource_allocation
-
-INSERT INTO qiita.processing_job_resource_allocation (name, job_type, allocation) VALUES
- ('default', 'VALIDATOR', '-q qiita -l nodes=1:ppn=1 -l mem=1gb -l walltime=4:00:00'),
- ('per_sample_FASTQ', 'VALIDATOR', '-q qiita -l nodes=1:ppn=5 -l mem=2gb -l walltime=10:00:00'),
- ('ordination_results', 'VALIDATOR', '-q qiita -l nodes=1:ppn=1 -l mem=10gb -l walltime=2:00:00'),
- ('Demultiplexed', 'VALIDATOR', '-q qiita -l nodes=1:ppn=5 -l mem=25gb -l walltime=150:00:00'),
- ('distance_matrix', 'VALIDATOR', '-q qiita -l nodes=1:ppn=1 -l mem=42gb -l walltime=150:00:00'),
- ('BIOM', 'VALIDATOR', '-q qiita -l nodes=1:ppn=1 -l mem=90gb -l walltime=150:00:00'),
- ('alpha_vector', 'VALIDATOR', '-q qiita -l nodes=1:ppn=1 -l mem=10gb -l walltime=70:00:00');
-
-
--- For EBI-ENA, Ion Torrent is Ion_Torrent
-UPDATE qiita.restrictions SET valid_values = ARRAY['FASTA', 'Illumina', 'Ion_Torrent', 'LS454', 'Oxford Nanopore']
- WHERE table_name = 'prep_template_sample' AND name = 'platform';
diff --git a/qiita_db/support_files/patches/8.sql b/qiita_db/support_files/patches/8.sql
deleted file mode 100644
index caff047c9..000000000
--- a/qiita_db/support_files/patches/8.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- Dec 12, 2014
--- Adding the VAMPS status field in all preprocessed data
-
-ALTER TABLE qiita.preprocessed_data ADD submitted_to_vamps_status varchar DEFAULT 'not submitted' ;
diff --git a/qiita_db/support_files/patches/80.sql b/qiita_db/support_files/patches/80.sql
deleted file mode 100644
index ce9ab7ae3..000000000
--- a/qiita_db/support_files/patches/80.sql
+++ /dev/null
@@ -1,4 +0,0 @@
--- Nov 10, 2020
--- Add a flag to the studies to see if the study was submitted by Qiita or downloaded by EBI
-
-ALTER TABLE qiita.study ADD autoloaded BOOL NOT NULL DEFAULT false;
diff --git a/qiita_db/support_files/patches/81.sql b/qiita_db/support_files/patches/81.sql
deleted file mode 100644
index 9157be92d..000000000
--- a/qiita_db/support_files/patches/81.sql
+++ /dev/null
@@ -1,49 +0,0 @@
--- Jan 25, 2021
--- Add creation_timestamp and modification_timestamp for qiita.prep_template
-
-ALTER TABLE qiita.prep_template ADD creation_timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP;
-ALTER TABLE qiita.prep_template ADD modification_timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP;
-
-
-
--- Feb 23, 2021
-
--- a. Removing software_id from qiita.default_workflow and replacing it by a
--- table which will link different data_types with the default_workflow +
--- adding an active flag in case we need to deprecate default_workflows +
--- adding a description column
-ALTER TABLE qiita.default_workflow DROP software_id;
-CREATE TABLE qiita.default_workflow_data_type (
- default_workflow_id BIGINT NOT NULL,
- data_type_id BIGINT NOT NULL,
- CONSTRAINT fk_default_workflow_id FOREIGN KEY ( default_workflow_id ) REFERENCES qiita.default_workflow( default_workflow_id ),
- CONSTRAINT fk_data_type_id FOREIGN KEY ( data_type_id ) REFERENCES qiita.data_type ( data_type_id ),
- PRIMARY KEY(default_workflow_id, data_type_id)
-);
-ALTER TABLE qiita.default_workflow ADD active BOOL DEFAULT TRUE;
-ALTER TABLE qiita.default_workflow ADD description TEXT;
-
--- b. Removing command_id from qiita.default_workflow_node and default_parameter_set as this information
--- can be accessed via the default_parameter object (the info is duplicated)
-ALTER TABLE qiita.default_workflow_node DROP command_id;
-
--- c. Linking some of the data_types with the default_workflows; note that this
--- is fine for the test database but we are going to need to clean up and
--- insert the most up to date recommendations directly in qiita.ucsd.edu
-INSERT INTO qiita.default_workflow_data_type (default_workflow_id, data_type_id) VALUES
- -- data types:
- -- 1 | 16S
- -- 2 | 18S
- -- 3 | ITS
- (1, 1),
- (1, 2),
- (2, 2),
- (3, 3);
-
--- d. adding descriptions
-UPDATE qiita.default_workflow
-    SET description = 'This accepts html Qiita!
-BYE!'
- WHERE default_workflow_id = 1;
-UPDATE qiita.default_workflow
- SET description = 'This is another description'
- WHERE default_workflow_id = 2;
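With the new linking table, the active workflows recommended for each data type can be resolved with a simple join; a sketch, assuming only the tables touched above:

SELECT dw.default_workflow_id, dt.data_type, dw.description
  FROM qiita.default_workflow dw
  JOIN qiita.default_workflow_data_type dwdt USING (default_workflow_id)
  JOIN qiita.data_type dt USING (data_type_id)
 WHERE dw.active;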
diff --git a/qiita_db/support_files/patches/82.sql b/qiita_db/support_files/patches/82.sql
deleted file mode 100644
index 45b3d32e0..000000000
--- a/qiita_db/support_files/patches/82.sql
+++ /dev/null
@@ -1,55 +0,0 @@
--- May 25, 2021
--- Adding max samples in a single preparation
--- we need to do it via a DO because IF NOT EXISTS in ALTER TABLE only exists
--- in PostgreSQL 9.6 or higher and we use 9.5
-DO $do$
-BEGIN
- IF NOT EXISTS (
- SELECT DISTINCT table_name FROM information_schema.columns
- WHERE table_name = 'settings' AND column_name = 'max_preparation_samples'
- ) THEN
- ALTER TABLE settings ADD COLUMN max_preparation_samples INT DEFAULT 800;
- END IF;
-END $do$;
-
-ALTER TABLE qiita.analysis
- DROP CONSTRAINT fk_analysis_user,
- ADD CONSTRAINT fk_analysis_user
- FOREIGN KEY (email)
- REFERENCES qiita.qiita_user(email)
- ON UPDATE CASCADE;
-
-ALTER TABLE qiita.study_users
- DROP CONSTRAINT fk_study_users_user,
- ADD CONSTRAINT fk_study_users_user
- FOREIGN KEY (email)
- REFERENCES qiita.qiita_user(email)
- ON UPDATE CASCADE;
-
-ALTER TABLE qiita.message_user
- DROP CONSTRAINT fk_message_user_0,
- ADD CONSTRAINT fk_message_user_0
- FOREIGN KEY (email)
- REFERENCES qiita.qiita_user(email)
- ON UPDATE CASCADE;
-
-ALTER TABLE qiita.processing_job
- DROP CONSTRAINT fk_processing_job_qiita_user,
- ADD CONSTRAINT fk_processing_job_qiita_user
- FOREIGN KEY (email)
- REFERENCES qiita.qiita_user(email)
- ON UPDATE CASCADE;
-
-ALTER TABLE qiita.processing_job_workflow
- DROP CONSTRAINT fk_processing_job_workflow,
- ADD CONSTRAINT fk_processing_job_workflow
- FOREIGN KEY (email)
- REFERENCES qiita.qiita_user(email)
- ON UPDATE CASCADE;
-
-ALTER TABLE qiita.study
- DROP CONSTRAINT fk_study_user,
- ADD CONSTRAINT fk_study_user
- FOREIGN KEY (email)
- REFERENCES qiita.qiita_user(email)
- ON UPDATE CASCADE;
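On PostgreSQL 9.6+ the DO guard at the top of this patch collapses to a single statement; shown here only as a sketch of what the workaround emulates:

ALTER TABLE settings ADD COLUMN IF NOT EXISTS max_preparation_samples INT DEFAULT 800;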
diff --git a/qiita_db/support_files/patches/83.sql b/qiita_db/support_files/patches/83.sql
deleted file mode 100644
index 4dd7e4f3e..000000000
--- a/qiita_db/support_files/patches/83.sql
+++ /dev/null
@@ -1,3 +0,0 @@
--- August 16, 2021
--- adding a new data_type, 'Job Output Folder', for raw job folders
-INSERT INTO qiita.data_type (data_type) VALUES ('Job Output Folder');
diff --git a/qiita_db/support_files/patches/84.sql b/qiita_db/support_files/patches/84.sql
deleted file mode 100644
index 1a6da1c21..000000000
--- a/qiita_db/support_files/patches/84.sql
+++ /dev/null
@@ -1,3 +0,0 @@
--- October 21, 2021
--- adding a new user level: wet-lab admin
-INSERT INTO qiita.user_level (name, description) VALUES ('wet-lab admin', 'Can access the private jobs');
diff --git a/qiita_db/support_files/patches/85.sql b/qiita_db/support_files/patches/85.sql
deleted file mode 100644
index 633158047..000000000
--- a/qiita_db/support_files/patches/85.sql
+++ /dev/null
@@ -1,28 +0,0 @@
--- Feb 22, 2022
--- adding a new parameter `categories` to build_analysis_files
-
-DO $do$
-DECLARE
- cmd_id bigint;
-BEGIN
- SELECT command_id INTO cmd_id FROM qiita.software_command WHERE name = 'build_analysis_files';
-
- INSERT INTO qiita.command_parameter (command_id, parameter_name, parameter_type, required, default_value)
- VALUES (cmd_id, 'categories', 'mchoice', True, NULL);
-END $do$;
-
--- Feb 28, 2022
--- adding a new column to the default_workflow table to keep track of the
--- artifact type that it expects vs. "guessing" it
-
-ALTER TABLE qiita.default_workflow ADD artifact_type_id BIGINT NOT NULL DEFAULT 3;
-ALTER TABLE qiita.default_workflow
- ADD CONSTRAINT fk_artifact_type_id
- FOREIGN KEY (artifact_type_id)
- REFERENCES qiita.artifact_type(artifact_type_id)
- ON UPDATE CASCADE;
-
--- Mar 17, 2022
--- deleting specimen_id_column from qiita.study
-
-ALTER TABLE qiita.study DROP COLUMN specimen_id_column;
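A hedged verification sketch for the first change: listing the parameters registered for build_analysis_files after the patch runs:

SELECT cp.parameter_name, cp.parameter_type, cp.required
  FROM qiita.command_parameter cp
  JOIN qiita.software_command sc USING (command_id)
 WHERE sc.name = 'build_analysis_files';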
diff --git a/qiita_db/support_files/patches/86.sql b/qiita_db/support_files/patches/86.sql
deleted file mode 100644
index 2d6bb6ec9..000000000
--- a/qiita_db/support_files/patches/86.sql
+++ /dev/null
@@ -1,33 +0,0 @@
--- Jun 8, 2022
--- adding the new visibility level: archived
-
-INSERT INTO qiita.visibility (visibility, visibility_description) VALUES ('archived', 'Archived artifact');
-
--- update function to ignore archived artifacts
-CREATE OR REPLACE FUNCTION qiita.bioms_from_preparation_artifacts(prep_id bigint) RETURNS TEXT AS $$
-DECLARE
- artifacts TEXT := NULL;
-BEGIN
- SELECT array_to_string(array_agg(artifact_id), ',') INTO artifacts
- FROM qiita.preparation_artifact
- LEFT JOIN qiita.artifact USING (artifact_id)
- LEFT JOIN qiita.artifact_type USING (artifact_type_id)
- LEFT JOIN qiita.software_command USING (command_id)
- LEFT JOIN qiita.software USING (software_id)
- LEFT JOIN qiita.visibility USING (visibility_id)
- WHERE
- prep_template_id = prep_id AND
- artifact_type = 'BIOM' AND
- NOT deprecated AND
- visibility != 'archived';
- RETURN artifacts;
-END
-$$ LANGUAGE plpgsql;
-
--- Jun 13, 2022
--- adding an archive_data column to the artifact
-ALTER TABLE qiita.artifact ADD archive_data JSONB DEFAULT NULL;
-
--- Jun 15, 2022
--- adding a receive_processing_job_emails flag to qiita.qiita_user
-ALTER TABLE qiita.qiita_user ADD receive_processing_job_emails BOOL DEFAULT FALSE;
diff --git a/qiita_db/support_files/patches/87.sql b/qiita_db/support_files/patches/87.sql
deleted file mode 100644
index e6415c3d4..000000000
--- a/qiita_db/support_files/patches/87.sql
+++ /dev/null
@@ -1,18 +0,0 @@
--- Aug 11, 2022
--- updating resource allocations to use slurm
-UPDATE qiita.processing_job_resource_allocation SET allocation =
-REPLACE(
- REPLACE(
- REPLACE(
- REPLACE(
- REPLACE(
- REPLACE(
- REPLACE(allocation, '-q qiita', '-p qiita'),
- '-l nodes=', '-N '),
- ':ppn=', ' -n '),
- '-l pmem=', '--mem-per-cpu '),
- '-l mem=', '--mem '),
- '-l walltime=', '--time '),
-'-p 1023', '--qos=qiita_prio');
-
-INSERT INTO qiita.filepath_type (filepath_type) VALUES ('bam');
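A worked sketch (not part of the patch) of what the nested REPLACE does to one Torque allocation string; the outermost '-p 1023' layer is omitted because this sample string does not contain it:

SELECT
  REPLACE(
    REPLACE(
      REPLACE(
        REPLACE(
          REPLACE(
            REPLACE('-q qiita -l nodes=1:ppn=5 -l mem=96gb -l walltime=130:00:00',
                    '-q qiita', '-p qiita'),
            '-l nodes=', '-N '),
          ':ppn=', ' -n '),
        '-l pmem=', '--mem-per-cpu '),
      '-l mem=', '--mem '),
    '-l walltime=', '--time ');
-- => -p qiita -N 1 -n 5 --mem 96gb --time 130:00:00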
diff --git a/qiita_db/support_files/patches/88.sql b/qiita_db/support_files/patches/88.sql
deleted file mode 100644
index fb2fc1b75..000000000
--- a/qiita_db/support_files/patches/88.sql
+++ /dev/null
@@ -1,3 +0,0 @@
--- May 12, 2023
--- add creation_job_id to qiita.prep_template
-ALTER TABLE qiita.prep_template ADD creation_job_id UUID DEFAULT NULL;
diff --git a/qiita_db/support_files/patches/89.sql b/qiita_db/support_files/patches/89.sql
deleted file mode 100644
index 978369422..000000000
--- a/qiita_db/support_files/patches/89.sql
+++ /dev/null
@@ -1,12 +0,0 @@
--- Nov 1, 2023
--- add slurm/queues changes to support per user_level and analysis parameters
-ALTER TABLE qiita.analysis ADD slurm_reservation VARCHAR DEFAULT '' NOT NULL;
-ALTER TABLE qiita.user_level ADD slurm_parameters VARCHAR DEFAULT '--nice=10000' NOT NULL;
-
-UPDATE qiita.user_level SET slurm_parameters = '--nice=5000' WHERE name = 'admin';
-
-UPDATE qiita.user_level SET slurm_parameters = '' WHERE name = 'wet-lab admin';
-
--- Nov 22, 2023
--- add changes to support workflow per sample/prep info specific parameters values
-ALTER TABLE qiita.default_workflow ADD parameters JSONB DEFAULT '{"sample": {}, "prep": {}}'::JSONB NOT NULL;
diff --git a/qiita_db/support_files/patches/9.sql b/qiita_db/support_files/patches/9.sql
deleted file mode 100644
index e8195a09e..000000000
--- a/qiita_db/support_files/patches/9.sql
+++ /dev/null
@@ -1,8 +0,0 @@
--- Dec 17, 2014
--- Adding the processing status to all preprocessed_data
-
-ALTER TABLE qiita.preprocessed_data ADD processing_status varchar DEFAULT 'not_processed' NOT NULL;
-
--- Make sure that the added field is consistent with the data on the database
-UPDATE qiita.preprocessed_data SET processing_status='processed' WHERE preprocessed_data_id IN (
- SELECT preprocessed_data_id FROM qiita.preprocessed_processed_data);
diff --git a/qiita_db/support_files/patches/90.sql b/qiita_db/support_files/patches/90.sql
deleted file mode 100644
index a0b5d58c9..000000000
--- a/qiita_db/support_files/patches/90.sql
+++ /dev/null
@@ -1,6 +0,0 @@
--- Jan 9, 2024
--- add control of max artifacts in analysis to the settings
--- using 35 as the default, considering that a core diversity analysis
--- creates ~17 artifacts, thus allowing for 2 of those + 1
-ALTER TABLE settings
- ADD COLUMN IF NOT EXISTS max_artifacts_in_workflow INT DEFAULT 35;
diff --git a/qiita_db/support_files/patches/91.sql b/qiita_db/support_files/patches/91.sql
deleted file mode 100644
index eb05c079c..000000000
--- a/qiita_db/support_files/patches/91.sql
+++ /dev/null
@@ -1,15 +0,0 @@
--- Feb 19, 2024
--- update qp-target-gene command name to "QIIMEq2" to be in sync with plugin repo
--- When setting up a new instance of Qiita, we end up using qiita-env make
--- which creates entries in the postgres database, also for qiita.software.
--- One of these entries belongs to the qp-target-gene plugin with name "QIIME"
--- and version "1.9.1". However, with
--- qp_target_gene/support_files/patches/171029_QIIME_v191_to_QIIMEq2_v191.sql
--- the plugin was renamed to QIIMEq2, but this change was not reflected in
--- the qiita.software table. Thus, updating plugin information finds a mismatch
--- between the old (QIIME) and new (QIIMEq2) names and therefore creates a new
--- command. However, the default workflows provided alongside hold command_ids to
--- the old version and subsequently the commands for artifact processing
--- will result in an empty list, even though the plugin is available.
--- Therefore, this patch updates the name of QIIME.
-UPDATE qiita.software SET name = 'QIIMEq2' WHERE name = 'QIIME';
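A hedged sanity-check sketch: after this patch there should be a single qp-target-gene row, named QIIMEq2:

SELECT software_id, name, version
  FROM qiita.software
 WHERE name IN ('QIIME', 'QIIMEq2');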
diff --git a/qiita_db/support_files/patches/python_patches/14.py b/qiita_db/support_files/patches/python_patches/14.py
deleted file mode 100644
index c7d31fcec..000000000
--- a/qiita_db/support_files/patches/python_patches/14.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-# February 7, 2015
-# This patch recreates all the QIIME mapping files to avoid lower/upper case
-# problems. See https://github.com/biocore/qiita/issues/799
-#
-# heavily based on 7.py
-
-from os.path import basename
-
-import qiita_db as qdb
-
-with qdb.sql_connection.TRN:
- sql = "SELECT prep_template_id FROM qiita.prep_template"
- qdb.sql_connection.TRN.add(sql)
- all_ids = qdb.sql_connection.TRN.execute_fetchflatten()
-
- # remove all the bad mapping files
- for prep_template_id in all_ids:
- pt = qdb.metadata_template.prep_template.PrepTemplate(prep_template_id)
- fps = pt.get_filepaths()
-
-    # get the QIIME mapping file; note that the way to figure out what is
-    # and what's not a qiime mapping file is to check for the existence of
-    # the word qiime in the basename of the file path, hacky but that's
-    # the way it is being done in qiita_pet/uimodules/raw_data_tab.py
- mapping_files = [f for f in fps if '_qiime_' in basename(f[1])]
-
- table = 'prep_template_filepath'
- column = 'prep_template_id'
-
- # unlink all the qiime mapping files for this prep template object
- for mf in mapping_files:
-
- # (1) get the ids that we are going to delete.
- # because of the FK restriction, we cannot just delete the ids
- sql = """SELECT filepath_id
- FROM qiita.{0}
- WHERE {1}=%s AND filepath_id=%s""".format(table, column)
- qdb.sql_connection.TRN.add(sql, [pt.id, mf[0]])
- ids = qdb.sql_connection.TRN.execute_fetchflatten()
-
- # (2) delete the entries from the prep_template_filepath table
- sql = """DELETE FROM qiita.{0}
- WHERE {1}=%s and filepath_id=%s""".format(table, column)
- qdb.sql_connection.TRN.add(sql, [pt.id, mf[0]])
-
- # (3) delete the entries from the filepath table
- sql = "DELETE FROM qiita.filepath WHERE filepath_id IN %s"
- qdb.sql_connection.TRN.add(sql, [tuple(ids)])
-
- qdb.sql_connection.TRN.execute()
-
- # create correct versions of the mapping files
- for prep_template_id in all_ids:
-
-        # all_ids was fetched with execute_fetchflatten(), so each element
-        # is already a scalar id
-        pt = qdb.metadata_template.prep_template.PrepTemplate(prep_template_id)
-
- # we can guarantee that all the filepaths will be prep templates so
- # we can just generate the qiime mapping files
- for _, fpt in pt.get_filepaths():
- pt.create_qiime_mapping_file(fpt)
diff --git a/qiita_db/support_files/patches/python_patches/15.py b/qiita_db/support_files/patches/python_patches/15.py
deleted file mode 100644
index 089d369dd..000000000
--- a/qiita_db/support_files/patches/python_patches/15.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-# Feb 11, 2015
-# This changes all analysis files to be relative path instead of absolute
-
-from os.path import basename, dirname
-
-import qiita_db as qdb
-
-with qdb.sql_connection.TRN:
- sql = """SELECT f.*
- FROM qiita.filepath f
- JOIN qiita.analysis_filepath afp
- ON f.filepath_id = afp.filepath_id"""
- qdb.sql_connection.TRN.add(sql)
- filepaths = qdb.sql_connection.TRN.execute_fetchindex()
-
- # retrieve relative filepaths as dictionary for matching
- mountpoints = {m[1].rstrip('/\\'): m[0] for m in qdb.util.get_mountpoint(
- 'analysis', retrieve_all=True)}
-
- sql = """UPDATE qiita.filepath SET filepath = %s, data_directory_id = %s
- WHERE filepath_id = %s"""
- for filepath in filepaths:
- filename = basename(filepath['filepath'])
- # find the ID of the analysis filepath used
- mp_id = mountpoints[dirname(filepath['filepath']).rstrip('/\\')]
- qdb.sql_connection.TRN.add(
- sql, [filename, mp_id, filepath['filepath_id']])
-
- qdb.sql_connection.TRN.execute()
diff --git a/qiita_db/support_files/patches/python_patches/23.py b/qiita_db/support_files/patches/python_patches/23.py
deleted file mode 100644
index 105eb682b..000000000
--- a/qiita_db/support_files/patches/python_patches/23.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-# Mar 27, 2015
-# Need to re-generate the files, given that some headers have changed
-
-import qiita_db as qdb
-
-with qdb.sql_connection.TRN:
- # Get all the sample templates
- qdb.sql_connection.TRN.add(
- "SELECT DISTINCT study_id from qiita.study_sample")
- study_ids = qdb.sql_connection.TRN.execute_fetchflatten()
-
- for s_id in study_ids:
- st = qdb.metadata_template.sample_template.SampleTemplate(s_id)
- st.generate_files()
-
- # Get all the prep templates
- qdb.sql_connection.TRN.add(
- "SELECT DISTINCT prep_template_id from qiita.prep_template")
- prep_ids = qdb.sql_connection.TRN.execute_fetchflatten()
- for prep_id in prep_ids:
- pt = qdb.metadata_template.prep_template.PrepTemplate(prep_id)
- pt.generate_files()
diff --git a/qiita_db/support_files/patches/python_patches/25.py b/qiita_db/support_files/patches/python_patches/25.py
deleted file mode 100644
index e26d6b380..000000000
--- a/qiita_db/support_files/patches/python_patches/25.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-# May 19, 2015
-# We attach the prep template directly to the study. The raw data is no longer
-# attached to the study directly; the prep templates point to it. This makes
-# the RawData effectively just a container for the raw files, which is how it
-# was acting previously.
-
-from os.path import join
-from functools import partial
-
-import qiita_db as qdb
-
-with qdb.sql_connection.TRN:
-    # the system may contain raw data with no prep template associated with it.
- # Retrieve all those raw data ids
- sql = """SELECT raw_data_id
- FROM qiita.raw_data
- WHERE raw_data_id NOT IN (
- SELECT DISTINCT raw_data_id FROM qiita.prep_template);"""
- qdb.sql_connection.TRN.add(sql)
- rd_ids = qdb.sql_connection.TRN.execute_fetchflatten()
-
- # We will delete those RawData. However, if they have files attached, we
- # should move them to the uploads folder of the study
- sql_detach = """DELETE FROM qiita.study_raw_data
- WHERE raw_data_id = %s AND study_id = %s"""
- sql_unlink = "DELETE FROM qiita.raw_filepath WHERE raw_data_id = %s"
- sql_delete = "DELETE FROM qiita.raw_data WHERE raw_data_id = %s"
- sql_studies = """SELECT study_id FROM qiita.study_raw_data
- WHERE raw_data_id = %s"""
- move_files = []
- for rd_id in rd_ids:
- sql = """SELECT filepath_id, filepath, filepath_type_id
- FROM qiita.filepath
- WHERE filepath_id IN (
- SELECT filepath_id
- FROM qiita.raw_filepath
- WHERE raw_data_id = %s)"""
- qdb.sql_connection.TRN.add(sql, [rd_id])
- db_paths = qdb.sql_connection.TRN.execute_fetchindex()
- fb = qdb.util.get_mountpoint("raw_data")[0][1]
- base_fp = partial(join, fb)
- filepaths = [
- (fpid, base_fp(fp), qdb.util.convert_from_id(fid, "filepath_type"))
- for fpid, fp, fid in db_paths]
-
- qdb.sql_connection.TRN.add(sql_studies, [rd_id])
- studies = qdb.sql_connection.TRN.execute_fetchflatten()
- if filepaths:
-            # we need to move the files to a study; we choose the one with the
-            # lowest study id. Currently there is no case in the live database
-            # in which a RawData with no prep templates is attached to more
-            # than one study, but I think it is better to normalize this just
-            # in case
- qdb.util.move_filepaths_to_upload_folder(min(studies), filepaths)
-
- # To delete the RawData we first need to unlink all the files
- qdb.sql_connection.TRN.add(sql_unlink, [rd_id])
-
- # Then, remove the raw data from all the studies
- for st_id in studies:
- qdb.sql_connection.TRN.add(sql_detach, [rd_id, st_id])
-
- qdb.sql_connection.TRN.add(sql_delete, [rd_id])
-
- # We can now perform all changes in the DB. Although these changes can be
- # done in an SQL patch, they are done here because we need to execute the
- # previous clean up in the database before we can actually execute the SQL
- # patch.
- sql = """CREATE TABLE qiita.study_prep_template (
- study_id bigint NOT NULL,
- prep_template_id bigint NOT NULL,
- CONSTRAINT idx_study_prep_template
- PRIMARY KEY ( study_id, prep_template_id )
- );
-
- CREATE INDEX idx_study_prep_template_0
- ON qiita.study_prep_template ( study_id );
-
- CREATE INDEX idx_study_prep_template_1
- ON qiita.study_prep_template ( prep_template_id );
-
- COMMENT ON TABLE qiita.study_prep_template IS
- 'links study to its prep templates';
-
- ALTER TABLE qiita.study_prep_template
- ADD CONSTRAINT fk_study_prep_template_study
- FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id );
-
- ALTER TABLE qiita.study_prep_template
- ADD CONSTRAINT fk_study_prep_template_pt
- FOREIGN KEY ( prep_template_id )
- REFERENCES qiita.prep_template( prep_template_id );
-
- -- Connect the existing prep templates in the system with their studies
- DO $do$
- DECLARE
- vals RECORD;
- BEGIN
- FOR vals IN
- SELECT prep_template_id, study_id
- FROM qiita.prep_template
- JOIN qiita.study_raw_data USING (raw_data_id)
- LOOP
- INSERT INTO qiita.study_prep_template (study_id, prep_template_id)
- VALUES (vals.study_id, vals.prep_template_id);
- END LOOP;
- END $do$;
-
-    -- Drop the study_raw_data table as it's no longer used
- DROP TABLE qiita.study_raw_data;
-
- -- The raw_data_id column now can be nullable
- ALTER TABLE qiita.prep_template
- ALTER COLUMN raw_data_id DROP NOT NULL;
- """
- qdb.sql_connection.TRN.add(sql)
- qdb.sql_connection.TRN.execute()
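Note that the DO loop in the embedded SQL above is a row-by-row version of a set-based insert; a sketch of the equivalent single statement:

INSERT INTO qiita.study_prep_template (study_id, prep_template_id)
SELECT study_id, prep_template_id
  FROM qiita.prep_template
  JOIN qiita.study_raw_data USING (raw_data_id);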
diff --git a/qiita_db/support_files/patches/python_patches/30.py b/qiita_db/support_files/patches/python_patches/30.py
deleted file mode 100644
index 5fbe91697..000000000
--- a/qiita_db/support_files/patches/python_patches/30.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-import qiita_db as qdb
-
-st_update = set()
-pr_update = set()
-
-with qdb.sql_connection.TRN:
- sql = r"""SELECT table_name
- FROM information_schema.tables
- WHERE table_schema='qiita'
- AND (table_name SIMILAR TO 'sample\_[0-9]+'
- OR table_name SIMILAR TO 'prep\_[0-9]+')"""
- qdb.sql_connection.TRN.add(sql)
- tables = qdb.sql_connection.TRN.execute_fetchflatten()
-
- cols_sql = """SELECT column_name
- FROM information_schema.columns
- WHERE table_name = %s
- AND data_type = 'character varying'"""
- alter_sql = """ALTER TABLE qiita.{0}
- ALTER COLUMN {1} TYPE bool
- USING CASE
- WHEN {1} IN %s THEN FALSE
- WHEN {1} IN %s THEN TRUE
- END"""
- null_sql = "UPDATE qiita.{0} SET {1} = NULL WHERE {1} IN %s"
- ssc_update_sql = """UPDATE qiita.study_sample_columns
- SET column_type = 'bool'
- WHERE study_id = %s AND column_name = %s"""
- pc_update_sql = """UPDATE qiita.prep_columns
- SET column_type = 'bool'
- WHERE prep_template_id = %s AND column_name = %s"""
-
- for table in tables:
- table_id = table.split("_")[1]
- # Change NaN values to NULL in database
- qdb.sql_connection.TRN.add(cols_sql, [table])
- cols = qdb.sql_connection.TRN.execute_fetchflatten()
- qdb.sql_connection.TRN.execute()
-
- # Update now boolean columns to bool in database
- qdb.sql_connection.TRN.add(
- "SELECT {0} FROM qiita.{1}".format(','.join(cols), table))
- col_vals = zip(*qdb.sql_connection.TRN.execute_fetchindex())
- for col, vals in zip(cols, col_vals):
- if set(vals) == {None}:
- # Ignore columns that are all NULL
- continue
-
- qdb.sql_connection.TRN.execute()
- for stid in st_update:
- stid = int(stid)
- qdb.metadata_template.sample_template.SampleTemplate(
- stid).generate_files()
- for pt_id in qdb.study.Study(stid).prep_templates():
- pr_update.discard(pt_id)
- for prid in pr_update:
- qdb.metadata_template.prep_template.PrepTemplate(
- int(prid)).generate_files()
diff --git a/qiita_db/support_files/patches/python_patches/31.py b/qiita_db/support_files/patches/python_patches/31.py
deleted file mode 100644
index e603a3fbc..000000000
--- a/qiita_db/support_files/patches/python_patches/31.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from os.path import realpath
-import qiita_db as qdb
-
-with qdb.sql_connection.TRN:
- qdb.sql_connection.TRN.add('SELECT base_data_dir FROM settings')
- path = qdb.sql_connection.TRN.execute_fetchlast()
-
- # if the path is non-canonical (it contains .. or other redundant symbols)
- # this will update it, else it will leave as is
- qdb.sql_connection.TRN.add(
- "UPDATE settings SET base_data_dir = %s", (realpath(path),))
- qdb.sql_connection.TRN.execute()
diff --git a/qiita_db/support_files/patches/python_patches/34.py b/qiita_db/support_files/patches/python_patches/34.py
deleted file mode 100644
index a81972caa..000000000
--- a/qiita_db/support_files/patches/python_patches/34.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from random import SystemRandom
-from string import ascii_letters, digits
-
-from qiita_db.sql_connection import TRN
-
-pool = ascii_letters + digits
-client_id = ''.join([SystemRandom().choice(pool) for _ in range(50)])
-client_secret = ''.join([SystemRandom().choice(pool) for _ in range(255)])
-
-with TRN:
- sql = """INSERT INTO qiita.oauth_identifiers (client_id, client_secret)
- VALUES (%s, %s)"""
- TRN.add(sql, [client_id, client_secret])
-
- sql = """INSERT INTO qiita.oauth_software (software_id, client_id)
- VALUES (%s, %s)"""
- TRN.add(sql, [1, client_id])
- TRN.execute()
diff --git a/qiita_db/support_files/patches/python_patches/36.py b/qiita_db/support_files/patches/python_patches/36.py
deleted file mode 100644
index 07373ca3d..000000000
--- a/qiita_db/support_files/patches/python_patches/36.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from random import SystemRandom
-from string import ascii_letters, digits
-from os.path import exists, join, basename
-from tarfile import open as taropen
-
-from qiita_db.sql_connection import TRN
-from qiita_db.artifact import Artifact
-from qiita_db.util import (insert_filepaths, convert_to_id, get_mountpoint,
- get_mountpoint_path_by_id)
-
-
-pool = ascii_letters + digits
-tgz_id = convert_to_id("tgz", "filepath_type")
-_id, analysis_mp = get_mountpoint('analysis')[0]
-with TRN:
- # 2 and 3 are the ids of the 2 new software rows, the BIOM and
- # target gene type plugins
- for i in [2, 3]:
- client_id = ''.join([SystemRandom().choice(pool) for _ in range(50)])
- client_secret = ''.join(
- [SystemRandom().choice(pool) for _ in range(255)])
-
- sql = """INSERT INTO qiita.oauth_identifiers (client_id, client_secret)
- VALUES (%s, %s)"""
- TRN.add(sql, [client_id, client_secret])
-
- sql = """INSERT INTO qiita.oauth_software (software_id, client_id)
- VALUES (%s, %s)"""
- TRN.add(sql, [i, client_id])
- TRN.execute()
-
- #
- # Generating compressed files for picking failures -- artifact_type = BIOM
- #
- sql = """SELECT artifact_id FROM qiita.artifact
- JOIN qiita.artifact_type USING (artifact_type_id)
- WHERE artifact_type = 'BIOM'"""
- TRN.add(sql)
-
- for r in TRN.execute_fetchindex():
- to_tgz = None
- a = Artifact(r[0])
- for x in a.filepaths:
- if x['fp_type'] == 'directory':
- # removing / from the path if it exists
- to_tgz = x['fp'][:-1] if x['fp'][-1] == '/' else x['fp']
- break
-
- if to_tgz is None:
- continue
-
- tgz = to_tgz + '.tgz'
- if not exists(tgz):
- with taropen(tgz, "w:gz") as tar:
- tar.add(to_tgz, arcname=basename(to_tgz))
-
- a_id = a.id
- # Add the new tgz file to the artifact.
- fp_ids = insert_filepaths([(tgz, tgz_id)], a_id, a.artifact_type,
- move_files=False)
- sql = """INSERT INTO qiita.artifact_filepath
- (artifact_id, filepath_id)
- VALUES (%s, %s)"""
- sql_args = [[a_id, fp_id] for fp_id in fp_ids]
- TRN.add(sql, sql_args, many=True)
- TRN.execute()
-
- #
- # Generating compressed files for analysis
- #
- TRN.add("SELECT analysis_id FROM qiita.analysis")
- for result in TRN.execute_fetchindex():
- analysis_id = result[0]
-        # retrieving all analysis filepaths; we could have used
-        # Analysis.all_associated_filepath_ids but the analysis might not
-        # belong to the current portal, thus using SQL
-
- sql = """SELECT filepath, data_directory_id
- FROM qiita.filepath
- JOIN qiita.analysis_filepath USING (filepath_id)
- WHERE analysis_id = %s"""
- TRN.add(sql, [analysis_id])
- fps = set([tuple(r) for r in TRN.execute_fetchindex()])
- sql = """SELECT filepath, data_directory_id
- FROM qiita.analysis_job
- JOIN qiita.job USING (job_id)
- JOIN qiita.job_results_filepath USING (job_id)
- JOIN qiita.filepath USING (filepath_id)
- WHERE analysis_id = %s"""
- TRN.add(sql, [analysis_id])
- fps = fps.union([tuple(r) for r in TRN.execute_fetchindex()])
-
- # no filepaths in the analysis
- if not fps:
- continue
-
- tgz = join(analysis_mp, '%d_files.tgz' % analysis_id)
- if not exists(tgz):
- full_fps = [join(get_mountpoint_path_by_id(mid), f)
- for f, mid in fps]
- with taropen(tgz, "w:gz") as tar:
- for f in full_fps:
- tar.add(f, arcname=basename(f))
-
- # Add the new tgz file to the analysis.
- fp_ids = insert_filepaths([(tgz, tgz_id)], analysis_id, 'analysis',
- move_files=False)
- sql = """INSERT INTO qiita.analysis_filepath
- (analysis_id, filepath_id)
- VALUES (%s, %s)"""
- sql_args = [[analysis_id, fp_id] for fp_id in fp_ids]
- TRN.add(sql, sql_args, many=True)
- TRN.execute()
diff --git a/qiita_db/support_files/patches/python_patches/38.py b/qiita_db/support_files/patches/python_patches/38.py
deleted file mode 100644
index c92be6e26..000000000
--- a/qiita_db/support_files/patches/python_patches/38.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from qiita_db.sql_connection import TRN
-
-
-# Due to the size of these changes we will process each table in its own
-# transaction
-with TRN:
- # select all table and column names from all sample template
- sql = """SELECT DISTINCT table_name FROM information_schema.columns
- WHERE (table_name LIKE 'sample_%'
- OR table_name LIKE 'prep_%')
- AND table_name NOT LIKE '%template%'"""
- TRN.add(sql)
-
- all_tables = TRN.execute_fetchflatten()
-
-for table in all_tables:
- with TRN:
- sql = """SELECT column_name FROM information_schema.columns
- WHERE table_name = %s
- ORDER BY column_name"""
- TRN.add(sql, [table])
-
- for column in TRN.execute_fetchflatten():
- sql = "ALTER TABLE qiita.%s ALTER COLUMN %s TYPE VARCHAR" % (
- table, column)
- TRN.add(sql)
-
- TRN.execute()
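Each pass of the inner loop emits an ALTER statement of this shape; the table and column names below are hypothetical:

ALTER TABLE qiita.sample_1 ALTER COLUMN ph TYPE VARCHAR;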
diff --git a/qiita_db/support_files/patches/python_patches/43.py b/qiita_db/support_files/patches/python_patches/43.py
deleted file mode 100644
index 9f7194f2a..000000000
--- a/qiita_db/support_files/patches/python_patches/43.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-import qiita_db as qdb
-
-
-PJ = qdb.processing_job.ProcessingJob
-
-# selecting all artifact ids
-with qdb.sql_connection.TRN:
- sql = """SELECT artifact_id FROM qiita.artifact"""
- qdb.sql_connection.TRN.add(sql, [])
-
- all_artifacts = qdb.sql_connection.TRN.execute_fetchindex()
-
-nodes = {}
-for aid in all_artifacts:
- aid = aid[0]
- with qdb.sql_connection.TRN:
- sql = """SELECT parent_id, artifact_id
- FROM qiita.artifact_descendants(%s)"""
- qdb.sql_connection.TRN.add(sql, [aid])
- edges = [tuple(e)
- for e in qdb.sql_connection.TRN.execute_fetchindex()]
-
- for parent, child in edges:
- # By creating all the artifacts here we are saving DB calls
- if parent not in nodes:
- nodes[parent] = qdb.artifact.Artifact(parent)
- if child not in nodes:
- nodes[child] = qdb.artifact.Artifact(child)
-
- job_id = None
- with qdb.sql_connection.TRN:
- sql = """SELECT processing_job_id
- FROM qiita.artifact_processing_job
- JOIN qiita.processing_job USING (processing_job_id)
- JOIN qiita.processing_job_status USING
- (processing_job_status_id)
- WHERE artifact_id = %s"""
- qdb.sql_connection.TRN.add(sql, [nodes[parent].id])
- job_ids = qdb.sql_connection.TRN.execute_fetchflatten()
-
- for j_id in job_ids:
- job = qdb.processing_job.ProcessingJob(j_id)
- if job.status == 'success' and job.outputs:
- for _, a in job.outputs.items():
- if a.id == child:
- job_id = job.id
- break
- if job_id is None:
- # inserting the missing values
-
- c = nodes[child]
- cmd_out = c.artifact_type
- if cmd_out == 'Demultiplexed':
- cmd_out = 'demultiplexed'
- elif cmd_out == 'BIOM':
- cmd_out = 'OTU table'
- else:
- # the actual DB has other possible values in
- # artifact_type
- continue
-
- cmd_out_id = qdb.util.convert_to_id(
- cmd_out, "command_output", "name")
-
- # the owner of the study will create the job
- job = PJ.create(c.study.owner, c.processing_parameters, True)
- with qdb.sql_connection.TRN:
- sql = """INSERT INTO
- qiita.artifact_output_processing_job
- (artifact_id, processing_job_id,
- command_output_id)
- VALUES (%s, %s, %s)"""
- qdb.sql_connection.TRN.add(
- sql, [child, job.id, cmd_out_id])
-
- job._update_children({parent: child})
- job._set_status('success')
diff --git a/qiita_db/support_files/patches/python_patches/45.py b/qiita_db/support_files/patches/python_patches/45.py
deleted file mode 100644
index 356497393..000000000
--- a/qiita_db/support_files/patches/python_patches/45.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-from qiita_db.metadata_template.sample_template import SampleTemplate
-from qiita_db.metadata_template.prep_template import PrepTemplate
-from qiita_db.sql_connection import TRN
-
-with TRN:
- # a few notes: just getting the preps with duplicated values; ignoring
- # column 'sample_id' and tables 'study_sample', 'prep_template',
- # 'prep_template_sample'
- sql = """SELECT table_name, array_agg(column_name::text)
- FROM information_schema.columns
- WHERE column_name IN %s
- AND column_name != 'sample_id'
- AND table_name LIKE 'prep_%%'
- AND table_name NOT IN (
- 'prep_template', 'prep_template_sample')
- GROUP BY table_name"""
- # note that we are looking for those columns with duplicated names in
- # the headers
- headers = set(PrepTemplate.metadata_headers()) & \
- set(SampleTemplate.metadata_headers())
-
- if headers:
- TRN.add(sql, [tuple(headers)])
- overlapping = dict(TRN.execute_fetchindex())
- else:
- overlapping = None
-
-if overlapping is not None:
- # finding actual duplicates
- for table_name, cols in overlapping.items():
-        # leaving the print so when we patch the main system we know which
-        # tables were renamed and can deal with them by hand
- print(table_name)
- with TRN:
- for c in cols:
- sql = 'ALTER TABLE qiita.%s RENAME COLUMN %s TO %s_renamed' % (
- table_name, c, c)
- TRN.add(sql)
- TRN.execute()
diff --git a/qiita_db/support_files/patches/python_patches/46.py b/qiita_db/support_files/patches/python_patches/46.py
deleted file mode 100644
index 5b1363380..000000000
--- a/qiita_db/support_files/patches/python_patches/46.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-import qiita_db as qdb
-
-
-# selecting all doi/pubmedids
-with qdb.sql_connection.TRN:
- sql = """SELECT p.doi, pubmed_id, study_id
- FROM qiita.study_publication AS sp
- LEFT JOIN qiita.publication AS p ON (sp.publication = p.doi)
- WHERE p.doi NOT IN (
- SELECT publication_doi FROM qiita.software_publication)"""
- qdb.sql_connection.TRN.add(sql)
-
- pubs = qdb.sql_connection.TRN.execute_fetchindex()
-
- # deleting all references to start from scratch
- sql = """DELETE FROM qiita.study_publication"""
- qdb.sql_connection.TRN.add(sql)
- qdb.sql_connection.TRN.execute()
-
- # reinserting following the new structure
- for doi, pid, sid in pubs:
- to_insert = []
- if doi is not None:
- to_insert.append([doi, True, sid])
-        # pid may be NULL because of the LEFT JOIN
-        if pid is not None:
- to_insert.append([pid, False, sid])
-
- sql = """INSERT INTO qiita.study_publication
- (publication, is_doi, study_id)
- VALUES (%s, %s, %s)"""
- qdb.sql_connection.TRN.add(sql, to_insert, many=True)
- qdb.sql_connection.TRN.execute()
diff --git a/qiita_db/support_files/patches/python_patches/47.py b/qiita_db/support_files/patches/python_patches/47.py
deleted file mode 100644
index 14e450592..000000000
--- a/qiita_db/support_files/patches/python_patches/47.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from qiita_db.study import Study
-
-
-class ForRecursion(object):
- """for some strange reason, my guess is how we are executing the patches
- recursion doesn't work directly so decided to use a class to make it
- work"""
-
- @classmethod
- def change_status(cls, artifact, status):
- for a in artifact.children:
- try:
- a.visibility = status
- except Exception:
- # print so we know which changes failed and we can deal by hand
- print("failed aid: %d, status %s" % (artifact.id, status))
- return
- cls.change_status(a, status)
-
-
-studies = Study.get_by_status('private').union(
- Study.get_by_status('public')).union(Study.get_by_status('sandbox'))
-# just getting the base artifacts, no parents
-artifacts = {a for s in studies for a in s.artifacts() if not a.parents}
-
-# inheriting status
-fr = ForRecursion
-for a in artifacts:
- status = a.visibility
- fr.change_status(a, status)
diff --git a/qiita_db/support_files/patches/python_patches/48.py b/qiita_db/support_files/patches/python_patches/48.py
deleted file mode 100644
index 4c99e1de4..000000000
--- a/qiita_db/support_files/patches/python_patches/48.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-# replacing all \t and \n with spaces as those chars break QIIME
-
-from qiita_db.study import Study
-from qiita_db.sql_connection import TRN
-
-
-def searcher(df):
- search = r"\t|\n"
-
- return [col for col in df
- if df[col].str.contains(search, na=False, regex=True).any()]
-
-
-studies = Study.get_by_status('private').union(
- Study.get_by_status('public')).union(Study.get_by_status('sandbox'))
-
-# we will start the search using pandas, as it is much easier and faster
-# than using pgsql; remember that to_dataframe actually transforms what's
-# in the db
-to_fix = []
-for s in studies:
- st = s.sample_template
- if st is None:
- continue
- cols = searcher(st.to_dataframe())
- if cols:
- to_fix.append((st, cols))
-
- for pt in s.prep_templates():
- if pt is None:
- continue
- cols = searcher(pt.to_dataframe())
- if cols:
- to_fix.append((pt, cols))
-
-
-# now let's fix the database and regenerate the files
-for infofile, cols in to_fix:
- with TRN:
- for col in cols:
- # removing tabs
- sql = """UPDATE qiita.{0}{1}
- SET {2} = replace({2}, chr(9), ' ')""".format(
- infofile._table_prefix, infofile.id, col)
- TRN.add(sql)
-
- # removing enters
- sql = """UPDATE qiita.{0}{1}
- SET {2} = regexp_replace(
- {2}, E'[\\n\\r\\u2028]+', ' ', 'g' )""".format(
- infofile._table_prefix, infofile.id, col)
- TRN.add(sql)
-
- TRN.execute()
-
- infofile.generate_files()
diff --git a/qiita_db/support_files/patches/python_patches/51.py b/qiita_db/support_files/patches/python_patches/51.py
deleted file mode 100644
index 62c86e02e..000000000
--- a/qiita_db/support_files/patches/python_patches/51.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from datetime import datetime
-
-from qiita_db.metadata_template.constants import (
- SAMPLE_TEMPLATE_COLUMNS, PREP_TEMPLATE_COLUMNS,
- PREP_TEMPLATE_COLUMNS_TARGET_GENE)
-from qiita_db.metadata_template.prep_template import PrepTemplate
-from qiita_db.metadata_template.sample_template import SampleTemplate
-from qiita_db.sql_connection import TRN
-
-
-# getting columns in each info file that we need to check for
-cols_sample = [col
- for key, vals in SAMPLE_TEMPLATE_COLUMNS.items()
- for col, dt in vals.columns.items() if dt == datetime]
-# note: concatenate with +; list.extend() returns None, which would leave
-# cols_prep as None and silently skip the prep fix below
-cols_prep = [col
-             for key, vals in PREP_TEMPLATE_COLUMNS.items()
-             for col, dt in vals.columns.items() if dt == datetime] + \
-            [col
-             for key, vals in PREP_TEMPLATE_COLUMNS_TARGET_GENE.items()
-             for col, dt in vals.columns.items()]
-
-
-def transform_date(value):
-    # due to the way the patches are applied we need to have this import
-    # and the formats mapping within this function
- from datetime import datetime
-
- # old format : new format
- formats = {
- # 4 digits year
- '%m/%d/%Y %H:%M:%S': '%Y-%m-%d %H:%M:%S',
- '%m-%d-%Y %H:%M': '%Y-%m-%d %H:%M',
- '%m/%d/%Y %H': '%Y-%m-%d %H',
- '%m-%d-%Y': '%Y-%m-%d',
- '%m-%Y': '%Y-%m',
- '%Y': '%Y',
- # 2 digits year
- '%m/%d/%y %H:%M:%S': '%Y-%m-%d %H:%M:%S',
- '%m-%d-%y %H:%M': '%Y-%m-%d %H:%M',
- '%m/%d/%y %H': '%Y-%m-%d %H',
- '%m-%d-%y': '%Y-%m-%d',
- '%m-%y': '%Y-%m',
- '%y': '%Y'
- }
-
- # loop over the old formats to see which one is it
- if value is not None:
- date = None
-        for fmt in formats:
- try:
- date = datetime.strptime(value, fmt)
- break
- except ValueError:
- pass
- if date is not None:
- value = date.strftime(formats[fmt])
-
- return value
-
-
-if cols_sample:
- with TRN:
-        # getting only the sample tables that have any of the datetime
-        # columns of interest; ignoring tables 'prep_template' and
-        # 'prep_template_sample'
- sql = """SELECT table_name, array_agg(column_name::text)
- FROM information_schema.columns
- WHERE column_name IN %s
- AND table_name LIKE 'sample_%%'
- AND table_name NOT IN (
- 'prep_template', 'prep_template_sample')
- GROUP BY table_name"""
-        # note that we are looking for the datetime columns collected in
-        # cols_sample
- TRN.add(sql, [tuple(set(cols_sample))])
- for table, columns in dict(TRN.execute_fetchindex()).items():
-            # the table name has the format sample_<study_id>; take the id
- st = SampleTemplate(int(table.split('_')[1]))
- # getting just the columns of interest
- st_df = st.to_dataframe()[columns]
- # converting to datetime
- for col in columns:
- st_df[col] = st_df[col].apply(transform_date)
- st.update(st_df)
-
-if cols_prep:
- with TRN:
-        # a few notes: we only want the dynamic prep_* tables, so the
-        # 'prep_template' and 'prep_template_sample' tables are explicitly
-        # excluded
- sql = """SELECT table_name, array_agg(column_name::text)
- FROM information_schema.columns
- WHERE column_name IN %s
- AND table_name LIKE 'prep_%%'
- AND table_name NOT IN (
- 'prep_template', 'prep_template_sample')
- GROUP BY table_name"""
-        # deduplicate the column names before querying
- TRN.add(sql, [tuple(set(cols_prep))])
- for table, columns in dict(TRN.execute_fetchindex()).items():
-            # the table name has the format prep_<prep_id>; take the id
- pt = PrepTemplate(int(table.split('_')[1]))
- # getting just the columns of interest
- pt_df = pt.to_dataframe()[columns]
- # converting to datetime
- for col in columns:
- pt_df[col] = pt_df[col].apply(transform_date)
- pt.update(pt_df)
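
Assuming `transform_date` as defined in the deleted patch above were in scope, this is roughly how the normalization behaves (the example values are made up):

```python
# values that match one of the known legacy formats are rewritten;
# anything else (including None) is returned untouched
print(transform_date('05/22/2015 10:33:00'))  # 2015-05-22 10:33:00
print(transform_date('11-2013'))              # 2013-11
print(transform_date('not a date'))           # not a date
print(transform_date(None))                   # None
```
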
diff --git a/qiita_db/support_files/patches/python_patches/53.py b/qiita_db/support_files/patches/python_patches/53.py
deleted file mode 100644
index 4051463b6..000000000
--- a/qiita_db/support_files/patches/python_patches/53.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from qiita_db.study import Study
-
-studies = Study.get_by_status('private').union(
- Study.get_by_status('public')).union(Study.get_by_status('sandbox'))
-raw_data = [pt.artifact for s in studies for pt in s.prep_templates()
- if pt.artifact is not None]
-
-for rd in raw_data:
- # getting the most open visibility of all the children in the pipeline
- children = rd.descendants.nodes()
- vis = [a.visibility for a in children]
- vis.append(rd.visibility)
-
- new_vis = 'sandbox'
- if 'public' in vis:
- new_vis = 'public'
- elif 'private' in vis:
- new_vis = 'private'
-
-    rd.visibility = new_vis
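
The visibility update above implements a "most open wins" rule; a self-contained sketch of the same decision (the helper name is ours, not the patch's):

```python
# visibility levels ordered from most restricted to most open
LEVELS = ['sandbox', 'private', 'public']


def most_open(visibilities):
    """Return the most open visibility present in the iterable."""
    return max(visibilities, key=LEVELS.index)


print(most_open(['sandbox', 'private']))            # private
print(most_open(['private', 'public', 'sandbox']))  # public
```
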
diff --git a/qiita_db/support_files/patches/python_patches/54.py b/qiita_db/support_files/patches/python_patches/54.py
deleted file mode 100644
index a63ea29cd..000000000
--- a/qiita_db/support_files/patches/python_patches/54.py
+++ /dev/null
@@ -1,699 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-# The code is commented with details on the changes implemented here,
-# but here is an overview of the changes needed to transfer the analysis
-# data to the plugins structure:
-# 1) Create a new type plugin to define the diversity types
-# 2) Create the new commands on the existing QIIME plugin to execute the
-# existing analyses (beta div, taxa summaries and alpha rarefaction)
-# 3) Transfer all the data in the old structures to the plugin structures
-# 4) Delete old structures
-
-from string import ascii_letters, digits
-from random import SystemRandom
-from os.path import join, exists, basename
-from os import makedirs
-from json import loads
-
-from biom import load_table, Table
-from biom.util import biom_open
-
-from qiita_db.sql_connection import TRN
-from qiita_db.util import (get_db_files_base_dir, purge_filepaths,
- get_mountpoint, compute_checksum)
-from qiita_db.artifact import Artifact
-
-# Create some aux functions that are going to make the code more modular
-# and easier to understand, since there is a fair amount of work to do to
-# transfer the data from the old structure to the new one
-
-
-def get_random_string(length):
- """Creates a random string of the given length with alphanumeric chars
-
- Parameters
- ----------
- length : int
- The desired length of the string
-
- Returns
- -------
- str
- The new random string
- """
- sr = SystemRandom()
- chars = ascii_letters + digits
-    return ''.join(sr.choice(chars) for _ in range(length))
-
-
-def create_non_rarefied_biom_artifact(analysis, biom_data, rarefied_table):
- """Creates the initial non-rarefied BIOM artifact of the analysis
-
- Parameters
- ----------
- analysis : dict
- Dictionary with the analysis information
- biom_data : dict
- Dictionary with the biom file information
- rarefied_table : biom.Table
- The rarefied BIOM table
-
- Returns
- -------
- int
- The id of the new artifact
- """
-    # The non-rarefied biom artifact is the initial biom table of the
-    # analysis. This table does not currently exist anywhere, so we need to
-    # actually create the BIOM file. To create this BIOM file we need:
-    # (1) the samples and artifacts they come from and (2) whether the
-    # samples were renamed or not. (1) is in the database, but we need to
-    # infer (2) from the existing rarefied BIOM table. Fun, fun...
-
- with TRN:
- # Get the samples included in the BIOM table grouped by artifact id
- # Note that the analysis contains a BIOM table per data type included
- # in it, and the table analysis_sample does not differentiate between
- # datatypes, so we need to check the data type in the artifact table
- sql = """SELECT artifact_id, array_agg(sample_id)
- FROM qiita.analysis_sample
- JOIN qiita.artifact USING (artifact_id)
- WHERE analysis_id = %s AND data_type_id = %s
- GROUP BY artifact_id"""
- TRN.add(sql, [analysis['analysis_id'], biom_data['data_type_id']])
- samples_by_artifact = TRN.execute_fetchindex()
-
- # Create an empty BIOM table to be the new master table
- new_table = Table([], [], [])
- ids_map = {}
- for a_id, samples in samples_by_artifact:
- # Get the filepath of the BIOM table from the artifact
- artifact = Artifact(a_id)
- biom_fp = None
- for x in artifact.filepaths:
- if x['fp_type'] == 'biom':
- biom_fp = x['fp']
-            # Note that the biom table is guaranteed to exist, so there is
-            # no need to check whether biom_fp is undefined
- biom_table = load_table(biom_fp)
- samples = set(samples).intersection(biom_table.ids())
- biom_table.filter(samples, axis='sample', inplace=True)
- # we need to check if the table has samples left before merging
- if biom_table.shape[0] != 0 and biom_table.shape[1] != 0:
- new_table = new_table.merge(biom_table)
- ids_map.update({sid: "%d.%s" % (a_id, sid)
- for sid in biom_table.ids()})
-
- # Check if we need to rename the sample ids in the biom table
- new_table_ids = set(new_table.ids())
- if not new_table_ids.issuperset(rarefied_table.ids()):
- # We need to rename the sample ids
- new_table.update_ids(ids_map, 'sample', True, True)
-
- sql = """INSERT INTO qiita.artifact
- (generated_timestamp, data_type_id, visibility_id,
- artifact_type_id, submitted_to_vamps)
- VALUES (%s, %s, %s, %s, %s)
- RETURNING artifact_id"""
- # Magic number 4 -> visibility sandbox
-        # Magic number 7 -> biom artifact type
- TRN.add(sql, [analysis['timestamp'], biom_data['data_type_id'],
- 4, 7, False])
- artifact_id = TRN.execute_fetchlast()
-
- # Associate the artifact with the analysis
- sql = """INSERT INTO qiita.analysis_artifact
- (analysis_id, artifact_id)
- VALUES (%s, %s)"""
- TRN.add(sql, [analysis['analysis_id'], artifact_id])
- # Link the artifact with its file
- dd_id, mp = get_mountpoint('BIOM')[0]
- dir_fp = join(get_db_files_base_dir(), mp, str(artifact_id))
- if not exists(dir_fp):
- makedirs(dir_fp)
- new_table_fp = join(dir_fp, "biom_table.biom")
- with biom_open(new_table_fp, 'w') as f:
- new_table.to_hdf5(f, "Generated by Qiita")
-
- sql = """INSERT INTO qiita.filepath
- (filepath, filepath_type_id, checksum,
- checksum_algorithm_id, data_directory_id)
- VALUES (%s, %s, %s, %s, %s)
- RETURNING filepath_id"""
- # Magic number 7 -> filepath_type_id = 'biom'
- # Magic number 1 -> the checksum algorithm id
- TRN.add(sql, [basename(new_table_fp), 7,
- compute_checksum(new_table_fp), 1, dd_id])
- fp_id = TRN.execute_fetchlast()
- sql = """INSERT INTO qiita.artifact_filepath
- (artifact_id, filepath_id)
- VALUES (%s, %s)"""
- TRN.add(sql, [artifact_id, fp_id])
- TRN.execute()
-
- return artifact_id
-
-
-def create_rarefaction_job(depth, biom_artifact_id, analysis, srare_cmd_id):
- """Create a new rarefaction job
-
- Parameters
- ----------
- depth : int
- The rarefaction depth
- biom_artifact_id : int
- The artifact id of the input rarefaction biom table
- analysis : dict
- Dictionary with the analysis information
- srare_cmd_id : int
- The command id of the single rarefaction command
-
- Returns
- -------
- job_id : str
- The job id
- params : str
- The job parameters
- """
-    # Add the row in the processing job table
- params = ('{"depth":%d,"subsample_multinomial":false,"biom_table":%s}'
- % (depth, biom_artifact_id))
- with TRN:
- # magic number 3: status -> success
- sql = """INSERT INTO qiita.processing_job
- (email, command_id, command_parameters,
- processing_job_status_id)
- VALUES (%s, %s, %s, %s)
- RETURNING processing_job_id"""
- TRN.add(sql, [analysis['email'], srare_cmd_id, params, 3])
- job_id = TRN.execute_fetchlast()
-        # Link the job with the input artifact
- sql = """INSERT INTO qiita.artifact_processing_job
- (artifact_id, processing_job_id)
- VALUES (%s, %s)"""
- TRN.add(sql, [biom_artifact_id, job_id])
- TRN.execute()
- return job_id, params
-
-
-def transfer_file_to_artifact(analysis_id, a_timestamp, command_id,
- data_type_id, params, artifact_type_id,
- filepath_id):
- """Creates a new artifact with the given filepath id
-
- Parameters
- ----------
- analysis_id : int
- The analysis id to attach the artifact
- a_timestamp : datetime.datetime
- The generated timestamp of the artifact
- command_id : int
- The command id of the artifact
- data_type_id : int
- The data type id of the artifact
- params : str
- The parameters of the artifact
- artifact_type_id : int
- The artifact type
- filepath_id : int
- The filepath id
-
- Returns
- -------
- int
- The artifact id
- """
- with TRN:
- # Add the row in the artifact table
- # Magic number 4: Visibility -> sandbox
- sql = """INSERT INTO qiita.artifact
- (generated_timestamp, command_id, data_type_id,
- command_parameters, visibility_id, artifact_type_id,
- submitted_to_vamps)
- VALUES (%s, %s, %s, %s, %s, %s, %s)
- RETURNING artifact_id"""
- TRN.add(sql, [a_timestamp, command_id, data_type_id, params, 4,
- artifact_type_id, False])
- artifact_id = TRN.execute_fetchlast()
- # Link the artifact with its file
- sql = """INSERT INTO qiita.artifact_filepath (artifact_id, filepath_id)
- VALUES (%s, %s)"""
- TRN.add(sql, [artifact_id, filepath_id])
- # Link the artifact with the analysis
- sql = """INSERT INTO qiita.analysis_artifact
- (analysis_id, artifact_id)
- VALUES (%s, %s)"""
- TRN.add(sql, [analysis_id, artifact_id])
-
- return artifact_id
-
-
-def create_rarefied_biom_artifact(analysis, srare_cmd_id, biom_data, params,
- parent_biom_artifact_id, rarefaction_job_id,
- srare_cmd_out_id):
- """Creates the rarefied biom artifact
-
- Parameters
- ----------
- analysis : dict
- The analysis information
- srare_cmd_id : int
- The command id of "Single Rarefaction"
- biom_data : dict
- The biom information
- params : str
- The processing parameters
- parent_biom_artifact_id : int
- The parent biom artifact id
- rarefaction_job_id : str
- The job id of the rarefaction job
- srare_cmd_out_id : int
- The id of the single rarefaction output
-
- Returns
- -------
- int
- The artifact id
- """
- with TRN:
- # Transfer the file to an artifact
- # Magic number 7: artifact type -> biom
- artifact_id = transfer_file_to_artifact(
- analysis['analysis_id'], analysis['timestamp'], srare_cmd_id,
- biom_data['data_type_id'], params, 7, biom_data['filepath_id'])
- # Link the artifact with its parent
- sql = """INSERT INTO qiita.parent_artifact (artifact_id, parent_id)
- VALUES (%s, %s)"""
- TRN.add(sql, [artifact_id, parent_biom_artifact_id])
- # Link the artifact as the job output
- sql = """INSERT INTO qiita.artifact_output_processing_job
- (artifact_id, processing_job_id, command_output_id)
- VALUES (%s, %s, %s)"""
- TRN.add(sql, [artifact_id, rarefaction_job_id, srare_cmd_out_id])
- return artifact_id
-
-
-def transfer_job(analysis, command_id, params, input_artifact_id, job_data,
- cmd_out_id, biom_data, output_artifact_type_id):
- """Transfers the job from the old structure to the plugin structure
-
- Parameters
- ----------
- analysis : dict
- The analysis information
- command_id : int
- The id of the command executed
- params : str
- The parameters used in the job
- input_artifact_id : int
- The id of the input artifact
- job_data : dict
- The job information
- cmd_out_id : int
- The id of the command's output
- biom_data : dict
- The biom information
- output_artifact_type_id : int
- The type of the output artifact
- """
- with TRN:
- # Create the job
- # Add the row in the processing job table
- # Magic number 3: status -> success
- sql = """INSERT INTO qiita.processing_job
- (email, command_id, command_parameters,
- processing_job_status_id)
- VALUES (%s, %s, %s, %s)
- RETURNING processing_job_id"""
- TRN.add(sql, [analysis['email'], command_id, params, 3])
- job_id = TRN.execute_fetchlast()
-
- # Link the job with the input artifact
- sql = """INSERT INTO qiita.artifact_processing_job
- (artifact_id, processing_job_id)
-                 VALUES (%s, %s)"""
- TRN.add(sql, [input_artifact_id, job_id])
-
- # Check if the executed job has results and add them
- sql = """SELECT EXISTS(SELECT *
- FROM qiita.job_results_filepath
- WHERE job_id = %s)"""
- TRN.add(sql, [job_data['job_id']])
- if TRN.execute_fetchlast():
- # There are results for the current job.
- # Transfer the job files to a new artifact
- sql = """SELECT filepath_id
- FROM qiita.job_results_filepath
- WHERE job_id = %s"""
-            TRN.add(sql, [job_data['job_id']])
- filepath_id = TRN.execute_fetchlast()
- artifact_id = transfer_file_to_artifact(
- analysis['analysis_id'], analysis['timestamp'], command_id,
- biom_data['data_type_id'], params, output_artifact_type_id,
- filepath_id)
-
- # Link the artifact with its parent
- sql = """INSERT INTO qiita.parent_artifact (artifact_id, parent_id)
- VALUES (%s, %s)"""
- TRN.add(sql, [artifact_id, input_artifact_id])
- # Link the artifact as the job output
- sql = """INSERT INTO qiita.artifact_output_processing_job
- (artifact_id, processing_job_id, command_output_id)
- VALUES (%s, %s, %s)"""
- TRN.add(sql, [artifact_id, job_id, cmd_out_id])
-            TRN.execute()
- else:
- # There are no results on the current job, so mark it as
- # error
-            if job_data['log_id'] is None:
-                # Magic number 2 - we are not using any other severity
-                # level, so keep using number 2
-                sql = """INSERT INTO qiita.logging (time, severity_id, msg)
-                         VALUES (%s, %s, %s)
-                         RETURNING logging_id"""
-                TRN.add(sql, [analysis['timestamp'], 2,
-                              "Unknown error - patch 47"])
-                # fetch the id of the log entry we just created
-                log_id = TRN.execute_fetchlast()
-            else:
-                log_id = job_data['log_id']
-
- # Magic number 4 -> status -> error
- sql = """UPDATE qiita.processing_job
- SET processing_job_status_id = 4, logging_id = %s
- WHERE processing_job_id = %s"""
- TRN.add(sql, [log_id, job_id])
-
-
-# The new commands that we are going to add generate new artifact types.
-# These new artifact types are going to be added to a different plugin.
-# In the interest of time, and given that the artifact type system is going
-# to change in the near future, we feel that the easiest way to transfer
-# the current analyses results is by creating 3 different types of
-# artifacts: (1) distance matrix -> which will include the distance matrix,
-# the principal coordinates and the emperor plots; (2) rarefaction
-# curves -> which will include all the files generated by alpha rarefaction
-# and (3) taxonomy summary, which will include all the files generated
-# by summarize_taxa_through_plots.py
-
-with TRN:
- # Add the new artifact types
- sql = """INSERT INTO qiita.artifact_type (
- artifact_type, description, can_be_submitted_to_ebi,
- can_be_submitted_to_vamps)
- VALUES (%s, %s, %s, %s)
- RETURNING artifact_type_id"""
- TRN.add(sql, ['beta_div_plots', 'Qiime 1 beta diversity results',
- False, False])
- dm_atype_id = TRN.execute_fetchlast()
- TRN.add(sql, ['rarefaction_curves', 'Rarefaction curves', False, False])
- rc_atype_id = TRN.execute_fetchlast()
- TRN.add(sql, ['taxa_summary', 'Taxa summary plots', False, False])
- ts_atype_id = TRN.execute_fetchlast()
-
- # Associate each artifact with the filetypes that it accepts
- # At this time we are going to add them as directories, just as it is done
- # right now. We can make it fancier with the new type system.
- # Magic number 8: the filepath_type_id for the directory
- sql = """INSERT INTO qiita.artifact_type_filepath_type
- (artifact_type_id, filepath_type_id, required)
- VALUES (%s, %s, %s)"""
- sql_args = [[dm_atype_id, 8, True],
- [rc_atype_id, 8, True],
- [ts_atype_id, 8, True]]
- TRN.add(sql, sql_args, many=True)
-
-    # Create the new commands that execute the current analyses. In qiita,
-    # the only commands that were available are Summarize Taxa, Beta
-    # Diversity and Alpha Rarefaction. The system was executing rarefaction
-    # by default, but it should be a different step in the analysis process
-    # so we are going to create a command for it too. These commands are
-    # going to be part of the QIIME plugin, so we first retrieve the id of
-    # the QIIME 1.9.1 plugin, which is guaranteed to exist because it was
-    # added in patch 33 and there is no way of removing plugins
-
- # Step 1: Get the QIIME plugin id
- sql = """SELECT software_id
- FROM qiita.software
- WHERE name = 'QIIME' AND version = '1.9.1'"""
- TRN.add(sql)
- qiime_id = TRN.execute_fetchlast()
-
- # Step 2: Insert the new commands in the software_command table
- sql = """INSERT INTO qiita.software_command
- (software_id, name, description, is_analysis)
- VALUES (%s, %s, %s, TRUE)
- RETURNING command_id"""
- TRN.add(sql, [qiime_id, 'Summarize Taxa', 'Plots taxonomy summaries at '
- 'different taxonomy levels'])
- sum_taxa_cmd_id = TRN.execute_fetchlast()
- TRN.add(sql, [qiime_id, 'Beta Diversity',
- 'Computes and plots beta diversity results'])
- bdiv_cmd_id = TRN.execute_fetchlast()
- TRN.add(sql, [qiime_id, 'Alpha Rarefaction',
- 'Computes and plots alpha rarefaction results'])
- arare_cmd_id = TRN.execute_fetchlast()
- TRN.add(sql, [qiime_id, 'Single Rarefaction',
- 'Rarefies the input table by random sampling without '
- 'replacement'])
- srare_cmd_id = TRN.execute_fetchlast()
-
- # Step 3: Insert the parameters for each command
- sql = """INSERT INTO qiita.command_parameter
- (command_id, parameter_name, parameter_type, required,
- default_value)
- VALUES (%s, %s, %s, %s, %s)
- RETURNING command_parameter_id"""
- sql_args = [
- # Summarize Taxa
- (sum_taxa_cmd_id, 'metadata_category', 'string', False, ''),
- (sum_taxa_cmd_id, 'sort', 'bool', False, 'False'),
- # Beta Diversity
- (bdiv_cmd_id, 'tree', 'string', False, ''),
- (bdiv_cmd_id, 'metric',
- 'choice:["abund_jaccard","binary_chisq","binary_chord",'
- '"binary_euclidean","binary_hamming","binary_jaccard",'
- '"binary_lennon","binary_ochiai","binary_otu_gain","binary_pearson",'
- '"binary_sorensen_dice","bray_curtis","bray_curtis_faith",'
- '"bray_curtis_magurran","canberra","chisq","chord","euclidean",'
- '"gower","hellinger","kulczynski","manhattan","morisita_horn",'
- '"pearson","soergel","spearman_approx","specprof","unifrac",'
- '"unifrac_g","unifrac_g_full_tree","unweighted_unifrac",'
- '"unweighted_unifrac_full_tree","weighted_normalized_unifrac",'
- '"weighted_unifrac"]', False, '"binary_jaccard"'),
- # Alpha rarefaction
- (arare_cmd_id, 'tree', 'string', False, ''),
- (arare_cmd_id, 'num_steps', 'integer', False, 10),
- (arare_cmd_id, 'min_rare_depth', 'integer', False, 10),
- (arare_cmd_id, 'max_rare_depth', 'integer', False, 'Default'),
- (arare_cmd_id, 'metrics',
- 'mchoice:["ace","berger_parker_d","brillouin_d","chao1","chao1_ci",'
- '"dominance","doubles","enspie","equitability","esty_ci",'
- '"fisher_alpha","gini_index","goods_coverage","heip_e",'
- '"kempton_taylor_q","margalef","mcintosh_d","mcintosh_e",'
- '"menhinick","michaelis_menten_fit","observed_otus",'
- '"observed_species","osd","simpson_reciprocal","robbins",'
- '"shannon","simpson","simpson_e","singles","strong","PD_whole_tree"]',
- False, '["chao1","observed_otus"]'),
- # Single rarefaction
- (srare_cmd_id, 'depth', 'integer', True, None),
- (srare_cmd_id, 'subsample_multinomial', 'bool', False, 'False')
- ]
- TRN.add(sql, sql_args, many=True)
-
- TRN.add(sql, [sum_taxa_cmd_id, 'biom_table', 'artifact', True, None])
- sum_taxa_cmd_param_id = TRN.execute_fetchlast()
- TRN.add(sql, [bdiv_cmd_id, 'biom_table', 'artifact', True, None])
- bdiv_cmd_param_id = TRN.execute_fetchlast()
- TRN.add(sql, [arare_cmd_id, 'biom_table', 'artifact', True, None])
- arare_cmd_param_id = TRN.execute_fetchlast()
- TRN.add(sql, [srare_cmd_id, 'biom_table', 'artifact', True, None])
- srare_cmd_param_id = TRN.execute_fetchlast()
-
- # Step 4: Connect the artifact parameters with the artifact types that
- # they accept
- sql = """SELECT artifact_type_id
- FROM qiita.artifact_type
- WHERE artifact_type = 'BIOM'"""
- TRN.add(sql)
- biom_atype_id = TRN.execute_fetchlast()
-
- sql = """INSERT INTO qiita.parameter_artifact_type
- (command_parameter_id, artifact_type_id)
- VALUES (%s, %s)"""
- sql_args = [[sum_taxa_cmd_param_id, biom_atype_id],
- [bdiv_cmd_param_id, biom_atype_id],
- [arare_cmd_param_id, biom_atype_id],
- [srare_cmd_param_id, biom_atype_id]]
- TRN.add(sql, sql_args, many=True)
-
- # Step 5: Add the outputs of the command.
- sql = """INSERT INTO qiita.command_output
- (name, command_id, artifact_type_id)
- VALUES (%s, %s, %s)
- RETURNING command_output_id"""
- TRN.add(sql, ['taxa_summary', sum_taxa_cmd_id, ts_atype_id])
- sum_taxa_cmd_out_id = TRN.execute_fetchlast()
- TRN.add(sql, ['distance_matrix', bdiv_cmd_id, dm_atype_id])
- bdiv_cmd_out_id = TRN.execute_fetchlast()
- TRN.add(sql, ['rarefaction_curves', arare_cmd_id, rc_atype_id])
- arare_cmd_out_id = TRN.execute_fetchlast()
- TRN.add(sql, ['rarefied_table', srare_cmd_id, biom_atype_id])
- srare_cmd_out_id = TRN.execute_fetchlast()
-
- # Step 6: Add default parameter sets
- sql = """INSERT INTO qiita.default_parameter_set
- (command_id, parameter_set_name, parameter_set)
- VALUES (%s, %s, %s)"""
- sql_args = [
- [sum_taxa_cmd_id, 'Defaults',
- '{"sort": false, "metadata_category": ""}'],
- [bdiv_cmd_id, 'Unweighted UniFrac',
- '{"metric": "unweighted_unifrac", "tree": ""}'],
- [arare_cmd_id, 'Defaults',
- '{"max_rare_depth": "Default", "tree": "", "num_steps": 10, '
- '"min_rare_depth": 10, "metrics": ["chao1", "observed_otus"]}'],
- [srare_cmd_id, 'Defaults',
- '{"subsample_multinomial": "False"}']]
- TRN.add(sql, sql_args, many=True)
-
-# At this point we are ready to start transferring the data from the old
-# structures to the new structures. Overview of the procedure:
-# Step 1: Add initial set of artifacts up to rarefied table
-# Step 2: Transfer the "analysis jobs" to processing jobs and create
-# the analysis artifacts
-db_dir = get_db_files_base_dir()
-with TRN:
- sql = "SELECT * FROM qiita.analysis"
- TRN.add(sql)
- analysis_info = TRN.execute_fetchindex()
-
-    # Loop through all the analyses
-    for analysis in analysis_info:
-        # Step 1: Add the initial set of artifacts. An analysis starts with
- # a set of BIOM artifacts.
- sql = """SELECT *
- FROM qiita.analysis_filepath
- JOIN qiita.filepath USING (filepath_id)
- JOIN qiita.filepath_type USING (filepath_type_id)
- WHERE analysis_id = %s AND filepath_type = 'biom'"""
- TRN.add(sql, [analysis['analysis_id']])
- analysis_bioms = TRN.execute_fetchindex()
-
- # Loop through all the biom tables associated with the current analysis
- # so we can create the initial set of artifacts
- for biom_data in analysis_bioms:
- # Get the path of the BIOM table
- sql = """SELECT filepath, mountpoint
- FROM qiita.filepath
- JOIN qiita.data_directory USING (data_directory_id)
- WHERE filepath_id = %s"""
- TRN.add(sql, [biom_data['filepath_id']])
- # Magic number 0: There is only a single row in the query result
- fp_info = TRN.execute_fetchindex()[0]
- filepath = join(db_dir, fp_info['mountpoint'], fp_info['filepath'])
-
- # We need to check if the BIOM table has been rarefied or not
- table = load_table(filepath)
- depths = set(table.sum(axis='sample'))
- if len(depths) == 1:
- # The BIOM table was rarefied
- # Create the initial unrarefied artifact
- initial_biom_artifact_id = create_non_rarefied_biom_artifact(
- analysis, biom_data, table)
- # Create the rarefaction job
- rarefaction_job_id, params = create_rarefaction_job(
- depths.pop(), initial_biom_artifact_id, analysis,
- srare_cmd_id)
- # Create the rarefied artifact
- rarefied_biom_artifact_id = create_rarefied_biom_artifact(
- analysis, srare_cmd_id, biom_data, params,
- initial_biom_artifact_id, rarefaction_job_id,
- srare_cmd_out_id)
- else:
- # The BIOM table was not rarefied, use current table as initial
- initial_biom_id = transfer_file_to_artifact(
- analysis['analysis_id'], analysis['timestamp'], None,
- biom_data['data_type_id'], None, 7,
- biom_data['filepath_id'])
-
- # Loop through all the jobs that used this biom table as input
- sql = """SELECT *
- FROM qiita.job
- WHERE reverse(split_part(reverse(
- options::json->>'--otu_table_fp'), '/', 1)) = %s"""
-            # the SQL above compares against the file's basename, so pass
-            # the basename rather than the full path
-            TRN.add(sql, [basename(filepath)])
- analysis_jobs = TRN.execute_fetchindex()
- for job_data in analysis_jobs:
-            # Identify which command the current job executed
- if job_data['command_id'] == 1:
- # Taxa summaries
- cmd_id = sum_taxa_cmd_id
- params = ('{"biom_table":%d,"metadata_category":"",'
- '"sort":false}' % initial_biom_id)
- output_artifact_type_id = ts_atype_id
- cmd_out_id = sum_taxa_cmd_out_id
- elif job_data['command_id'] == 2:
- # Beta diversity
- cmd_id = bdiv_cmd_id
- tree_fp = loads(job_data['options'])['--tree_fp']
- if tree_fp:
- params = ('{"biom_table":%d,"tree":"%s","metrics":'
- '["unweighted_unifrac","weighted_unifrac"]}'
- % (initial_biom_id, tree_fp))
- else:
- params = ('{"biom_table":%d,"metrics":["bray_curtis",'
- '"gower","canberra","pearson"]}'
- % initial_biom_id)
- output_artifact_type_id = dm_atype_id
- cmd_out_id = bdiv_cmd_out_id
- else:
- # Alpha rarefaction
- cmd_id = arare_cmd_id
- tree_fp = loads(job_data['options'])['--tree_fp']
- params = ('{"biom_table":%d,"tree":"%s","num_steps":"10",'
- '"min_rare_depth":"10",'
- '"max_rare_depth":"Default"}'
- % (initial_biom_id, tree_fp))
- output_artifact_type_id = rc_atype_id
- cmd_out_id = arare_cmd_out_id
-
- transfer_job(analysis, cmd_id, params, initial_biom_id,
- job_data, cmd_out_id, biom_data,
- output_artifact_type_id)
-
-errors = []
-with TRN:
- # Unlink the analysis from the biom table filepaths
- # Magic number 7 -> biom filepath type
- sql = """DELETE FROM qiita.analysis_filepath
- WHERE filepath_id IN (SELECT filepath_id
- FROM qiita.filepath
- WHERE filepath_type_id = 7)"""
- TRN.add(sql)
- TRN.execute()
-
- # Delete old structures that are not used anymore
- tables = ["collection_job", "collection_analysis", "collection_users",
- "collection", "collection_status", "analysis_workflow",
- "analysis_chain", "analysis_job", "job_results_filepath", "job",
- "job_status", "command_data_type", "command", "analysis_status"]
- for table in tables:
- TRN.add("DROP TABLE qiita.%s" % table)
- try:
- TRN.execute()
- except Exception as e:
- errors.append("Error deleting table %s: %s" % (table, str(e)))
-
-# Purge filepaths
-try:
- purge_filepaths(False)
-except Exception as e:
- errors.append("Error purging filepaths: %s" % str(e))
-
-if errors:
- print("\n".join(errors))
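
The rarefaction check in the patch above (`len(depths) == 1`) relies on a rarefied table having the same total count in every sample; a toy demonstration with the biom-format API:

```python
import numpy as np
from biom import Table

# 2 observations x 3 samples; every sample's total count is 5
data = np.array([[3, 1, 4],
                 [2, 4, 1]])
table = Table(data, ['O1', 'O2'], ['S1', 'S2', 'S3'])

depths = set(float(s) for s in table.sum(axis='sample'))
print(depths)            # {5.0}
print(len(depths) == 1)  # True -> treat the table as rarefied
```
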
diff --git a/qiita_db/support_files/patches/python_patches/55.py b/qiita_db/support_files/patches/python_patches/55.py
deleted file mode 100644
index 9ec6633d0..000000000
--- a/qiita_db/support_files/patches/python_patches/55.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from qiita_db.sql_connection import TRN
-
-sql = """
- SELECT constraint_name AS cname, 'qiita.' || table_name AS tname
- FROM information_schema.table_constraints
- WHERE constraint_type ='FOREIGN KEY' AND (
- (constraint_name LIKE 'fk_sample_%' AND table_name LIKE 'sample_%') OR
- (constraint_name LIKE 'fk_prep_%' AND table_name LIKE 'prep_%')) AND
- table_name NOT IN (
- 'prep_template', 'prep_template_sample', 'prep_template_filepath',
- 'prep_template_processing_job')"""
-
-with TRN:
- TRN.add(sql)
- to_delete = TRN.execute_fetchindex()
-
-for cname, tname in to_delete:
- with TRN:
- sql = "ALTER TABLE %s DROP CONSTRAINT %s" % (tname, cname)
- TRN.add(sql)
- TRN.execute()
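
For context, the same constraint cleanup could be run directly against the database; this is a hedged psycopg2 sketch with placeholder connection parameters, not the patch's own code:

```python
import psycopg2

conn = psycopg2.connect(dbname='qiita_test', user='postgres',
                        host='localhost')  # placeholder credentials
with conn, conn.cursor() as cur:
    cur.execute("""SELECT constraint_name, table_name
                   FROM information_schema.table_constraints
                   WHERE constraint_type = 'FOREIGN KEY'
                     AND constraint_name LIKE 'fk_sample_%'""")
    for cname, tname in cur.fetchall():
        # names come from the catalog, not from user input, so plain
        # string interpolation is acceptable here
        cur.execute('ALTER TABLE qiita.%s DROP CONSTRAINT %s'
                    % (tname, cname))
conn.close()
```
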
diff --git a/qiita_db/support_files/patches/python_patches/58.py b/qiita_db/support_files/patches/python_patches/58.py
deleted file mode 100644
index d77373876..000000000
--- a/qiita_db/support_files/patches/python_patches/58.py
+++ /dev/null
@@ -1,325 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from json import loads, dumps
-
-from qiita_core.qiita_settings import r_client
-from qiita_db.sql_connection import TRN
-from qiita_db.software import Software, Command, Parameters
-from qiita_db.processing_job import ProcessingJob
-from qiita_db.study import Study
-from qiita_db.exceptions import (QiitaDBUnknownIDError, QiitaDBError,
- QiitaDBDuplicateError)
-from qiita_db.metadata_template.prep_template import PrepTemplate
-from qiita_db.util import convert_to_id
-
-
-def create_command(software, name, description, parameters, outputs=None,
- analysis_only=False):
- r"""Replicates the Command.create code at the time the patch was written"""
-    # Perform some sanity checks on the parameters dictionary
- if not parameters:
- raise QiitaDBError(
- "Error creating command %s. At least one parameter should "
- "be provided." % name)
- sql_param_values = []
- sql_artifact_params = []
- for pname, vals in parameters.items():
- if len(vals) != 2:
- raise QiitaDBError(
- "Malformed parameters dictionary, the format should be "
- "{param_name: [parameter_type, default]}. Found: "
- "%s for parameter name %s" % (vals, pname))
-
- ptype, dflt = vals
- # Check that the type is one of the supported types
- supported_types = ['string', 'integer', 'float', 'reference',
- 'boolean', 'prep_template', 'analysis']
- if ptype not in supported_types and not ptype.startswith(
- ('choice', 'mchoice', 'artifact')):
- supported_types.extend(['choice', 'mchoice', 'artifact'])
- raise QiitaDBError(
- "Unsupported parameters type '%s' for parameter %s. "
- "Supported types are: %s"
- % (ptype, pname, ', '.join(supported_types)))
-
- if ptype.startswith(('choice', 'mchoice')) and dflt is not None:
- choices = set(loads(ptype.split(':')[1]))
- dflt_val = dflt
- if ptype.startswith('choice'):
-                # In the choice case, the dflt value is a single string;
-                # wrap it in a list so we can use the issuperset call
-                # below
- dflt_val = [dflt_val]
- else:
- # jsonize the list to store it in the DB
- dflt = dumps(dflt)
- if not choices.issuperset(dflt_val):
- raise QiitaDBError(
- "The default value '%s' for the parameter %s is not "
- "listed in the available choices: %s"
- % (dflt, pname, ', '.join(choices)))
-
- if ptype.startswith('artifact'):
- atypes = loads(ptype.split(':')[1])
- sql_artifact_params.append(
- [pname, 'artifact', atypes])
- else:
- if dflt is not None:
- sql_param_values.append([pname, ptype, False, dflt])
- else:
- sql_param_values.append([pname, ptype, True, None])
-
- with TRN:
- sql = """SELECT EXISTS(SELECT *
- FROM qiita.software_command
- WHERE software_id = %s AND name = %s)"""
- TRN.add(sql, [software.id, name])
- if TRN.execute_fetchlast():
- raise QiitaDBDuplicateError(
- "command", "software: %d, name: %s"
- % (software.id, name))
- # Add the command to the DB
- sql = """INSERT INTO qiita.software_command
- (name, software_id, description, is_analysis)
- VALUES (%s, %s, %s, %s)
- RETURNING command_id"""
- sql_params = [name, software.id, description, analysis_only]
- TRN.add(sql, sql_params)
- c_id = TRN.execute_fetchlast()
-
- # Add the parameters to the DB
- sql = """INSERT INTO qiita.command_parameter
- (command_id, parameter_name, parameter_type, required,
- default_value)
- VALUES (%s, %s, %s, %s, %s)
- RETURNING command_parameter_id"""
- sql_params = [[c_id, pname, p_type, reqd, default]
- for pname, p_type, reqd, default in sql_param_values]
- TRN.add(sql, sql_params, many=True)
- TRN.execute()
-
- # Add the artifact parameters
- sql_type = """INSERT INTO qiita.parameter_artifact_type
- (command_parameter_id, artifact_type_id)
- VALUES (%s, %s)"""
- supported_types = []
- for pname, p_type, atypes in sql_artifact_params:
- sql_params = [c_id, pname, p_type, True, None]
- TRN.add(sql, sql_params)
- pid = TRN.execute_fetchlast()
- sql_params = [[pid, convert_to_id(at, 'artifact_type')]
- for at in atypes]
- TRN.add(sql_type, sql_params, many=True)
- supported_types.extend([atid for _, atid in sql_params])
-
- # If the software type is 'artifact definition', there are a couple
- # of extra steps
- if software.type == 'artifact definition':
- # If supported types is not empty, link the software with these
- # types
- if supported_types:
- sql = """INSERT INTO qiita.software_artifact_type
- (software_id, artifact_type_id)
- VALUES (%s, %s)"""
- sql_params = [[software.id, atid]
- for atid in supported_types]
- TRN.add(sql, sql_params, many=True)
- # If this is the validate command, we need to add the
- # provenance and name parameters. These are used internally,
- # that's why we are adding them here
- if name == 'Validate':
- sql = """INSERT INTO qiita.command_parameter
- (command_id, parameter_name, parameter_type,
- required, default_value)
- VALUES (%s, 'name', 'string', 'False',
- 'dflt_name'),
- (%s, 'provenance', 'string', 'False', NULL)
- """
- TRN.add(sql, [c_id, c_id])
-
- # Add the outputs to the command
- if outputs:
- sql = """INSERT INTO qiita.command_output
- (name, command_id, artifact_type_id)
- VALUES (%s, %s, %s)"""
- sql_args = [[pname, c_id, convert_to_id(at, 'artifact_type')]
- for pname, at in outputs.items()]
- TRN.add(sql, sql_args, many=True)
- TRN.execute()
-
- return Command(c_id)
-
-
-def correct_redis_data(key, cmd, values_dict, user):
- """Corrects the data stored in the redis DB
-
- Parameters
- ----------
- key: str
- The redis key to fix
- cmd : qiita_db.software.Command
- Command to use to create the processing job
- values_dict : dict
- Dictionary used to instantiate the parameters of the command
-    user : qiita_db.user.User
- The user that will own the job
- """
- info = r_client.get(key)
- if info:
- info = loads(info)
- if info['job_id'] is not None:
- if 'is_qiita_job' in info:
- if info['is_qiita_job']:
- try:
- job = ProcessingJob(info['job_id'])
- payload = {'job_id': info['job_id'],
- 'alert_type': info['status'],
- 'alert_msg': info['alert_msg']}
- r_client.set(key, dumps(payload))
- except (QiitaDBUnknownIDError, KeyError):
-                    # We somehow lost the information of this job
- # Simply delete the key
- r_client.delete(key)
- else:
- # These jobs don't contain any information on the live
- # dump. We can safely delete the key
- r_client.delete(key)
- else:
- # These jobs don't contain any information on the live
- # dump. We can safely delete the key
- r_client.delete(key)
- else:
- # Job is null, we have the information here
- if info['status'] == 'success':
- # In the success case no information is stored. We can
- # safely delete the key
- r_client.delete(key)
- elif info['status'] == 'warning':
- # In case of warning the key message stores the warning
- # message. We need to create a new job, mark it as
- # successful and store the error message as expected by
- # the new structure
- params = Parameters.load(cmd, values_dict=values_dict)
- job = ProcessingJob.create(user, params)
- job._set_status('success')
- payload = {'job_id': job.id,
- 'alert_type': 'warning',
- 'alert_msg': info['message']}
- r_client.set(key, dumps(payload))
- else:
- # The status is error. The key message stores the error
- # message. We need to create a new job and mark it as
- # failed with the given error message
- params = Parameters.load(cmd, values_dict=values_dict)
- job = ProcessingJob.create(user, params)
- job._set_error(info['message'])
- payload = {'job_id': job.id}
- r_client.set(key, dumps(payload))
- else:
- # The key doesn't contain any information. Delete the key
- r_client.delete(key)
-
-
-with TRN:
- # Retrieve the Qiita plugin
- qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
-
-    # Create the submit_to_VAMPS command
- parameters = {'artifact': ['integer', None]}
- create_command(qiita_plugin, "submit_to_VAMPS",
- "submits an artifact to VAMPS", parameters)
-
- # Create the copy artifact command
- parameters = {'artifact': ['integer', None],
- 'prep_template': ['prep_template', None]}
- create_command(qiita_plugin, "copy_artifact",
- "Creates a copy of an artifact", parameters)
-
-    # Create the submit_to_EBI command
- parameters = {'artifact': ['integer', None],
- 'submission_type': ['choice:["ADD", "MODIFY"]', 'ADD']}
- create_command(qiita_plugin, "submit_to_EBI",
- "submits an artifact to EBI", parameters)
-
-    # Create the delete_artifact command
- parameters = {'artifact': ['integer', None]}
- create_command(qiita_plugin, "delete_artifact",
- "Delete an artifact", parameters)
-
-    # Create the create_sample_template command
- parameters = {
- 'fp': ['string', None], 'study_id': ['integer', None],
- 'is_mapping_file': ['boolean', True], 'data_type': ['string', None]}
- create_command(qiita_plugin, "create_sample_template",
- "Create a sample template", parameters)
-
- # Create the update sample template command
- parameters = {'study': ['integer', None], 'template_fp': ['string', None]}
- st_cmd = create_command(qiita_plugin, "update_sample_template",
- "Updates the sample template", parameters)
-
- # Create the delete study command
- parameters = {'study': ['integer', None]}
- create_command(qiita_plugin, "delete_study",
- "Deletes a full study", parameters)
-
- # Create the delete sample template command
- parameters = {'study': ['integer', None]}
- create_command(qiita_plugin, "delete_sample_template",
- "Deletes a sample template", parameters)
-
- # Create the update prep template command
- parameters = {'prep_template': ['integer', None],
- 'template_fp': ['string', None]}
- pt_cmd = create_command(qiita_plugin, "update_prep_template",
- "Updates the prep template", parameters)
-
- # Create the delete sample or column command
- parameters = {
- 'obj_class': ['choice:["SampleTemplate", "PrepTemplate"]', None],
- 'obj_id': ['integer', None],
- 'sample_or_col': ['choice:["samples", "columns"]', None],
- 'name': ['string', None]}
- create_command(qiita_plugin, "delete_sample_or_column",
- "Deletes a sample or a columns from the metadata",
- parameters)
-
- # Create the command to complete a job
- parameters = {'job_id': ['string', None], 'payload': ['string', None]}
- create_command(qiita_plugin, "complete_job", "Completes a given job",
- parameters)
-
-    # Assumptions on the structure of the data in the redis database have
-    # changed; we need to fix the existing entries to avoid failures
- # Get all the sample template keys
- for key in r_client.keys('sample_template_[0-9]*'):
- try:
- study = Study(int(key.split('_')[-1]))
- user = study.owner
- except QiitaDBUnknownIDError:
- # This means that the study no longer exists - delete the key
- # and continue
- r_client.delete(key)
- continue
- values_dict = {'study': study.id, 'template_fp': 'ignored-patch58'}
- correct_redis_data(key, st_cmd, values_dict, user)
-
- # Get all the prep template keys
- for key in r_client.keys('prep_template_[0-9]*'):
- try:
- pt = PrepTemplate(int(key.split('_')[-1]))
- user = Study(pt.study_id).owner
- except QiitaDBUnknownIDError:
- # This means that the prep template no longer exists - delete the
- # key and continue
- r_client.delete(key)
- continue
- values_dict = {'prep_template': pt.id,
- 'template_fp': 'ignored-patch58'}
- correct_redis_data(key, pt_cmd, values_dict, user)
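
The `parameters` dictionaries passed to `create_command` above always map a parameter name to `[parameter_type, default]`, where a `None` default marks the parameter as required. A hypothetical example (the command and parameter names are invented):

```python
parameters = {
    'study': ['integer', None],                   # required
    'notify': ['boolean', 'True'],                # optional, has default
    'mode': ['choice:["fast", "slow"]', 'fast'],  # checked against choices
}
# with a live DB this would be:
# qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
# cmd = create_command(qiita_plugin, 'example_command',
#                      'An illustrative command', parameters)
```
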
diff --git a/qiita_db/support_files/patches/python_patches/6.py b/qiita_db/support_files/patches/python_patches/6.py
deleted file mode 100644
index d04b10e76..000000000
--- a/qiita_db/support_files/patches/python_patches/6.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-# Nov 22, 2014
-# This patch is to create all the prep/sample template files and link them in
-# the database so they are present for download
-
-from os.path import join
-from time import strftime
-
-import qiita_db as qdb
-
-with qdb.sql_connection.TRN:
- _id, fp_base = qdb.util.get_mountpoint('templates')[0]
-
- qdb.sql_connection.TRN.add("SELECT study_id FROM qiita.study")
- for study_id in qdb.sql_connection.TRN.execute_fetchflatten():
- if qdb.metadata_template.sample_template.SampleTemplate.exists(
- study_id):
- st = qdb.metadata_template.sample_template.SampleTemplate(study_id)
- fp = join(fp_base,
- '%d_%s.txt' % (study_id, strftime("%Y%m%d-%H%M%S")))
- st.to_file(fp)
- st.add_filepath(fp)
-
- qdb.sql_connection.TRN.add(
- "SELECT prep_template_id FROM qiita.prep_template")
- for prep_template_id in qdb.sql_connection.TRN.execute_fetchflatten():
- pt = qdb.metadata_template.prep_template.PrepTemplate(prep_template_id)
- study_id = pt.study_id
-
- fp = join(fp_base,
- '%d_prep_%d_%s.txt' % (pt.study_id, prep_template_id,
- strftime("%Y%m%d-%H%M%S")))
- pt.to_file(fp)
- pt.add_filepath(fp)
diff --git a/qiita_db/support_files/patches/python_patches/61.py b/qiita_db/support_files/patches/python_patches/61.py
deleted file mode 100644
index 8ebbab746..000000000
--- a/qiita_db/support_files/patches/python_patches/61.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# October 30th, 2017
-# A change introduced in July made all parameters be stored as strings;
-# the DB needs to be patched so all the artifacts follow this structure
-
-from json import dumps
-
-from qiita_db.sql_connection import TRN
-
-with TRN:
- sql = """SELECT *
- FROM qiita.artifact
- JOIN qiita.artifact_output_processing_job
- USING (artifact_id)
- WHERE command_id IS NOT NULL"""
- TRN.add(sql)
-
- sql_update_artifact = """UPDATE qiita.artifact
- SET command_parameters = %s
- WHERE artifact_id = %s"""
- sql_update_job = """UPDATE qiita.processing_job
- SET command_parameters = %s
- WHERE processing_job_id = %s"""
- for ainfo in TRN.execute_fetchindex():
- ainfo = dict(ainfo)
- params = dumps(
- {k: str(v) for k, v in ainfo['command_parameters'].items()})
- TRN.add(sql_update_artifact, [params, ainfo['artifact_id']])
- TRN.add(sql_update_job, [params, ainfo['processing_job_id']])
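
What the patch above does to each parameter set, in isolation:

```python
from json import dumps, loads

params = {'depth': 1000, 'subsample_multinomial': False}
# every value becomes a string before being written back to the DB
as_strings = dumps({k: str(v) for k, v in params.items()})
print(as_strings)
# {"depth": "1000", "subsample_multinomial": "False"}
print(loads(as_strings))
# {'depth': '1000', 'subsample_multinomial': 'False'}
```
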
diff --git a/qiita_db/support_files/patches/python_patches/62.py b/qiita_db/support_files/patches/python_patches/62.py
deleted file mode 100644
index 35f638479..000000000
--- a/qiita_db/support_files/patches/python_patches/62.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Nov 28, 2017 (only in py file)
-# Adding a new command into Qiita/Alpha: delete_analysis
-
-from qiita_db.software import Software, Command
-from qiita_db.sql_connection import TRN
-
-# Create the delete analysis command
-Command.create(Software.from_name_and_version('Qiita', 'alpha'),
- 'delete_analysis', 'Deletes a full analysis',
- {'analysis_id': ['integer', None]})
-
-# Make sure that all validate commands have the "analysis" parameter
-with TRN:
- # Get all validate commands that are missing the analysis parameter
- sql = """SELECT command_id
- FROM qiita.software_command sc
- WHERE name = 'Validate' AND NOT (
- SELECT EXISTS(SELECT *
- FROM qiita.command_parameter
- WHERE parameter_name = 'analysis'
- AND command_id = sc.command_id));"""
- TRN.add(sql)
- sql = """INSERT INTO qiita.command_parameter
- (command_id, parameter_name, parameter_type,
- required, default_value, name_order, check_biom_merge)
-             VALUES (%s, %s, %s, %s, %s, %s, %s)"""
- sql_params = [[cmd_id, 'analysis', 'analysis', False, None, None, False]
- for cmd_id in TRN.execute_fetchflatten()]
- TRN.add(sql, sql_params, many=True)
- TRN.execute()
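
The insert above uses one parameterized statement with `many=True`, i.e. the placeholders are filled once per parameter list; a minimal sketch of the shape of that call (the command ids are invented):

```python
sql = """INSERT INTO qiita.command_parameter
            (command_id, parameter_name, parameter_type,
             required, default_value, name_order, check_biom_merge)
         VALUES (%s, %s, %s, %s, %s, %s, %s)"""
# one parameter list per command that is missing the 'analysis' parameter
sql_params = [[cmd_id, 'analysis', 'analysis', False, None, None, False]
              for cmd_id in (6, 7, 8)]  # illustrative ids
# TRN.add(sql, sql_params, many=True)
# TRN.execute()
```
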
diff --git a/qiita_db/support_files/patches/python_patches/64.py b/qiita_db/support_files/patches/python_patches/64.py
deleted file mode 100644
index ef58efac4..000000000
--- a/qiita_db/support_files/patches/python_patches/64.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# April 5, 2018
-# Making sure that all parameters in the artifacts are strings
-
-from json import dumps
-from qiita_db.sql_connection import TRN
-
-# Get the current command parameters of all artifacts so we can make sure
-# every value is stored as a string
-with TRN:
- sql = """SELECT artifact_id, command_parameters
- FROM qiita.artifact"""
- TRN.add(sql)
-
- all_rows = TRN.execute_fetchindex()
-
-sql = """UPDATE qiita.artifact
- SET command_parameters = %s
- WHERE artifact_id = %s"""
-# taking the loop outside so we can have a TRN per change
-for row in all_rows:
- aid, params = row
-
- if params is None:
- continue
-
-    # nothing to convert if no parameter value is an integer
-    if not any(isinstance(v, int) for v in params.values()):
-        continue
-
- params = {k: str(v) if isinstance(v, int) else v
- for k, v in params.items()}
-
- with TRN:
- TRN.add(sql, [dumps(params), aid])
- TRN.execute()
diff --git a/qiita_db/support_files/patches/python_patches/66.py b/qiita_db/support_files/patches/python_patches/66.py
deleted file mode 100644
index 4e288a44e..000000000
--- a/qiita_db/support_files/patches/python_patches/66.py
+++ /dev/null
@@ -1,185 +0,0 @@
-from json import loads, dumps
-from qiita_db.sql_connection import TRN
-from qiita_db.software import Software, Command
-from qiita_db.exceptions import (QiitaDBError, QiitaDBDuplicateError)
-from qiita_db.util import convert_to_id
-from qiita_db.study import Study
-from re import sub
-
-
-# August 6, 2018
-# Create parameters for the ssh/scp remote file upload commands
-# Copied from patch 58.py. Couldn't import due to how the patching system
-# works
-def create_command(software, name, description, parameters, outputs=None,
- analysis_only=False):
- r"""Replicates the Command.create code at the time the patch was written"""
-    # Perform some sanity checks on the parameters dictionary
- if not parameters:
- raise QiitaDBError(
- "Error creating command %s. At least one parameter should "
- "be provided." % name)
- sql_param_values = []
- sql_artifact_params = []
- for pname, vals in parameters.items():
- # vals should always have 2 elements as it should be:
- # param_name: [parameter_type, default]
- if len(vals) != 2:
- raise QiitaDBError(
- "Malformed parameters dictionary, the format should be "
- "{param_name: [parameter_type, default]}. Found: "
- "%s for parameter name %s" % (vals, pname))
-
- ptype, dflt = vals
- # Check that the type is one of the supported types
- supported_types = ['string', 'integer', 'float', 'reference',
- 'boolean', 'prep_template', 'analysis']
- if ptype not in supported_types and not ptype.startswith(
- ('choice', 'mchoice', 'artifact')):
- supported_types.extend(['choice', 'mchoice', 'artifact'])
- raise QiitaDBError(
- "Unsupported parameters type '%s' for parameter %s. "
- "Supported types are: %s"
- % (ptype, pname, ', '.join(supported_types)))
-
- if ptype.startswith(('choice', 'mchoice')) and dflt is not None:
- choices = set(loads(ptype.split(':')[1]))
- dflt_val = dflt
- if ptype.startswith('choice'):
- # In the choice case, the dflt value is a single string,
- # create a list with it the string on it to use the
- # issuperset call below
- dflt_val = [dflt_val]
- else:
- # jsonize the list to store it in the DB
- dflt = dumps(dflt)
- if not choices.issuperset(dflt_val):
- raise QiitaDBError(
- "The default value '%s' for the parameter %s is not "
- "listed in the available choices: %s"
- % (dflt, pname, ', '.join(choices)))
-
- if ptype.startswith('artifact'):
- atypes = loads(ptype.split(':')[1])
- sql_artifact_params.append(
- [pname, 'artifact', atypes])
- else:
- if dflt is not None:
- sql_param_values.append([pname, ptype, False, dflt])
- else:
- sql_param_values.append([pname, ptype, True, None])
-
- with TRN:
- sql = """SELECT EXISTS(SELECT *
- FROM qiita.software_command
- WHERE software_id = %s AND name = %s)"""
- TRN.add(sql, [software.id, name])
- if TRN.execute_fetchlast():
- raise QiitaDBDuplicateError(
- "command", "software: %d, name: %s"
- % (software.id, name))
- # Add the command to the DB
- sql = """INSERT INTO qiita.software_command
- (name, software_id, description, is_analysis)
- VALUES (%s, %s, %s, %s)
- RETURNING command_id"""
- sql_params = [name, software.id, description, analysis_only]
- TRN.add(sql, sql_params)
- c_id = TRN.execute_fetchlast()
-
- # Add the parameters to the DB
- sql = """INSERT INTO qiita.command_parameter
- (command_id, parameter_name, parameter_type, required,
- default_value)
- VALUES (%s, %s, %s, %s, %s)
- RETURNING command_parameter_id"""
- sql_params = [[c_id, pname, p_type, reqd, default]
- for pname, p_type, reqd, default in sql_param_values]
- TRN.add(sql, sql_params, many=True)
- TRN.execute()
-
- # Add the artifact parameters
- sql_type = """INSERT INTO qiita.parameter_artifact_type
- (command_parameter_id, artifact_type_id)
- VALUES (%s, %s)"""
- supported_types = []
- for pname, p_type, atypes in sql_artifact_params:
- sql_params = [c_id, pname, p_type, True, None]
- TRN.add(sql, sql_params)
- pid = TRN.execute_fetchlast()
- sql_params = [[pid, convert_to_id(at, 'artifact_type')]
- for at in atypes]
- TRN.add(sql_type, sql_params, many=True)
- supported_types.extend([atid for _, atid in sql_params])
-
- # If the software type is 'artifact definition', there are a couple
- # of extra steps
- if software.type == 'artifact definition':
- # If supported types is not empty, link the software with these
- # types
- if supported_types:
- sql = """INSERT INTO qiita.software_artifact_type
- (software_id, artifact_type_id)
- VALUES (%s, %s)"""
- sql_params = [[software.id, atid]
- for atid in supported_types]
- TRN.add(sql, sql_params, many=True)
- # If this is the validate command, we need to add the
- # provenance and name parameters. These are used internally,
- # that's why we are adding them here
- if name == 'Validate':
- sql = """INSERT INTO qiita.command_parameter
- (command_id, parameter_name, parameter_type,
- required, default_value)
- VALUES (%s, 'name', 'string', 'False',
- 'dflt_name'),
- (%s, 'provenance', 'string', 'False', NULL)
- """
- TRN.add(sql, [c_id, c_id])
-
- # Add the outputs to the command
- if outputs:
- sql = """INSERT INTO qiita.command_output
- (name, command_id, artifact_type_id)
- VALUES (%s, %s, %s)"""
- sql_args = [[pname, c_id, convert_to_id(at, 'artifact_type')]
- for pname, at in outputs.items()]
- TRN.add(sql, sql_args, many=True)
- TRN.execute()
-
- return Command(c_id)
-
-
-with TRN:
- qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
-
- # Create the 'list_remote_files' command
- parameters = {'url': ['string', None],
- 'private_key': ['string', None],
- 'study_id': ['integer', None]}
- create_command(qiita_plugin, "list_remote_files",
- "retrieves list of valid study files from remote dir",
- parameters)
-
- # Create the 'download_remote_files' command
- parameters = {'url': ['string', None],
- 'destination': ['string', None],
- 'private_key': ['string', None]}
- create_command(qiita_plugin, "download_remote_files",
- "downloads valid study files from remote dir", parameters)
-
-
-# August 31, 2018
-# Strip any UTF-8 characters that are not also printable ASCII characters
-# from study titles. As some analysis packages cannot interpret UTF-8
-# characters, it becomes important to remove them from study titles, as
-# they are used as metadata/identifiers when creating new analyses.
-
-# insert new status_types into this list, or replace it with a call to an
-# appropriate method.
-status_types = ['awaiting_approval', 'sandbox', 'private', 'public']
-
-for status_type in status_types:
- for study in Study.get_by_status(status_type):
- new_title = sub(r'[^\x20-\x7E]+', '', study.title)
- if new_title != study.title:
- study.title = new_title
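
The title cleanup at the end of the patch above keeps only printable ASCII (0x20-0x7E); a standalone demonstration with a made-up title:

```python
from re import sub

title = 'Gut micro\u00adbiome study \u2013 cohort \u0391'
print(sub(r'[^\x20-\x7E]+', '', title))
# 'Gut microbiome study  cohort ' -- the dash and the Greek letter are
# dropped, which can leave doubled or trailing spaces
```
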
diff --git a/qiita_db/support_files/patches/python_patches/7.py b/qiita_db/support_files/patches/python_patches/7.py
deleted file mode 100644
index a1d3d9819..000000000
--- a/qiita_db/support_files/patches/python_patches/7.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2014--, The Qiita Development Team.
-#
-# Distributed under the terms of the BSD 3-clause License.
-#
-# The full license is in the file LICENSE, distributed with this software.
-# -----------------------------------------------------------------------------
-
-# 23 Nov, 2014
-# This patch creates all the qiime mapping files for the existing
-# prep templates
-
-import qiita_db as qdb
-
-with qdb.sql_connection.TRN:
- _id, fp_base = qdb.util.get_mountpoint('templates')[0]
-
- qdb.sql_connection.TRN.add(
- "SELECT prep_template_id FROM qiita.prep_template")
- for prep_template_id in qdb.sql_connection.TRN.execute_fetchflatten():
- pt = qdb.metadata_template.prep_template.PrepTemplate(prep_template_id)
- study_id = pt.study_id
-
- for _, fpt in pt.get_filepaths():
- pt.create_qiime_mapping_file(fpt)
diff --git a/qiita_db/support_files/patches/python_patches/71.py b/qiita_db/support_files/patches/python_patches/71.py
deleted file mode 100644
index 198bf86ea..000000000
--- a/qiita_db/support_files/patches/python_patches/71.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from qiita_db.sql_connection import TRN
-
-with TRN:
- sql = """SELECT DISTINCT table_name
- FROM information_schema.columns
- WHERE table_name LIKE '%_bk'"""
- TRN.add(sql)
- tables = ['qiita.%s' % t for t in TRN.execute_fetchflatten()]
-
-chunk_size = 200
-for i in range(0, len(tables), chunk_size):
-    chunk = tables[i:i + chunk_size]
- sql = "DROP TABLE %s" % ', '.join(chunk)
- with TRN:
- TRN.add(sql)
- TRN.execute()
- TRN.commit()
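
The chunking in the patch above keeps each DROP statement to at most `chunk_size` tables; the same pattern in isolation:

```python
tables = ['qiita.sample_1_bk', 'qiita.sample_2_bk', 'qiita.prep_1_bk']
chunk_size = 2
for i in range(0, len(tables), chunk_size):
    chunk = tables[i:i + chunk_size]
    print('DROP TABLE %s' % ', '.join(chunk))
# DROP TABLE qiita.sample_1_bk, qiita.sample_2_bk
# DROP TABLE qiita.prep_1_bk
```
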
diff --git a/qiita_db/support_files/patches/python_patches/74-helper-step1.py b/qiita_db/support_files/patches/python_patches/74-helper-step1.py
deleted file mode 100644
index dc26593ca..000000000
--- a/qiita_db/support_files/patches/python_patches/74-helper-step1.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from joblib import Parallel, delayed
-
-from os.path import getsize, exists, dirname, abspath, join
-from qiita_db.util import get_filepath_information, compute_checksum
-from qiita_db.sql_connection import TRN
-
-
-# helper function to calculate checksum and file size
-def calculate(finfo):
- try:
- size = getsize(finfo['fullpath'])
-    except (FileNotFoundError, PermissionError):
-        # return the id (not the full info dict) so the result rows have a
-        # consistent shape; these rows are filtered out before writing
-        return finfo['filepath_id'], None, None
-
- checksum = compute_checksum(finfo['fullpath'])
-
- return finfo['filepath_id'], checksum, size
-
-
-# get all filepaths and their filepath information; takes ~10 min
-with TRN:
- TRN.add("SELECT filepath_id FROM qiita.filepath")
- files = []
- for fid in TRN.execute_fetchflatten():
- files.append(get_filepath_information(fid))
-
-
-# just get the filepath ids that haven't been processed; each line of the
-# cache file is filepath_id[tab]checksum[tab]filesize
-fpath = join(dirname(abspath(__file__)), '74.py.cache.tsv')
-processed = []
-if exists(fpath):
- with open(fpath, 'r') as f:
- processed = [int(line.split('\t')[0])
- for line in f.read().split('\n') if line != '']
-files_curr = [f for f in files if f['filepath_id'] not in processed]
-
-# let's use 20 processors and 120 files per iteration
-fids = 120
-processors = 20
-files_len = len(files_curr)
-files_chunks = [files_curr[i * fids:(i + 1) * fids]
- for i in range((files_len + fids - 1) // fids)]
-
-with Parallel(n_jobs=processors, verbose=100) as parallel:
- for fc in files_chunks:
- results = parallel(delayed(calculate)(finfo) for finfo in fc)
-
- with open(fpath, 'a') as f:
- f.write('%s\n' % '\n'.join(['\t'.join(
- [str(fid), str(cs), str(fsize)])
- for fid, cs, fsize in results
- if cs is not None and fsize is not None]))
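The step-1 helper above is resumable: every processed filepath id is appended to `74.py.cache.tsv`, so a re-run only computes checksums for the remainder. A minimal sketch of that skip-what-is-cached pattern, assuming the same one-record-per-line TSV layout:

```python
from os.path import exists

CACHE = '74.py.cache.tsv'  # filepath_id<TAB>checksum<TAB>size per line

def load_processed_ids(fpath):
    # Return the set of filepath ids already present in the cache.
    if not exists(fpath):
        return set()
    with open(fpath) as f:
        return {int(line.split('\t')[0]) for line in f if line.strip()}

files = [{'filepath_id': 1}, {'filepath_id': 2}]  # stand-in records
pending = [f for f in files
           if f['filepath_id'] not in load_processed_ids(CACHE)]
```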
diff --git a/qiita_db/support_files/patches/python_patches/74-helper-step2.py b/qiita_db/support_files/patches/python_patches/74-helper-step2.py
deleted file mode 100644
index 35d05b7e5..000000000
--- a/qiita_db/support_files/patches/python_patches/74-helper-step2.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import pandas as pd
-from os.path import join, dirname, abspath, exists
-from qiita_db.sql_connection import TRN
-
-
-with TRN:
- sql = """SELECT filepath_id
- FROM qiita.filepath"""
- TRN.add(sql)
- fids = TRN.execute_fetchflatten()
-
-fpath = join(dirname(abspath(__file__)), 'support_files', 'patches',
- 'python_patches', '74.py.cache.tsv')
-if not exists(fpath):
- raise ValueError("%s doesn't exits, have you run step 1?" % fpath)
-df = pd.read_csv(fpath, sep='\t', index_col=0, dtype=str,
- names=['filepath_id', 'checksum', 'fp_size'])
-cache = df.to_dict('index')
-
-args = []
-for fid in fids:
- if fid not in cache:
-        print('missing: %d' % fid)
- else:
- args.append([cache[fid]['fp_size'], cache[fid]['checksum'], fid])
-
-with TRN:
- sql = """UPDATE qiita.filepath
- SET fp_size = %s, checksum = %s
- WHERE filepath_id = %s"""
- TRN.add(sql, args, many=True)
- TRN.execute()
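One detail worth noting about the cache load above: `pd.read_csv(..., dtype=str)` reads the index column as strings as well, so the keys of `df.to_dict('index')` are strings while the ids coming out of the database are ints. A minimal sketch that normalizes the keys before membership tests:

```python
import pandas as pd

# Same read call as in the deleted helper; dtype=str also applies to the
# index column, hence the int() normalization below.
df = pd.read_csv('74.py.cache.tsv', sep='\t', index_col=0, dtype=str,
                 names=['filepath_id', 'checksum', 'fp_size'])
cache = {int(k): v for k, v in df.to_dict('index').items()}
assert all(isinstance(k, int) for k in cache)
```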
diff --git a/qiita_db/support_files/patches/python_patches/74.py b/qiita_db/support_files/patches/python_patches/74.py
deleted file mode 100644
index 63d745654..000000000
--- a/qiita_db/support_files/patches/python_patches/74.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import pandas as pd
-from os.path import getsize, join, dirname, abspath, exists
-from qiita_db.util import get_filepath_information, compute_checksum
-from qiita_db.sql_connection import TRN
-
-
-with TRN:
- sql = """SELECT filepath_id
- FROM qiita.filepath"""
- TRN.add(sql)
- fids = TRN.execute_fetchflatten()
-
-
-fpath = join(dirname(abspath(__file__)), 'support_files', 'patches',
- 'python_patches', '74.py.cache.tsv')
-cache = dict()
-if exists(fpath):
- df = pd.read_csv(fpath, sep='\t', index_col=0, dtype=str,
- names=['filepath_id', 'checksum', 'fp_size'])
- cache = df.to_dict('index')
-
-for fid in fids:
- if fid not in cache:
- finfo = get_filepath_information(fid)
- try:
- size = getsize(finfo['fullpath'])
- except FileNotFoundError:
- size = 0
-
- try:
- checksum = compute_checksum(finfo['fullpath'])
- except FileNotFoundError:
- checksum = ''
- else:
- checksum = cache[fid]['checksum']
- size = cache[fid]['fp_size']
-
- with TRN:
- sql = """UPDATE qiita.filepath
- SET fp_size = %s, checksum = %s
- WHERE filepath_id = %s"""
- TRN.add(sql, tuple([size, checksum, fid]))
- TRN.execute()
diff --git a/qiita_db/support_files/patches/python_patches/75.py b/qiita_db/support_files/patches/python_patches/75.py
deleted file mode 100644
index faf6e9ac7..000000000
--- a/qiita_db/support_files/patches/python_patches/75.py
+++ /dev/null
@@ -1,175 +0,0 @@
-from qiita_db.user import User
-from json import loads, dumps
-
-from qiita_core.qiita_settings import r_client
-from qiita_db.sql_connection import TRN
-from qiita_db.software import Software, Command
-from qiita_db.exceptions import QiitaDBError, QiitaDBDuplicateError
-from qiita_db.util import convert_to_id
-
-
-# one of the main side issues raised by #2901 is that new users' emails were
-# not added to the redis database that keeps track of emails and is used to
-# autocomplete when sharing a study. In the next loop we will find all `Users`
-# at the `user` level, search for them in the redis database, and keep the
-# ones that are not found
-missing_emails = [u[0] for u in User.iter() if User(u[0]).level == 'user'
- and not r_client.execute_command(
- 'zrangebylex', 'qiita-usernames',
- '[%s' % u[0], u'[%s\xff' % u[0])]
-
-# now just add them
-for email in missing_emails:
- r_client.zadd('qiita-usernames', {email: 0})
-
-
-# adding new internal command for INSDC download
-# note that create_command is a helper function that has been used in previous patches
-def create_command(software, name, description, parameters, outputs=None,
- analysis_only=False):
- r"""Replicates the Command.create code at the time the patch was written"""
- # Perform some sanity checks in the parameters dictionary
- if not parameters:
- raise QiitaDBError(
- "Error creating command %s. At least one parameter should "
- "be provided." % name)
- sql_param_values = []
- sql_artifact_params = []
- for pname, vals in parameters.items():
- if len(vals) != 2:
- raise QiitaDBError(
- "Malformed parameters dictionary, the format should be "
- "{param_name: [parameter_type, default]}. Found: "
- "%s for parameter name %s" % (vals, pname))
-
- ptype, dflt = vals
- # Check that the type is one of the supported types
- supported_types = ['string', 'integer', 'float', 'reference',
- 'boolean', 'prep_template', 'analysis']
- if ptype not in supported_types and not ptype.startswith(
- ('choice', 'mchoice', 'artifact')):
- supported_types.extend(['choice', 'mchoice', 'artifact'])
- raise QiitaDBError(
- "Unsupported parameters type '%s' for parameter %s. "
- "Supported types are: %s"
- % (ptype, pname, ', '.join(supported_types)))
-
- if ptype.startswith(('choice', 'mchoice')) and dflt is not None:
- choices = set(loads(ptype.split(':')[1]))
- dflt_val = dflt
- if ptype.startswith('choice'):
-                # In the choice case, the dflt value is a single string;
-                # wrap it in a list so the issuperset call below works
- dflt_val = [dflt_val]
- else:
- # jsonize the list to store it in the DB
- dflt = dumps(dflt)
- if not choices.issuperset(dflt_val):
- raise QiitaDBError(
- "The default value '%s' for the parameter %s is not "
- "listed in the available choices: %s"
- % (dflt, pname, ', '.join(choices)))
-
- if ptype.startswith('artifact'):
- atypes = loads(ptype.split(':')[1])
- sql_artifact_params.append(
- [pname, 'artifact', atypes])
- else:
- if dflt is not None:
- sql_param_values.append([pname, ptype, False, dflt])
- else:
- sql_param_values.append([pname, ptype, True, None])
-
- with TRN:
- sql = """SELECT EXISTS(SELECT *
- FROM qiita.software_command
- WHERE software_id = %s AND name = %s)"""
- TRN.add(sql, [software.id, name])
- if TRN.execute_fetchlast():
- raise QiitaDBDuplicateError(
- "command", "software: %d, name: %s"
- % (software.id, name))
- # Add the command to the DB
- sql = """INSERT INTO qiita.software_command
- (name, software_id, description, is_analysis)
- VALUES (%s, %s, %s, %s)
- RETURNING command_id"""
- sql_params = [name, software.id, description, analysis_only]
- TRN.add(sql, sql_params)
- c_id = TRN.execute_fetchlast()
-
- # Add the parameters to the DB
- sql = """INSERT INTO qiita.command_parameter
- (command_id, parameter_name, parameter_type, required,
- default_value)
- VALUES (%s, %s, %s, %s, %s)
- RETURNING command_parameter_id"""
- sql_params = [[c_id, pname, p_type, reqd, default]
- for pname, p_type, reqd, default in sql_param_values]
- TRN.add(sql, sql_params, many=True)
- TRN.execute()
-
- # Add the artifact parameters
- sql_type = """INSERT INTO qiita.parameter_artifact_type
- (command_parameter_id, artifact_type_id)
- VALUES (%s, %s)"""
- supported_types = []
- for pname, p_type, atypes in sql_artifact_params:
- sql_params = [c_id, pname, p_type, True, None]
- TRN.add(sql, sql_params)
- pid = TRN.execute_fetchlast()
- sql_params = [[pid, convert_to_id(at, 'artifact_type')]
- for at in atypes]
- TRN.add(sql_type, sql_params, many=True)
- supported_types.extend([atid for _, atid in sql_params])
-
- # If the software type is 'artifact definition', there are a couple
- # of extra steps
- if software.type == 'artifact definition':
- # If supported types is not empty, link the software with these
- # types
- if supported_types:
- sql = """INSERT INTO qiita.software_artifact_type
- (software_id, artifact_type_id)
- VALUES (%s, %s)"""
- sql_params = [[software.id, atid]
- for atid in supported_types]
- TRN.add(sql, sql_params, many=True)
- # If this is the validate command, we need to add the
- # provenance and name parameters. These are used internally,
- # that's why we are adding them here
- if name == 'Validate':
- sql = """INSERT INTO qiita.command_parameter
- (command_id, parameter_name, parameter_type,
- required, default_value)
- VALUES (%s, 'name', 'string', 'False',
- 'dflt_name'),
- (%s, 'provenance', 'string', 'False', NULL)
- """
- TRN.add(sql, [c_id, c_id])
-
- # Add the outputs to the command
- if outputs:
- sql = """INSERT INTO qiita.command_output
- (name, command_id, artifact_type_id)
- VALUES (%s, %s, %s)"""
- sql_args = [[pname, c_id, convert_to_id(at, 'artifact_type')]
- for pname, at in outputs.items()]
- TRN.add(sql, sql_args, many=True)
- TRN.execute()
-
- return Command(c_id)
-
-
-with TRN:
- # Retrieve the Qiita plugin
- qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
-
- # Create the INSDC download command
- parameters = {
- 'download_source': ['choice:["EBI-ENA", "SRA"]', 'EBI-ENA'],
- 'accession': ["string", 'None'],
- }
- create_command(qiita_plugin, "INSDC_download",
- "Downloads an accession from a given INSDC", parameters)
diff --git a/qiita_db/support_files/patches/python_patches/81.py b/qiita_db/support_files/patches/python_patches/81.py
deleted file mode 100644
index dc131ba1b..000000000
--- a/qiita_db/support_files/patches/python_patches/81.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from os.path import basename
-
-from qiita_db.sql_connection import TRN
-from qiita_db.study import Study
-
-
-for study in Study.iter():
- for pt in study.prep_templates():
- filepaths = pt.get_filepaths()
- if filepaths:
- # filepaths are returned in order so we can take the
- # oldest and newest; then we get the filename and parse the
- # creation time. Note that the filename comes in one of these
- # formats: 1_prep_1_qiime_19700101-000000.txt or
- # 1_prep_1_19700101-000000.txt
- oldest = basename(filepaths[-1][1])[-19:-4].replace('-', ' ')
- newest = basename(filepaths[0][1])[-19:-4].replace('-', ' ')
-
- with TRN:
- sql = """UPDATE qiita.prep_template
- SET creation_timestamp = %s,
- modification_timestamp = %s
- WHERE prep_template_id = %s"""
- TRN.add(sql, [oldest, newest, pt.id])
- TRN.execute()
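The slicing in the deleted patch relies on prep filepaths ending in a fixed-width `YYYYMMDD-HHMMSS.txt` stamp, so `[-19:-4]` isolates the timestamp regardless of the prefix. A quick check of that assumption against both filename formats mentioned in the comment:

```python
from os.path import basename

for fp in ('1_prep_1_qiime_19700101-000000.txt',
           '1_prep_1_19700101-000000.txt'):
    # '19700101-000000.txt' is 19 chars, so [-19:-4] is the raw stamp.
    print(basename(fp)[-19:-4].replace('-', ' '))  # -> '19700101 000000'
```

The `'-'` to `' '` replacement yields a string PostgreSQL can cast to a timestamp in the UPDATE above.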
diff --git a/qiita_db/support_files/patches/test_db_sql/92.sql b/qiita_db/support_files/patches/test_db_sql/92.sql
new file mode 100644
index 000000000..f0449c275
--- /dev/null
+++ b/qiita_db/support_files/patches/test_db_sql/92.sql
@@ -0,0 +1,4 @@
+-- .. date ..
+-- tmp file for @Gossty
+
+SELECT 1;
diff --git a/qiita_db/support_files/populate_test_db.sql b/qiita_db/support_files/populate_test_db.sql
index f8b5466b2..12035c788 100644
--- a/qiita_db/support_files/populate_test_db.sql
+++ b/qiita_db/support_files/populate_test_db.sql
@@ -1,584 +1,1612 @@
--- Populate.sql sets the sequence to begin at 10000, but all tests expect it to start at 1, so set it back to 1 for the test DB population
-SELECT setval('qiita.study_study_id_seq', 1, false);
-
--- Patch 33.sql sets the sequence to begin at 2000, but all tests expect it to start at 1, so set it back to 1 for the test DB population
-SELECT setval('qiita.artifact_artifact_id_seq', 1, false);
-
--- Insert some users in the system. Passwords are 'password' for all users
-INSERT INTO qiita.qiita_user (email, user_level_id, password, name,
- affiliation, address, phone) VALUES
- ('test@foo.bar', 4,
- '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Dude',
- 'Nowhere University', '123 fake st, Apt 0, Faketown, CO 80302',
- '111-222-3344'),
- ('shared@foo.bar', 4,
- '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Shared',
- 'Nowhere University', '123 fake st, Apt 0, Faketown, CO 80302',
- '111-222-3344'),
- ('admin@foo.bar', 1,
- '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Admin',
- 'Owner University', '312 noname st, Apt K, Nonexistantown, CO 80302',
- '222-444-6789'),
- ('demo@microbio.me', 4,
- '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Demo',
- 'Qiita Dev', '1345 Colorado Avenue', '303-492-1984');
-
--- Insert some study persons
-INSERT INTO qiita.study_person (name, email, affiliation, address, phone) VALUES
- ('LabDude', 'lab_dude@foo.bar', 'knight lab', '123 lab street', '121-222-3333'),
- ('empDude', 'emp_dude@foo.bar', 'broad', NULL, '444-222-3333'),
- ('PIDude', 'PI_dude@foo.bar', 'Wash U', '123 PI street', NULL);
-
--- Insert a study: EMP 1001
-INSERT INTO qiita.study (email, first_contact,
- funding, timeseries_type_id, lab_person_id, metadata_complete,
- mixs_compliant, most_recent_contact, principal_investigator_id, reprocess,
- spatial_series, study_title, study_alias, study_description,
- study_abstract, vamps_id, ebi_study_accession) VALUES
- ('test@foo.bar', '2014-05-19 16:10', NULL, 1, 1, TRUE, TRUE,
- '2014-05-19 16:11', 3, FALSE, FALSE,
- 'Identification of the Microbiomes for Cannabis Soils', 'Cannabis Soils', 'Analysis of the Cannabis Plant Microbiome',
- 'This is a preliminary study to examine the microbiota associated with the Cannabis plant. Soils samples from the bulk soil, soil associated with the roots, and the rhizosphere were extracted and the DNA sequenced. Roots from three independent plants of different strains were examined. These roots were obtained November 11, 2011 from plants that had been harvested in the summer. Future studies will attempt to analyze the soils and rhizospheres from the same location at different time points in the plant lifecycle.',
- NULL, 'EBI123456-BB');
-
--- Add portal to the study
-INSERT INTO qiita.study_portal (study_id, portal_type_id) VALUES (1, 1);
-
--- Add some environmental packages to the study
-INSERT INTO qiita.study_environmental_package (study_id, environmental_package_name) VALUES (1, 'soil'), (1, 'plant-associated');
-
--- Insert study_users (share study 1 with shared user)
-INSERT INTO qiita.study_users (study_id, email) VALUES (1, 'shared@foo.bar');
-
--- Insert PMIDs for study
-INSERT INTO qiita.publication (doi, pubmed_id) VALUES
- ('10.100/123456', '123456'),
- ('10.100/7891011', '7891011');
-INSERT INTO qiita.study_publication (study_id, publication, is_doi) VALUES
- (1, '10.100/123456', true),
- (1, '123456', false),
- (1, '10.100/7891011', true),
- (1, '7891011', false);
-
--- Insert an investigation
-INSERT INTO qiita.investigation (investigation_name, investigation_description, contact_person_id) VALUES
- ('TestInvestigation', 'An investigation for testing purposes', 3);
-
--- Insert investigation_study (link study 1 with investigation 1)
-INSERT INTO qiita.investigation_study (investigation_id, study_id) VALUES (1, 1);
-
--- Add the study_sample for study 1
-INSERT INTO qiita.study_sample (study_id, sample_id, ebi_sample_accession, biosample_accession) VALUES
- (1, '1.SKB8.640193', 'ERS000000', 'SAMEA0000000'),
- (1, '1.SKD8.640184', 'ERS000001', 'SAMEA0000001'),
- (1, '1.SKB7.640196', 'ERS000002', 'SAMEA0000002'),
- (1, '1.SKM9.640192', 'ERS000003', 'SAMEA0000003'),
- (1, '1.SKM4.640180', 'ERS000004', 'SAMEA0000004'),
- (1, '1.SKM5.640177', 'ERS000005', 'SAMEA0000005'),
- (1, '1.SKB5.640181', 'ERS000006', 'SAMEA0000006'),
- (1, '1.SKD6.640190', 'ERS000007', 'SAMEA0000007'),
- (1, '1.SKB2.640194', 'ERS000008', 'SAMEA0000008'),
- (1, '1.SKD2.640178', 'ERS000009', 'SAMEA0000009'),
- (1, '1.SKM7.640188', 'ERS000010', 'SAMEA0000010'),
- (1, '1.SKB1.640202', 'ERS000011', 'SAMEA0000011'),
- (1, '1.SKD1.640179', 'ERS000012', 'SAMEA0000012'),
- (1, '1.SKD3.640198', 'ERS000013', 'SAMEA0000013'),
- (1, '1.SKM8.640201', 'ERS000014', 'SAMEA0000014'),
- (1, '1.SKM2.640199', 'ERS000015', 'SAMEA0000015'),
- (1, '1.SKB9.640200', 'ERS000016', 'SAMEA0000016'),
- (1, '1.SKD5.640186', 'ERS000017', 'SAMEA0000017'),
- (1, '1.SKM3.640197', 'ERS000018', 'SAMEA0000018'),
- (1, '1.SKD9.640182', 'ERS000019', 'SAMEA0000019'),
- (1, '1.SKB4.640189', 'ERS000020', 'SAMEA0000020'),
- (1, '1.SKD7.640191', 'ERS000021', 'SAMEA0000021'),
- (1, '1.SKM6.640187', 'ERS000022', 'SAMEA0000022'),
- (1, '1.SKD4.640185', 'ERS000023', 'SAMEA0000023'),
- (1, '1.SKB3.640195', 'ERS000024', 'SAMEA0000024'),
- (1, '1.SKB6.640176', 'ERS000025', 'SAMEA0000025'),
- (1, '1.SKM1.640183', 'ERS000025', 'SAMEA0000026');
-
--- Create the sample_1 dynamic table
-CREATE TABLE qiita.sample_1 (
- sample_id varchar,
- season_environment varchar,
- assigned_from_geo varchar,
- texture varchar,
- taxon_id varchar,
- depth varchar,
- host_taxid varchar,
- common_name varchar,
- water_content_soil varchar,
- elevation varchar,
- temp varchar,
- tot_nitro varchar,
- samp_salinity varchar,
- altitude varchar,
- env_biome varchar,
- country varchar,
- ph varchar,
- anonymized_name varchar,
- tot_org_carb varchar,
- description_duplicate varchar,
- env_feature varchar,
- physical_specimen_location varchar,
- physical_specimen_remaining varchar,
- dna_extracted varchar,
- sample_type varchar,
- env_package varchar default 'soil',
- collection_timestamp varchar,
- host_subject_id varchar,
- description varchar,
- latitude varchar,
- longitude varchar,
- scientific_name varchar,
- CONSTRAINT pk_sample_1 PRIMARY KEY ( sample_id )
-);
-
--- Populates the sample_1 dynamic table
-INSERT INTO qiita.sample_1 (sample_id, season_environment, assigned_from_geo, texture, taxon_id, depth, host_taxid, common_name, water_content_soil, elevation, temp, tot_nitro, samp_salinity, altitude, env_biome, country, ph, anonymized_name, tot_org_carb, description_duplicate, env_feature, physical_specimen_location, physical_specimen_remaining, dna_extracted, sample_type, collection_timestamp, host_subject_id, description, latitude, longitude, scientific_name) VALUES
- ('1.SKM7.640188', 'winter', 'n', '63.1 sand, 17.7 silt, 19.2 clay', '1118232', '0.15', '3483', 'root metagenome', '0.101', '114', '15', '1.3', '7.44', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.82', 'SKM7', '3.31', 'Bucu Roots', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:B6', 'Cannabis Soil Microbiome', 60.1102854322, 74.7123248382, '1118232'),
- ('1.SKD9.640182', 'winter', 'n', '66 sand, 16.3 silt, 17.7 clay', '1118232', '0.15', '3483', 'root metagenome', '0.178', '114', '15', '1.51', '7.1', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.82', 'SKD9', '4.32', 'Diesel Root', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:D3', 'Cannabis Soil Microbiome', 23.1218032799, 42.838497795, '1118232'),
- ('1.SKM8.640201', 'winter', 'n', '63.1 sand, 17.7 silt, 19.2 clay', '1118232', '0.15', '3483', 'root metagenome', '0.101', '114', '15', '1.3', '7.44', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.82', 'SKM8', '3.31', 'Bucu Roots', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:D8', 'Cannabis Soil Microbiome', 3.21190859967, 26.8138925876, '1118232'),
- ('1.SKB8.640193', 'winter', 'n', '64.6 sand, 17.6 silt, 17.8 clay', '1118232', '0.15', '3483', 'root metagenome', '0.164', '114', '15', '1.41', '7.15', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.94', 'SKB8', '5', 'Burmese root', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:M7', 'Cannabis Soil Microbiome', 74.0894932572, 65.3283470202, '1118232'),
- ('1.SKD2.640178', 'winter', 'n', '66 sand, 16.3 silt, 17.7 clay', '410658', '0.15', '3483', 'soil metagenome', '0.178', '114', '15', '1.51', '7.1', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.8', 'SKD2', '4.32', 'Diesel bulk', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:B5', 'Cannabis Soil Microbiome', 53.5050692395, 31.6056761814, '1118232'),
- ('1.SKM3.640197', 'winter', 'n', '63.1 sand, 17.7 silt, 19.2 clay', '410658', '0.15', '3483', 'soil metagenome', '0.101', '114', '15', '1.3', '7.44', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.82', 'SKM3', '3.31', 'Bucu bulk', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:B7', 'Cannabis Soil Microbiome', 'Not applicable', 31.2003474585, '1118232'),
- ('1.SKM4.640180', 'winter', 'n', '63.1 sand, 17.7 silt, 19.2 clay', '939928', '0.15', '3483', 'rhizosphere metagenome', '0.101', '114', '15', '1.3', '7.44', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.82', 'SKM4', '3.31', 'Bucu Rhizo', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:D2', 'Cannabis Soil Microbiome', 'Not applicable', 'Not applicable', '1118232'),
- ('1.SKB9.640200', 'winter', 'n', '64.6 sand, 17.6 silt, 17.8 clay', '1118232', '0.15', '3483', 'root metagenome', '0.164', '114', '15', '1.41', '7.15', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.8', 'SKB9', '5', 'Burmese root', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:B3', 'Cannabis Soil Microbiome', 12.6245524972, 96.0693176066, '1118232'),
- ('1.SKB4.640189', 'winter', 'n', '64.6 sand, 17.6 silt, 17.8 clay', '939928', '0.15', '3483', 'rhizosphere metagenome', '0.164', '114', '15', '1.41', '7.15', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.94', 'SKB4', '5', 'Burmese Rhizo', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:D7', 'Cannabis Soil Microbiome', 43.9614715197, 82.8516734159, '1118232'),
- ('1.SKB5.640181', 'winter', 'n', '64.6 sand, 17.6 silt, 17.8 clay', '939928', '0.15', '3483', 'rhizosphere metagenome', '0.164', '114', '15', '1.41', '7.15', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.94', 'SKB5', '5', 'Burmese Rhizo', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:M4', 'Cannabis Soil Microbiome', 10.6655599093, 70.784770579, '1118232'),
- ('1.SKB6.640176', 'winter', 'n', '64.6 sand, 17.6 silt, 17.8 clay', '939928', '0.15', '3483', 'rhizosphere metagenome', '0.164', '114', '15', '1.41', '7.15', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.94', 'SKB6', '5', 'Burmese Rhizo', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:D5', 'Cannabis Soil Microbiome', 78.3634273709, 74.423907894, '1118232'),
- ('1.SKM2.640199', 'winter', 'n', '63.1 sand, 17.7 silt, 19.2 clay', '410658', '0.15', '3483', 'soil metagenome', '0.101', '114', '15', '1.3', '7.44', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.82', 'SKM2', '3.31', 'Bucu bulk', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:D4', 'Cannabis Soil Microbiome', 82.8302905615, 86.3615778099, '1118232'),
- ('1.SKM5.640177', 'winter', 'n', '63.1 sand, 17.7 silt, 19.2 clay', '939928', '0.15', '3483', 'rhizosphere metagenome', '0.101', '114', '15', '1.3', '7.44', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.82', 'SKM5', '3.31', 'Bucu Rhizo', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:M3', 'Cannabis Soil Microbiome', 44.9725384282, 66.1920014699, '1118232'),
- ('1.SKB1.640202', 'winter', 'n', '64.6 sand, 17.6 silt, 17.8 clay', '410658', '0.15', '3483', 'soil metagenome', '0.164', '114', '15', '1.41', '7.15', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.94', 'SKB1', '5', 'Burmese bulk', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:M2', 'Cannabis Soil Microbiome', 4.59216095574, 63.5115213108, '1118232'),
- ('1.SKD8.640184', 'winter', 'n', '66 sand, 16.3 silt, 17.7 clay', '1118232', '0.15', '3483', 'root metagenome', '0.178', '114', '15', '1.51', '7.1', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.8', 'SKD8', '4.32', 'Diesel Root', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:D9', 'Cannabis Soil Microbiome', 57.571893782, 32.5563076447, '1118232'),
- ('1.SKD4.640185', 'winter', 'n', '66 sand, 16.3 silt, 17.7 clay', '939928', '0.15', '3483', 'rhizosphere metagenome', '0.178', '114', '15', '1.51', '7.1', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.8', 'SKD4', '4.32', 'Diesel Rhizo', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:M9', 'Cannabis Soil Microbiome', 40.8623799474, 6.66444220187, '1118232'),
- ('1.SKB3.640195', 'winter', 'n', '64.6 sand, 17.6 silt, 17.8 clay', '410658', '0.15', '3483', 'soil metagenome', '0.164', '114', '15', '1.41', '7.15', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.94', 'SKB3', '5', 'Burmese bulk', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:M6', 'Cannabis Soil Microbiome', 95.2060749748, 27.3592668624, '1118232'),
- ('1.SKM1.640183', 'winter', 'n', '63.1 sand, 17.7 silt, 19.2 clay', '410658', '0.15', '3483', 'soil metagenome', '0.101', '114', '15', '1.3', '7.44', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.82', 'SKM1', '3.31', 'Bucu bulk', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:D1', 'Cannabis Soil Microbiome', 38.2627021402, 3.48274264219, '1118232'),
- ('1.SKB7.640196', 'winter', 'n', '64.6 sand, 17.6 silt, 17.8 clay', '1118232', '0.15', '3483', 'root metagenome', '0.164', '114', '15', '1.41', '7.15', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.94', 'SKB7', '5', 'Burmese root', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:M8', 'Cannabis Soil Microbiome', 13.089194595, 92.5274472082, '1118232'),
- ('1.SKD3.640198', 'winter', 'n', '66 sand, 16.3 silt, 17.7 clay', '410658', '0.15', '3483', 'soil metagenome', '0.178', '114', '15', '1.51', '7.1', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.8', 'SKD3', '4.32', 'Diesel bulk', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:B1', 'Cannabis Soil Microbiome', 84.0030227585, 66.8954849864, '1118232'),
- ('1.SKD7.640191', 'winter', 'n', '66 sand, 16.3 silt, 17.7 clay', '1118232', '0.15', '3483', 'root metagenome', '0.178', '114', '15', '1.51', '7.1', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.8', 'SKD7', '4.32', 'Diesel Root', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:D6', 'Cannabis Soil Microbiome', 68.51099627, 2.35063674718, '1118232'),
- ('1.SKD6.640190', 'winter', 'n', '66 sand, 16.3 silt, 17.7 clay', '939928', '0.15', '3483', 'rhizosphere metagenome', '0.178', '114', '15', '1.51', '7.1', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.8', 'SKD6', '4.32', 'Diesel Rhizo', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:B9', 'Cannabis Soil Microbiome', 29.1499460692, 82.1270418227, '1118232'),
- ('1.SKB2.640194', 'winter', 'n', '64.6 sand, 17.6 silt, 17.8 clay', '410658', '0.15', '3483', 'soil metagenome', '0.164', '114', '15', '1.41', '7.15', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.94', 'SKB2', '5', 'Burmese bulk', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:B4', 'Cannabis Soil Microbiome', 35.2374368957, 68.5041623253, '1118232'),
- ('1.SKM9.640192', 'winter', 'n', '63.1 sand, 17.7 silt, 19.2 clay', '1118232', '0.15', '3483', 'root metagenome', '0.101', '114', '15', '1.3', '7.44', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.82', 'SKM9', '3.31', 'Bucu Roots', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:B8', 'Cannabis Soil Microbiome', 12.7065957714, 84.9722975792, '1118232'),
- ('1.SKM6.640187', 'winter', 'n', '63.1 sand, 17.7 silt, 19.2 clay', '939928', '0.15', '3483', 'rhizosphere metagenome', '0.101', '114', '15', '1.3', '7.44', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.82', 'SKM6', '3.31', 'Bucu Rhizo', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:B2', 'Cannabis Soil Microbiome', 0.291867635913, 68.5945325743, '1118232'),
- ('1.SKD5.640186', 'winter', 'n', '66 sand, 16.3 silt, 17.7 clay', '939928', '0.15', '3483', 'rhizosphere metagenome', '0.178', '114', '15', '1.51', '7.1', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.8', 'SKD5', '4.32', 'Diesel Rhizo', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:M1', 'Cannabis Soil Microbiome', 85.4121476399, 15.6526750776, '1118232'),
- ('1.SKD1.640179', 'winter', 'n', '66 sand, 16.3 silt, 17.7 clay', '410658', '0.15', '3483', 'soil metagenome', '0.178', '114', '15', '1.51', '7.1', '0', 'ENVO:Temperate grasslands, savannas, and shrubland biome', 'GAZ:United States of America', '6.8', 'SKD1', '4.32', 'Diesel bulk', 'ENVO:plant-associated habitat', 'ANL', TRUE, TRUE, 'ENVO:soil', '2011-11-11 13:00:00', '1001:M5', 'Cannabis Soil Microbiome', 68.0991287718, 34.8360987059, '1118232');
-
--- Create two new prep templates for the added raw data
-INSERT INTO qiita.prep_template (data_type_id, preprocessing_status, investigation_type, artifact_id, name) VALUES
- (2, 'success', 'Metagenomics', NULL, 'Prep information 1'),
- (2, 'success', 'Metagenomics', NULL, 'Prep information 2');
-
--- Add the common prep info for prep template 1
-INSERT INTO qiita.prep_template_sample (prep_template_id, sample_id, ebi_experiment_accession) VALUES
- (1, '1.SKB8.640193', 'ERX0000000'),
- (1, '1.SKD8.640184', 'ERX0000001'),
- (1, '1.SKB7.640196', 'ERX0000002'),
- (1, '1.SKM9.640192', 'ERX0000003'),
- (1, '1.SKM4.640180', 'ERX0000004'),
- (1, '1.SKM5.640177', 'ERX0000005'),
- (1, '1.SKB5.640181', 'ERX0000006'),
- (1, '1.SKD6.640190', 'ERX0000007'),
- (1, '1.SKB2.640194', 'ERX0000008'),
- (1, '1.SKD2.640178', 'ERX0000009'),
- (1, '1.SKM7.640188', 'ERX0000010'),
- (1, '1.SKB1.640202', 'ERX0000011'),
- (1, '1.SKD1.640179', 'ERX0000012'),
- (1, '1.SKD3.640198', 'ERX0000013'),
- (1, '1.SKM8.640201', 'ERX0000014'),
- (1, '1.SKM2.640199', 'ERX0000015'),
- (1, '1.SKB9.640200', 'ERX0000016'),
- (1, '1.SKD5.640186', 'ERX0000017'),
- (1, '1.SKM3.640197', 'ERX0000018'),
- (1, '1.SKD9.640182', 'ERX0000019'),
- (1, '1.SKB4.640189', 'ERX0000020'),
- (1, '1.SKD7.640191', 'ERX0000021'),
- (1, '1.SKM6.640187', 'ERX0000022'),
- (1, '1.SKD4.640185', 'ERX0000023'),
- (1, '1.SKB3.640195', 'ERX0000024'),
- (1, '1.SKB6.640176', 'ERX0000025'),
- (1, '1.SKM1.640183', 'ERX0000026');
-
--- Add the common prep info for prep template 2
-INSERT INTO qiita.prep_template_sample (prep_template_id, sample_id, ebi_experiment_accession) VALUES
- (2, '1.SKB8.640193', 'ERX0000000'),
- (2, '1.SKD8.640184', 'ERX0000001'),
- (2, '1.SKB7.640196', 'ERX0000002'),
- (2, '1.SKM9.640192', 'ERX0000003'),
- (2, '1.SKM4.640180', 'ERX0000004'),
- (2, '1.SKM5.640177', 'ERX0000005'),
- (2, '1.SKB5.640181', 'ERX0000006'),
- (2, '1.SKD6.640190', 'ERX0000007'),
- (2, '1.SKB2.640194', 'ERX0000008'),
- (2, '1.SKD2.640178', 'ERX0000009'),
- (2, '1.SKM7.640188', 'ERX0000010'),
- (2, '1.SKB1.640202', 'ERX0000011'),
- (2, '1.SKD1.640179', 'ERX0000012'),
- (2, '1.SKD3.640198', 'ERX0000013'),
- (2, '1.SKM8.640201', 'ERX0000014'),
- (2, '1.SKM2.640199', 'ERX0000015'),
- (2, '1.SKB9.640200', 'ERX0000016'),
- (2, '1.SKD5.640186', 'ERX0000017'),
- (2, '1.SKM3.640197', 'ERX0000018'),
- (2, '1.SKD9.640182', 'ERX0000019'),
- (2, '1.SKB4.640189', 'ERX0000020'),
- (2, '1.SKD7.640191', 'ERX0000021'),
- (2, '1.SKM6.640187', 'ERX0000022'),
- (2, '1.SKD4.640185', 'ERX0000023'),
- (2, '1.SKB3.640195', 'ERX0000024'),
- (2, '1.SKB6.640176', 'ERX0000025'),
- (2, '1.SKM1.640183', 'ERX0000026');
-
--- Create the prep_1 dynamic table
-CREATE TABLE qiita.prep_1 (
- sample_id varchar,
- barcode varchar,
- LIBRARY_CONSTRUCTION_PROTOCOL varchar,
- primer varchar,
- TARGET_SUBFRAGMENT varchar,
- target_gene varchar,
- RUN_CENTER varchar,
- RUN_PREFIX varchar,
- RUN_DATE varchar,
- EXPERIMENT_CENTER varchar,
- EXPERIMENT_DESIGN_DESCRIPTION varchar,
- EXPERIMENT_TITLE varchar,
- PLATFORM varchar,
- INSTRUMENT_MODEL varchar,
- SAMP_SIZE varchar,
- SEQUENCING_METH varchar,
- illumina_technology varchar,
- SAMPLE_CENTER varchar,
- pcr_primers varchar,
- STUDY_CENTER varchar,
- center_name varchar,
- center_project_name varchar,
- emp_status varchar,
- CONSTRAINT pk_prep_1 PRIMARY KEY ( sample_id )
-);
-
--- Create the prep_2 dynamic table
-CREATE TABLE qiita.prep_2 (
- sample_id varchar,
- barcode varchar,
- LIBRARY_CONSTRUCTION_PROTOCOL varchar,
- primer varchar,
- TARGET_SUBFRAGMENT varchar,
- target_gene varchar,
- RUN_CENTER varchar,
- RUN_PREFIX varchar,
- RUN_DATE varchar,
- EXPERIMENT_CENTER varchar,
- EXPERIMENT_DESIGN_DESCRIPTION varchar,
- EXPERIMENT_TITLE varchar,
- PLATFORM varchar,
- INSTRUMENT_MODEL varchar,
- SAMP_SIZE varchar,
- SEQUENCING_METH varchar,
- illumina_technology varchar,
- SAMPLE_CENTER varchar,
- pcr_primers varchar,
- STUDY_CENTER varchar,
- center_name varchar,
- center_project_name varchar,
- emp_status varchar,
- CONSTRAINT pk_prep_2 PRIMARY KEY ( sample_id )
-);
-
--- Populates the prep_1 dynamic table
-INSERT INTO qiita.prep_1 (sample_id, barcode, LIBRARY_CONSTRUCTION_PROTOCOL, primer, TARGET_SUBFRAGMENT, target_gene, RUN_CENTER, RUN_PREFIX, RUN_DATE, EXPERIMENT_CENTER, EXPERIMENT_DESIGN_DESCRIPTION, EXPERIMENT_TITLE, PLATFORM, INSTRUMENT_MODEL, SAMP_SIZE, SEQUENCING_METH, illumina_technology, SAMPLE_CENTER, pcr_primers, STUDY_CENTER, center_name, center_project_name, emp_status) VALUES
- ('1.SKB1.640202', 'GTCCGCAAGTTA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB2.640194', 'CGTAGAGCTCTC', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB3.640195', 'CCTCTGAGAGCT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB4.640189', 'CCTCGATGCAGT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB5.640181', 'GCGGACTATTCA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB6.640176', 'CGTGCACAATTG', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB7.640196', 'CGGCCTAAGTTC', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB8.640193', 'AGCGCTCACATC', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB9.640200', 'TGGTTATGGCAC', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD1.640179', 'CGAGGTTCTGAT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD2.640178', 'AACTCCTGTGGA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD3.640198', 'TAATGGTCGTAG', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD4.640185', 'TTGCACCGTCGA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD5.640186', 'TGCTACAGACGT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD6.640190', 'ATGGCCTGACTA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD7.640191', 'ACGCACATACAA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD8.640184', 'TGAGTGGTCTGT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD9.640182', 'GATAGCACTCGT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM1.640183', 'TAGCGCGAACTT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM2.640199', 'CATACACGCACC', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM3.640197', 'ACCTCAGTCAAG', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM4.640180', 'TCGACCAAACAC', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM5.640177', 'CCACCCAGTAAC', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM6.640187', 'ATATCGCGATGA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM7.640188', 'CGCCGGTAATCT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM8.640201', 'CCGATGCCTTGA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM9.640192', 'AGCAGGCACGAA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP');
-
-
--- Populates the prep_2 dynamic table
-INSERT INTO qiita.prep_2 (sample_id, barcode, LIBRARY_CONSTRUCTION_PROTOCOL, primer, TARGET_SUBFRAGMENT, target_gene, RUN_CENTER, RUN_PREFIX, RUN_DATE, EXPERIMENT_CENTER, EXPERIMENT_DESIGN_DESCRIPTION, EXPERIMENT_TITLE, PLATFORM, INSTRUMENT_MODEL, SAMP_SIZE, SEQUENCING_METH, illumina_technology, SAMPLE_CENTER, pcr_primers, STUDY_CENTER, center_name, center_project_name, emp_status) VALUES
- ('1.SKB1.640202', 'GTCCGCAAGTTA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB2.640194', 'CGTAGAGCTCTC', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB3.640195', 'CCTCTGAGAGCT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB4.640189', 'CCTCGATGCAGT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB5.640181', 'GCGGACTATTCA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB6.640176', 'CGTGCACAATTG', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB7.640196', 'CGGCCTAAGTTC', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB8.640193', 'AGCGCTCACATC', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKB9.640200', 'TGGTTATGGCAC', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD1.640179', 'CGAGGTTCTGAT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD2.640178', 'AACTCCTGTGGA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD3.640198', 'TAATGGTCGTAG', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD4.640185', 'TTGCACCGTCGA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD5.640186', 'TGCTACAGACGT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD6.640190', 'ATGGCCTGACTA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD7.640191', 'ACGCACATACAA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD8.640184', 'TGAGTGGTCTGT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKD9.640182', 'GATAGCACTCGT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM1.640183', 'TAGCGCGAACTT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM2.640199', 'CATACACGCACC', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM3.640197', 'ACCTCAGTCAAG', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM4.640180', 'TCGACCAAACAC', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM5.640177', 'CCACCCAGTAAC', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM6.640187', 'ATATCGCGATGA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM7.640188', 'CGCCGGTAATCT', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM8.640201', 'CCGATGCCTTGA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP'),
- ('1.SKM9.640192', 'AGCAGGCACGAA', 'This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions.', 'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL', 's_G1_L001_sequences', '8/1/12', 'ANL', 'micro biome of soil and rhizosphere of cannabis plants from CA', 'Cannabis Soil Microbiome', 'Illumina', 'Illumina MiSeq', '.25,g', 'Sequencing by synthesis', 'MiSeq', 'ANL', 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME', 'ANL', NULL, 'EMP');
-
--- Link the prep template to the study
-INSERT INTO qiita.study_prep_template (study_id, prep_template_id) VALUES (1, 1);
-INSERT INTO qiita.study_prep_template (study_id, prep_template_id) VALUES (1, 2);
-
--- Insert some artifacts
-INSERT INTO qiita.artifact (generated_timestamp, command_id, command_parameters, visibility_id, artifact_type_id, data_type_id, submitted_to_vamps, name) VALUES
- ('2012-10-01 09:30:27', NULL, NULL, 3, 3, 2, false, 'Raw data 1'),
- ('2012-10-01 10:30:27', 1, '{"max_barcode_errors": "1.5", "max_bad_run_length": "3", "phred_offset": "auto", "rev_comp": "False", "phred_quality_threshold": "3", "input_data": "1", "rev_comp_barcode": "False", "sequence_max_n": "0", "rev_comp_mapping_barcodes": "False", "min_per_read_length_fraction": "0.75", "barcode_type": "golay_12"}', 3, 6, 2, false, 'Demultiplexed 1'),
- ('2012-10-01 11:30:27', 1, '{"max_barcode_errors": "1.5", "max_bad_run_length": "3", "phred_offset": "auto", "rev_comp": "False", "phred_quality_threshold": "3", "input_data": "1", "rev_comp_barcode": "False", "sequence_max_n": "0", "rev_comp_mapping_barcodes": "True", "min_per_read_length_fraction": "0.75", "barcode_type": "golay_12"}', 3, 6, 2, false, 'Demultiplexed 2'),
- ('2012-10-02 17:30:00', 3, '{"reference": "1", "similarity": "0.97", "sortmerna_e_value": "1", "sortmerna_max_pos": "10000", "input_data": "2", "threads": "1", "sortmerna_coverage": "0.97"}', 3, 7, 2, false, 'BIOM'),
- ('2012-10-02 17:30:00', 3, '{"reference": "1", "similarity": "0.97", "sortmerna_e_value": "1", "sortmerna_max_pos": "10000", "input_data": "2", "threads": "1", "sortmerna_coverage": "0.97"}', 3, 7, 2, false, 'BIOM'),
- ('2012-10-02 17:30:00', 3, '{"reference": "2", "similarity": "0.97", "sortmerna_e_value": "1", "sortmerna_max_pos": "10000", "input_data": "2", "threads": "1", "sortmerna_coverage": "0.97"}', 3, 7, 1, false, 'BIOM'),
- ('2012-10-02 17:30:00', NULL, NULL, 3, 7, 1, false, 'BIOM'),
- ('2018-12-03 14:06:45.117389', NULL, NULL, 4, 7, 2, false, 'noname'),
- ('2018-12-03 14:06:45.117389', 12, '{"biom_table": "8", "depth": "9000", "subsample_multinomial": "False"}', 4, 7, 2, false, 'noname');
-
--- Link the new artifacts with their prep information files
-UPDATE qiita.prep_template SET artifact_id = 1 WHERE prep_template_id = 1;
-UPDATE qiita.prep_template SET artifact_id = 7 WHERE prep_template_id = 2;
-
--- Link the child artifacts with their parent artifacts
-INSERT INTO qiita.parent_artifact (parent_id, artifact_id)
- VALUES (1, 2), (1, 3),
- (2, 4), (2, 5), (2, 6), (8, 9);
-
--- Insert filepaths for the artifacts and reference
-INSERT INTO qiita.filepath (filepath, filepath_type_id, checksum, checksum_algorithm_id, data_directory_id)
- VALUES ('1_s_G1_L001_sequences.fastq.gz', 1, '852952723', 1, 5),
- ('1_s_G1_L001_sequences_barcodes.fastq.gz', 3, '852952723', 1, 5),
- ('1_seqs.fna', 4, '852952723', 1, 3),
- ('1_seqs.qual', 5, '852952723', 1, 3),
- ('1_seqs.demux', 6, '852952723', 1, 3),
- ('GreenGenes_13_8_97_otus.fasta', 10, '852952723', 1, 6),
- ('GreenGenes_13_8_97_otu_taxonomy.txt', 11, '852952723', 1, 6),
- ('GreenGenes_13_8_97_otus.tree', 12, '852952723', 1, 6),
- ('1_study_1001_closed_reference_otu_table.biom', 7, '852952723', 1, 4),
- ('Silva_97_otus.fasta', 10, '852952723', 1, 6),
- ('Silva_97_otu_taxonomy.txt', 11, '852952723', 1, 6),
- ('1_study_1001_closed_reference_otu_table_Silva.biom', 7, '852952723', 1, 4);
-
--- Link the study with the artifacts
-INSERT INTO qiita.study_artifact (study_id, artifact_id)
- VALUES (1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7);
-
--- Insert EBI information for artifact 2
-INSERT INTO qiita.ebi_run_accession (sample_id, artifact_id, ebi_run_accession)
- VALUES ('1.SKB1.640202', 2, 'ERR0000001'),
- ('1.SKB2.640194', 2, 'ERR0000002'),
- ('1.SKB3.640195', 2, 'ERR0000003'),
- ('1.SKB4.640189', 2, 'ERR0000004'),
- ('1.SKB5.640181', 2, 'ERR0000005'),
- ('1.SKB6.640176', 2, 'ERR0000006'),
- ('1.SKB7.640196', 2, 'ERR0000007'),
- ('1.SKB8.640193', 2, 'ERR0000008'),
- ('1.SKB9.640200', 2, 'ERR0000009'),
- ('1.SKD1.640179', 2, 'ERR0000010'),
- ('1.SKD2.640178', 2, 'ERR0000011'),
- ('1.SKD3.640198', 2, 'ERR0000012'),
- ('1.SKD4.640185', 2, 'ERR0000013'),
- ('1.SKD5.640186', 2, 'ERR0000014'),
- ('1.SKD6.640190', 2, 'ERR0000015'),
- ('1.SKD7.640191', 2, 'ERR0000016'),
- ('1.SKD8.640184', 2, 'ERR0000017'),
- ('1.SKD9.640182', 2, 'ERR0000018'),
- ('1.SKM1.640183', 2, 'ERR0000019'),
- ('1.SKM2.640199', 2, 'ERR0000020'),
- ('1.SKM3.640197', 2, 'ERR0000021'),
- ('1.SKM4.640180', 2, 'ERR0000022'),
- ('1.SKM5.640177', 2, 'ERR0000023'),
- ('1.SKM6.640187', 2, 'ERR0000024'),
- ('1.SKM7.640188', 2, 'ERR0000025'),
- ('1.SKM8.640201', 2, 'ERR0000026'),
- ('1.SKM9.640192', 2, 'ERR0000027');
-
--- Populate the reference table
-INSERT INTO qiita.reference (reference_name, reference_version, sequence_filepath, taxonomy_filepath, tree_filepath) VALUES
-('Greengenes', '13_8', 6, 7, 8),
-('Silva', 'test', 10, 11, NULL);
-
--- Insert filepaths for job result files
-INSERT INTO qiita.filepath (filepath, filepath_type_id, checksum, checksum_algorithm_id, data_directory_id) VALUES
-('1_job_result.txt', 9, '852952723', 1, 2),
-('2_test_folder', 8, '852952723', 1, 2);
-
--- Insert Analysis
-INSERT INTO qiita.analysis (email, name, description, pmid, "timestamp", dflt, logging_id) VALUES
- ('test@foo.bar', 'SomeAnalysis', 'A test analysis', '121112', '2018-12-03 13:52:42.751331-07', false, NULL),
- ('admin@foo.bar', 'SomeSecondAnalysis', 'Another test analysis', '22221112', '2018-12-03 13:52:42.751331-07', false, NULL),
- ('test@foo.bar', 'test@foo.bar-dflt-1', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL),
- ('admin@foo.bar', 'admin@foo.bar-dflt-1', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL),
- ('shared@foo.bar', 'shared@foo.bar-dflt-1', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL),
- ('demo@microbio.me', 'demo@microbio.me-dflt-1', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL),
- ('test@foo.bar', 'test@foo.bar-dflt-2', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL),
- ('admin@foo.bar', 'admin@foo.bar-dflt-2', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL),
- ('shared@foo.bar', 'shared@foo.bar-dflt-2', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL),
- ('demo@microbio.me', 'demo@microbio.me-dflt-2', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL);
-INSERT INTO qiita.analysis_portal (analysis_id, portal_type_id) VALUES
- (1, 1), (2, 1), (3, 1), (4, 1),(5, 1), (6, 1), (7, 2), (8, 2), (9, 2), (10, 2);
-
-INSERT INTO qiita.analysis_artifact (analysis_id, artifact_id) VALUES
- (1, 8),
- (1, 9);
-
--- Insert filepaths for the analysis BIOM and mapping files
-INSERT INTO qiita.filepath (filepath, filepath_type_id, checksum, checksum_algorithm_id, data_directory_id) VALUES
- ('1_analysis_18S.biom', 7, '852952723', 1, 1),
- ('1_analysis_mapping.txt', 9, '852952723', 1, 1);
-
--- Attach filepath to analysis
-INSERT INTO qiita.analysis_filepath (analysis_id, filepath_id, data_type_id) VALUES
- (1, 15, 2),
- (1, 16, NULL);
-
--- Attach samples to analysis
-INSERT INTO qiita.analysis_sample (analysis_id, artifact_id, sample_id) VALUES
-(1, 4, '1.SKB8.640193'), (1, 4, '1.SKD8.640184'), (1, 4, '1.SKB7.640196'), (1, 4, '1.SKM9.640192'), (1, 4, '1.SKM4.640180'),
-(2, 4, '1.SKB8.640193'), (2, 4, '1.SKD8.640184'), (2, 4, '1.SKB7.640196'), (2, 4, '1.SKM3.640197'),
-(1, 5, '1.SKB8.640193'), (1, 5, '1.SKD8.640184'), (1, 5, '1.SKB7.640196'), (1, 5, '1.SKM9.640192'), (1, 5, '1.SKM4.640180'),
-(2, 5, '1.SKB8.640193'), (2, 5, '1.SKD8.640184'), (2, 5, '1.SKB7.640196'), (2, 5, '1.SKM3.640197'),
-(1, 6, '1.SKB8.640193'), (1, 6, '1.SKD8.640184'), (1, 6, '1.SKB7.640196'), (1, 6, '1.SKM9.640192'), (1, 6, '1.SKM4.640180'),
-(2, 6, '1.SKB8.640193'), (2, 6, '1.SKD8.640184'), (2, 6, '1.SKB7.640196'), (2, 6, '1.SKM3.640197'),
-(3, 4, '1.SKD8.640184'), (3, 4, '1.SKB7.640196'), (3, 4, '1.SKM9.640192'), (3, 4, '1.SKM4.640180');
-
--- Share analysis with shared user
-INSERT INTO qiita.analysis_users (analysis_id, email) VALUES (1, 'shared@foo.bar');
-
--- Add an ontology
-INSERT INTO qiita.ontology (ontology_id, ontology, fully_loaded, fullname, query_url, source_url, definition, load_date) VALUES (999999999, E'ENA', E'1', E'European Nucleotide Archive Submission Ontology', NULL, E'http://www.ebi.ac.uk/embl/Documentation/ENA-Reads.html', E'The ENA CV is to be used to annotate XML submissions to the ENA.', '2009-02-23 00:00:00');
-
--- Add some ontology values
-INSERT INTO qiita.term (term_id, ontology_id, term, identifier, definition, namespace, is_obsolete, is_root_term, is_leaf) VALUES (2052508974, 999999999, E'WGS', E'ENA:0000059', NULL, NULL, NULL, NULL, NULL);
-INSERT INTO qiita.term (term_id, ontology_id, term, identifier, definition, namespace, is_obsolete, is_root_term, is_leaf) VALUES (2052508975, 999999999, E'Metagenomics', E'ENA:0000060', NULL, NULL, NULL, NULL, NULL);
-INSERT INTO qiita.term (term_id, ontology_id, term, identifier, definition, namespace, is_obsolete, is_root_term, is_leaf) VALUES (2052508976, 999999999, E'Amplicon', E'ENA:0000061', NULL, NULL, NULL, NULL, NULL);
-INSERT INTO qiita.term (term_id, ontology_id, term, identifier, definition, namespace, is_obsolete, is_root_term, is_leaf) VALUES (2052508984, 999999999, E'RNA-Seq', E'ENA:0000070', NULL, NULL, NULL, NULL, NULL);
-INSERT INTO qiita.term (term_id, ontology_id, term, identifier, definition, namespace, is_obsolete, is_root_term, is_leaf) VALUES (2052508987, 999999999, E'Other', E'ENA:0000069', NULL, NULL, NULL, NULL, NULL);
-
--- Create the new sample_template_filepath
-INSERT INTO qiita.filepath (filepath, filepath_type_id, checksum, checksum_algorithm_id, data_directory_id) VALUES ('1_19700101-000000.txt', 14, '852952723', 1, 9);
+--
+-- PostgreSQL database dump
+--
+
+-- Dumped from database version 13.9
+-- Dumped by pg_dump version 13.9
+
+-- SET statement_timeout = 0;
+-- SET lock_timeout = 0;
+-- SET idle_in_transaction_session_timeout = 0;
+-- SET client_encoding = 'UTF8';
+-- SET standard_conforming_strings = on;
+-- SELECT pg_catalog.set_config('search_path', '', false);
+-- SET check_function_bodies = false;
+-- SET xmloption = content;
+-- SET client_min_messages = warning;
+-- SET row_security = off;
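+
+-- NOTE: the usual pg_dump session settings above are deliberately left
+-- commented out, presumably so this dump can be applied inside an existing
+-- psql session (e.g. while building the test environment) without
+-- overriding search_path, client_encoding, or other session state.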
+
+--
+-- Data for Name: severity; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.severity VALUES (1, 'Warning');
+INSERT INTO qiita.severity VALUES (2, 'Runtime');
+INSERT INTO qiita.severity VALUES (3, 'Fatal');
+
+
+--
+-- Data for Name: logging; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.logging VALUES (1, '2015-11-22 21:29:30', 2, 'Error message', NULL);
+INSERT INTO qiita.logging VALUES (2, '2015-11-22 21:29:30', 2, 'Error message', '{}');
+
+
+--
+-- Data for Name: user_level; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.user_level VALUES (2, 'dev', 'Can access all data and info about errors', '--nice=10000');
+INSERT INTO qiita.user_level VALUES (3, 'superuser', 'Can see all studies, can run analyses', '--nice=10000');
+INSERT INTO qiita.user_level VALUES (4, 'user', 'Can see own and public data, can run analyses', '--nice=10000');
+INSERT INTO qiita.user_level VALUES (5, 'unverified', 'Email not verified', '--nice=10000');
+INSERT INTO qiita.user_level VALUES (6, 'guest', 'Can view & download public data', '--nice=10000');
+INSERT INTO qiita.user_level VALUES (1, 'admin', 'Can access and do all the things', '--nice=5000');
+INSERT INTO qiita.user_level VALUES (7, 'wet-lab admin', 'Can access the private jobs', '');
+
+
+--
+-- Data for Name: qiita_user; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.qiita_user VALUES ('test@foo.bar', 4, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Dude', 'Nowhere University', '123 fake st, Apt 0, Faketown, CO 80302', '111-222-3344', NULL, NULL, NULL, false);
+INSERT INTO qiita.qiita_user VALUES ('shared@foo.bar', 4, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Shared', 'Nowhere University', '123 fake st, Apt 0, Faketown, CO 80302', '111-222-3344', NULL, NULL, NULL, false);
+INSERT INTO qiita.qiita_user VALUES ('admin@foo.bar', 1, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Admin', 'Owner University', '312 noname st, Apt K, Nonexistantown, CO 80302', '222-444-6789', NULL, NULL, NULL, false);
+INSERT INTO qiita.qiita_user VALUES ('demo@microbio.me', 4, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Demo', 'Qiita Dev', '1345 Colorado Avenue', '303-492-1984', NULL, NULL, NULL, false);
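+
+-- All four test accounts share one bcrypt hash, i.e. the same test password.
+-- Assuming the qiita.qiita_user column layout, the positional values are:
+-- email, user_level_id, password, name, affiliation, address, phone,
+-- followed by NULLs for the verification/reset fields and a final flag.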
+
+
+--
+-- Data for Name: analysis; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.analysis VALUES (1, 'test@foo.bar', 'SomeAnalysis', 'A test analysis', '121112', '2018-12-03 13:52:42.751331-07', false, NULL, '');
+INSERT INTO qiita.analysis VALUES (2, 'admin@foo.bar', 'SomeSecondAnalysis', 'Another test analysis', '22221112', '2018-12-03 13:52:42.751331-07', false, NULL, '');
+INSERT INTO qiita.analysis VALUES (3, 'test@foo.bar', 'test@foo.bar-dflt-1', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL, '');
+INSERT INTO qiita.analysis VALUES (4, 'admin@foo.bar', 'admin@foo.bar-dflt-1', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL, '');
+INSERT INTO qiita.analysis VALUES (5, 'shared@foo.bar', 'shared@foo.bar-dflt-1', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL, '');
+INSERT INTO qiita.analysis VALUES (6, 'demo@microbio.me', 'demo@microbio.me-dflt-1', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL, '');
+INSERT INTO qiita.analysis VALUES (7, 'test@foo.bar', 'test@foo.bar-dflt-2', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL, '');
+INSERT INTO qiita.analysis VALUES (8, 'admin@foo.bar', 'admin@foo.bar-dflt-2', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL, '');
+INSERT INTO qiita.analysis VALUES (9, 'shared@foo.bar', 'shared@foo.bar-dflt-2', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL, '');
+INSERT INTO qiita.analysis VALUES (10, 'demo@microbio.me', 'demo@microbio.me-dflt-2', 'dflt', NULL, '2018-12-03 13:52:42.751331-07', true, NULL, '');
+
+
+--
+-- Data for Name: artifact_type; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.artifact_type VALUES (1, 'SFF', NULL, false, false, false);
+INSERT INTO qiita.artifact_type VALUES (4, 'FASTA', NULL, false, false, false);
+INSERT INTO qiita.artifact_type VALUES (2, 'FASTA_Sanger', NULL, false, false, false);
+INSERT INTO qiita.artifact_type VALUES (6, 'Demultiplexed', 'Demultiplexed and QC sequences', true, true, false);
+INSERT INTO qiita.artifact_type VALUES (8, 'beta_div_plots', 'Qiime 1 beta diversity results', false, false, false);
+INSERT INTO qiita.artifact_type VALUES (9, 'rarefaction_curves', 'Rarefaction curves', false, false, false);
+INSERT INTO qiita.artifact_type VALUES (10, 'taxa_summary', 'Taxa summary plots', false, false, false);
+INSERT INTO qiita.artifact_type VALUES (3, 'FASTQ', NULL, false, false, true);
+INSERT INTO qiita.artifact_type VALUES (5, 'per_sample_FASTQ', NULL, true, false, true);
+INSERT INTO qiita.artifact_type VALUES (7, 'BIOM', 'BIOM table', false, false, true);
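+
+-- The three trailing booleans are assumed to be can_be_submitted_to_ebi,
+-- can_be_submitted_to_vamps and is_user_uploadable; under that assumption
+-- only FASTQ, per_sample_FASTQ and BIOM are user-uploadable, e.g.:
+-- SELECT artifact_type FROM qiita.artifact_type WHERE is_user_uploadable;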
+
+
+--
+-- Data for Name: data_type; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.data_type VALUES (1, '16S');
+INSERT INTO qiita.data_type VALUES (2, '18S');
+INSERT INTO qiita.data_type VALUES (3, 'ITS');
+INSERT INTO qiita.data_type VALUES (4, 'Proteomic');
+INSERT INTO qiita.data_type VALUES (5, 'Metabolomic');
+INSERT INTO qiita.data_type VALUES (6, 'Metagenomic');
+INSERT INTO qiita.data_type VALUES (7, 'Multiomic');
+INSERT INTO qiita.data_type VALUES (8, 'Metatranscriptomics');
+INSERT INTO qiita.data_type VALUES (9, 'Viromics');
+INSERT INTO qiita.data_type VALUES (10, 'Genomics');
+INSERT INTO qiita.data_type VALUES (11, 'Transcriptomics');
+INSERT INTO qiita.data_type VALUES (12, 'Job Output Folder');
+
+
+--
+-- Data for Name: software_type; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.software_type VALUES (1, 'artifact transformation', 'A plugin that performs some kind of processing/transformation/manipulation over an artifact.');
+INSERT INTO qiita.software_type VALUES (2, 'artifact definition', 'A plugin that defines new artifact types.');
+INSERT INTO qiita.software_type VALUES (3, 'private', 'Internal Qiita jobs');
+
+
+--
+-- Data for Name: software; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.software VALUES (2, 'BIOM type', '2.1.4 - Qiime2', 'The Biological Observation Matrix format', 'source ~/virtualenv/python2.7/bin/activate; export PATH=$HOME/miniconda3/bin/:$PATH; . activate qtp-biom', 'start_biom', 2, false, false);
+INSERT INTO qiita.software VALUES (3, 'Target Gene type', '0.1.0', 'Target gene artifact types plugin', 'source ~/virtualenv/python2.7/bin/activate; export PATH=$HOME/miniconda3/bin/:$PATH; source activate qiita', 'start_target_gene_types', 2, false, false);
+INSERT INTO qiita.software VALUES (4, 'Qiita', 'alpha', 'Internal Qiita jobs', 'source /home/runner/.profile; conda activate qiita', 'qiita-private-plugin', 3, true, false);
+INSERT INTO qiita.software VALUES (1, 'QIIMEq2', '1.9.1', 'Quantitative Insights Into Microbial Ecology (QIIME) is an open-source bioinformatics pipeline for performing microbiome analysis from raw DNA sequencing data', 'source activate qiita', 'start_target_gene', 1, false, false);
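+
+-- Positional columns are assumed to be: software_id, name, version,
+-- description, environment_script, start_script, software_type_id, active,
+-- deprecated. Under that reading only software 4 ('Qiita') is active, and
+-- its environment_script targets the GitHub Actions runner environment.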
+
+
+--
+-- Data for Name: software_command; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.software_command VALUES (1, 'Split libraries FASTQ', 1, 'Demultiplexes and applies quality control to FASTQ data', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (2, 'Split libraries', 1, 'Demultiplexes and applies quality control to FASTA data', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (3, 'Pick closed-reference OTUs', 1, 'OTU picking using a closed reference approach', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (4, 'Validate', 2, 'Validates a new artifact of type BIOM', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (5, 'Generate HTML summary', 2, 'Generates the HTML summary of a BIOM artifact', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (6, 'Validate', 3, 'Validates a new artifact of the given target gene type', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (7, 'Generate HTML summary', 3, 'Generates the HTML summary of a given target gene type artifact', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (8, 'build_analysis_files', 4, 'Builds the files needed for the analysis', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (9, 'Summarize Taxa', 1, 'Plots taxonomy summaries at different taxonomy levels', true, true, false, NULL);
+INSERT INTO qiita.software_command VALUES (10, 'Beta Diversity', 1, 'Computes and plots beta diversity results', true, true, false, NULL);
+INSERT INTO qiita.software_command VALUES (11, 'Alpha Rarefaction', 1, 'Computes and plots alpha rarefaction results', true, true, false, NULL);
+INSERT INTO qiita.software_command VALUES (12, 'Single Rarefaction', 1, 'Rarefies the input table by random sampling without replacement', true, true, false, NULL);
+INSERT INTO qiita.software_command VALUES (13, 'release_validators', 4, 'Releases the job validators', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (14, 'submit_to_VAMPS', 4, 'submits an artifact to VAMPS', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (15, 'copy_artifact', 4, 'Creates a copy of an artifact', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (16, 'submit_to_EBI', 4, 'submits an artifact to EBI', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (17, 'delete_artifact', 4, 'Delete an artifact', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (18, 'create_sample_template', 4, 'Create a sample template', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (19, 'update_sample_template', 4, 'Updates the sample template', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (20, 'delete_study', 4, 'Deletes a full study', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (21, 'delete_sample_template', 4, 'Deletes a sample template', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (22, 'update_prep_template', 4, 'Updates the prep template', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (23, 'delete_sample_or_column', 4, 'Deletes a sample or a columns from the metadata', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (24, 'complete_job', 4, 'Completes a given job', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (25, 'delete_analysis', 4, 'Deletes a full analysis', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (26, 'list_remote_files', 4, 'retrieves list of valid study files from remote dir', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (27, 'download_remote_files', 4, 'downloads valid study files from remote dir', true, false, false, NULL);
+INSERT INTO qiita.software_command VALUES (28, 'INSDC_download', 4, 'Downloads an accession from a given INSDC', true, false, false, NULL);
+
+
+--
+-- Data for Name: visibility; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.visibility VALUES (1, 'awaiting_approval', 'Awaiting approval of metadata');
+INSERT INTO qiita.visibility VALUES (4, 'sandbox', 'Only available to the owner. No sharing');
+INSERT INTO qiita.visibility VALUES (3, 'private', 'Only visible to the owner and shared users');
+INSERT INTO qiita.visibility VALUES (2, 'public', 'Visible to everybody');
+INSERT INTO qiita.visibility VALUES (5, 'archived', 'Archived artifact');
+
+
+--
+-- Data for Name: artifact; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.artifact VALUES (1, '2012-10-01 09:30:27', NULL, NULL, 3, 3, 2, false, 'Raw data 1', NULL);
+INSERT INTO qiita.artifact VALUES (2, '2012-10-01 10:30:27', 1, '{"max_barcode_errors": "1.5", "max_bad_run_length": "3", "phred_offset": "auto", "rev_comp": "False", "phred_quality_threshold": "3", "input_data": "1", "rev_comp_barcode": "False", "sequence_max_n": "0", "rev_comp_mapping_barcodes": "False", "min_per_read_length_fraction": "0.75", "barcode_type": "golay_12"}', 3, 6, 2, false, 'Demultiplexed 1', NULL);
+INSERT INTO qiita.artifact VALUES (3, '2012-10-01 11:30:27', 1, '{"max_barcode_errors": "1.5", "max_bad_run_length": "3", "phred_offset": "auto", "rev_comp": "False", "phred_quality_threshold": "3", "input_data": "1", "rev_comp_barcode": "False", "sequence_max_n": "0", "rev_comp_mapping_barcodes": "True", "min_per_read_length_fraction": "0.75", "barcode_type": "golay_12"}', 3, 6, 2, false, 'Demultiplexed 2', NULL);
+INSERT INTO qiita.artifact VALUES (4, '2012-10-02 17:30:00', 3, '{"reference": "1", "similarity": "0.97", "sortmerna_e_value": "1", "sortmerna_max_pos": "10000", "input_data": "2", "threads": "1", "sortmerna_coverage": "0.97"}', 3, 7, 2, false, 'BIOM', NULL);
+INSERT INTO qiita.artifact VALUES (5, '2012-10-02 17:30:00', 3, '{"reference": "1", "similarity": "0.97", "sortmerna_e_value": "1", "sortmerna_max_pos": "10000", "input_data": "2", "threads": "1", "sortmerna_coverage": "0.97"}', 3, 7, 2, false, 'BIOM', NULL);
+INSERT INTO qiita.artifact VALUES (6, '2012-10-02 17:30:00', 3, '{"reference": "2", "similarity": "0.97", "sortmerna_e_value": "1", "sortmerna_max_pos": "10000", "input_data": "2", "threads": "1", "sortmerna_coverage": "0.97"}', 3, 7, 1, false, 'BIOM', NULL);
+INSERT INTO qiita.artifact VALUES (7, '2012-10-02 17:30:00', NULL, NULL, 3, 7, 1, false, 'BIOM', NULL);
+INSERT INTO qiita.artifact VALUES (8, '2018-12-03 14:06:45.117389', NULL, NULL, 4, 7, 2, false, 'noname', NULL);
+INSERT INTO qiita.artifact VALUES (9, '2018-12-03 14:06:45.117389', 12, '{"biom_table": "8", "depth": "9000", "subsample_multinomial": "False"}', 4, 7, 2, false, 'noname', NULL);
+
+
+--
+-- Data for Name: analysis_artifact; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.analysis_artifact VALUES (1, 8);
+INSERT INTO qiita.analysis_artifact VALUES (1, 9);
+
+
+--
+-- Data for Name: checksum_algorithm; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.checksum_algorithm VALUES (1, 'crc32');
+
+
+--
+-- Data for Name: data_directory; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.data_directory VALUES (1, 'analysis', 'analysis', false, true);
+INSERT INTO qiita.data_directory VALUES (2, 'job', 'job', false, true);
+INSERT INTO qiita.data_directory VALUES (3, 'preprocessed_data', 'preprocessed_data', false, true);
+INSERT INTO qiita.data_directory VALUES (4, 'processed_data', 'processed_data', false, true);
+INSERT INTO qiita.data_directory VALUES (5, 'raw_data', 'raw_data', false, true);
+INSERT INTO qiita.data_directory VALUES (6, 'reference', 'reference', false, true);
+INSERT INTO qiita.data_directory VALUES (7, 'uploads', 'uploads', false, true);
+INSERT INTO qiita.data_directory VALUES (8, 'working_dir', 'working_dir', false, true);
+INSERT INTO qiita.data_directory VALUES (9, 'templates', 'templates', false, true);
+INSERT INTO qiita.data_directory VALUES (10, 'SFF', 'SFF', true, true);
+INSERT INTO qiita.data_directory VALUES (11, 'FASTQ', 'FASTQ', true, true);
+INSERT INTO qiita.data_directory VALUES (12, 'FASTA', 'FASTA', true, true);
+INSERT INTO qiita.data_directory VALUES (13, 'FASTA_Sanger', 'FASTA_Sanger', true, true);
+INSERT INTO qiita.data_directory VALUES (14, 'per_sample_FASTQ', 'per_sample_FASTQ', true, true);
+INSERT INTO qiita.data_directory VALUES (15, 'Demultiplexed', 'Demultiplexed', true, true);
+INSERT INTO qiita.data_directory VALUES (16, 'BIOM', 'BIOM', true, true);
+
+
+--
+-- Data for Name: filepath_type; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.filepath_type VALUES (1, 'raw_forward_seqs');
+INSERT INTO qiita.filepath_type VALUES (2, 'raw_reverse_seqs');
+INSERT INTO qiita.filepath_type VALUES (3, 'raw_barcodes');
+INSERT INTO qiita.filepath_type VALUES (4, 'preprocessed_fasta');
+INSERT INTO qiita.filepath_type VALUES (5, 'preprocessed_fastq');
+INSERT INTO qiita.filepath_type VALUES (6, 'preprocessed_demux');
+INSERT INTO qiita.filepath_type VALUES (7, 'biom');
+INSERT INTO qiita.filepath_type VALUES (8, 'directory');
+INSERT INTO qiita.filepath_type VALUES (9, 'plain_text');
+INSERT INTO qiita.filepath_type VALUES (10, 'reference_seqs');
+INSERT INTO qiita.filepath_type VALUES (11, 'reference_tax');
+INSERT INTO qiita.filepath_type VALUES (12, 'reference_tree');
+INSERT INTO qiita.filepath_type VALUES (13, 'log');
+INSERT INTO qiita.filepath_type VALUES (14, 'sample_template');
+INSERT INTO qiita.filepath_type VALUES (15, 'prep_template');
+INSERT INTO qiita.filepath_type VALUES (16, 'qiime_map');
+INSERT INTO qiita.filepath_type VALUES (17, 'raw_sff');
+INSERT INTO qiita.filepath_type VALUES (18, 'raw_fasta');
+INSERT INTO qiita.filepath_type VALUES (19, 'raw_qual');
+INSERT INTO qiita.filepath_type VALUES (20, 'html_summary');
+INSERT INTO qiita.filepath_type VALUES (21, 'tgz');
+INSERT INTO qiita.filepath_type VALUES (22, 'html_summary_dir');
+INSERT INTO qiita.filepath_type VALUES (23, 'qzv');
+INSERT INTO qiita.filepath_type VALUES (24, 'qza');
+INSERT INTO qiita.filepath_type VALUES (25, 'bam');
+
+
+--
+-- Data for Name: filepath; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.filepath VALUES (1, '1_s_G1_L001_sequences.fastq.gz', 1, '2125826711', 1, 5, 58);
+INSERT INTO qiita.filepath VALUES (2, '1_s_G1_L001_sequences_barcodes.fastq.gz', 3, '2125826711', 1, 5, 58);
+INSERT INTO qiita.filepath VALUES (3, '1_seqs.fna', 4, '', 1, 3, 0);
+INSERT INTO qiita.filepath VALUES (4, '1_seqs.qual', 5, '', 1, 3, 0);
+INSERT INTO qiita.filepath VALUES (5, '1_seqs.demux', 6, '', 1, 3, 0);
+INSERT INTO qiita.filepath VALUES (6, 'GreenGenes_13_8_97_otus.fasta', 10, '852952723', 1, 6, 1);
+INSERT INTO qiita.filepath VALUES (7, 'GreenGenes_13_8_97_otu_taxonomy.txt', 11, '852952723', 1, 6, 1);
+INSERT INTO qiita.filepath VALUES (8, 'GreenGenes_13_8_97_otus.tree', 12, '852952723', 1, 6, 1);
+INSERT INTO qiita.filepath VALUES (9, '1_study_1001_closed_reference_otu_table.biom', 7, '1579715020', 1, 4, 1256812);
+INSERT INTO qiita.filepath VALUES (10, 'Silva_97_otus.fasta', 10, '', 1, 6, 0);
+INSERT INTO qiita.filepath VALUES (11, 'Silva_97_otu_taxonomy.txt', 11, '', 1, 6, 0);
+INSERT INTO qiita.filepath VALUES (12, '1_study_1001_closed_reference_otu_table_Silva.biom', 7, '1579715020', 1, 4, 1256812);
+INSERT INTO qiita.filepath VALUES (13, '1_job_result.txt', 9, '0', 1, 2, 0);
+INSERT INTO qiita.filepath VALUES (14, '2_test_folder', 8, '', 1, 2, 0);
+INSERT INTO qiita.filepath VALUES (15, '1_analysis_18S.biom', 7, '1756512010', 1, 1, 1093210);
+INSERT INTO qiita.filepath VALUES (16, '1_analysis_mapping.txt', 9, '291340704', 1, 1, 7813);
+INSERT INTO qiita.filepath VALUES (17, '1_19700101-000000.txt', 14, '1486964984', 1, 9, 10309);
+INSERT INTO qiita.filepath VALUES (18, '1_prep_1_19700101-000000.txt', 15, '3703494589', 1, 9, 26051);
+INSERT INTO qiita.filepath VALUES (19, '1_prep_1_qiime_19700101-000000.txt', 16, '3053485441', 1, 9, 36780);
+INSERT INTO qiita.filepath VALUES (20, '1_prep_1_19700101-000000.txt', 15, '3703494589', 1, 9, 26051);
+INSERT INTO qiita.filepath VALUES (21, '1_prep_1_qiime_19700101-000000.txt', 16, '3053485441', 1, 9, 36780);
+INSERT INTO qiita.filepath VALUES (22, 'biom_table.biom', 7, '1756512010', 1, 16, 1093210);
+
+
+--
+-- Data for Name: analysis_filepath; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.analysis_filepath VALUES (1, 15, 2);
+INSERT INTO qiita.analysis_filepath VALUES (1, 16, NULL);
+
+
+--
+-- Data for Name: portal_type; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.portal_type VALUES (2, 'EMP', 'EMP portal');
+INSERT INTO qiita.portal_type VALUES (1, 'QIITA', 'QIITA portal. Access to all data stored in database.');
+
+
+--
+-- Data for Name: analysis_portal; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.analysis_portal VALUES (1, 1);
+INSERT INTO qiita.analysis_portal VALUES (2, 1);
+INSERT INTO qiita.analysis_portal VALUES (3, 1);
+INSERT INTO qiita.analysis_portal VALUES (4, 1);
+INSERT INTO qiita.analysis_portal VALUES (5, 1);
+INSERT INTO qiita.analysis_portal VALUES (6, 1);
+INSERT INTO qiita.analysis_portal VALUES (7, 2);
+INSERT INTO qiita.analysis_portal VALUES (8, 2);
+INSERT INTO qiita.analysis_portal VALUES (9, 2);
+INSERT INTO qiita.analysis_portal VALUES (10, 2);
+
+
+--
+-- Data for Name: processing_job_status; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.processing_job_status VALUES (1, 'queued', 'The job is waiting to be run');
+INSERT INTO qiita.processing_job_status VALUES (2, 'running', 'The job is running');
+INSERT INTO qiita.processing_job_status VALUES (3, 'success', 'The job completed successfully');
+INSERT INTO qiita.processing_job_status VALUES (4, 'error', 'The job failed');
+INSERT INTO qiita.processing_job_status VALUES (5, 'in_construction', 'The job is one of the source nodes of a workflow that is in construction');
+INSERT INTO qiita.processing_job_status VALUES (6, 'waiting', 'The job is waiting for a previous job in the workflow to be completed in order to be executed.');
+
+
+--
+-- Data for Name: processing_job; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.processing_job VALUES ('6d368e16-2242-4cf8-87b4-a5dc40bb890b', 'test@foo.bar', 1, '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"input_data":1,"phred_offset":"auto"}', 3, NULL, NULL, NULL, NULL, false, NULL);
+INSERT INTO qiita.processing_job VALUES ('4c7115e8-4c8e-424c-bf25-96c292ca1931', 'test@foo.bar', 1, '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":true,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"input_data":1,"phred_offset":"auto"}', 3, NULL, NULL, NULL, NULL, false, NULL);
+INSERT INTO qiita.processing_job VALUES ('3c9991ab-6c14-4368-a48c-841e8837a79c', 'test@foo.bar', 3, '{"reference":1,"sortmerna_e_value":1,"sortmerna_max_pos":10000,"similarity":0.97,"sortmerna_coverage":0.97,"threads":1,"input_data":2}', 3, NULL, NULL, NULL, NULL, false, NULL);
+INSERT INTO qiita.processing_job VALUES ('b72369f9-a886-4193-8d3d-f7b504168e75', 'shared@foo.bar', 1, '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":true,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"input_data":1,"phred_offset":"auto"}', 3, NULL, '2015-11-22 21:15:00', NULL, NULL, false, NULL);
+INSERT INTO qiita.processing_job VALUES ('46b76f74-e100-47aa-9bf2-c0208bcea52d', 'test@foo.bar', 1, '{"max_barcode_errors": "1.5", "sequence_max_n": "0", "max_bad_run_length": "3", "phred_offset": "auto", "rev_comp": "False", "phred_quality_threshold": "3", "input_data": "1", "rev_comp_barcode": "False", "rev_comp_mapping_barcodes": "True", "min_per_read_length_fraction": "0.75", "barcode_type": "golay_12"}', 3, NULL, NULL, NULL, NULL, false, NULL);
+INSERT INTO qiita.processing_job VALUES ('80bf25f3-5f1d-4e10-9369-315e4244f6d5', 'test@foo.bar', 3, '{"reference": "2", "similarity": "0.97", "sortmerna_e_value": "1", "sortmerna_max_pos": "10000", "input_data": "2", "threads": "1", "sortmerna_coverage": "0.97"}', 3, NULL, NULL, NULL, NULL, false, NULL);
+INSERT INTO qiita.processing_job VALUES ('9ba5ae7a-41e1-4202-b396-0259aeaac366', 'test@foo.bar', 3, '{"reference": "1", "similarity": "0.97", "sortmerna_e_value": "1", "sortmerna_max_pos": "10000", "input_data": "2", "threads": "1", "sortmerna_coverage": "0.97"}', 3, NULL, NULL, NULL, NULL, false, NULL);
+INSERT INTO qiita.processing_job VALUES ('e5609746-a985-41a1-babf-6b3ebe9eb5a9', 'test@foo.bar', 3, '{"reference": "1", "similarity": "0.97", "sortmerna_e_value": "1", "sortmerna_max_pos": "10000", "input_data": "2", "threads": "1", "sortmerna_coverage": "0.97"}', 3, NULL, NULL, NULL, NULL, false, NULL);
+INSERT INTO qiita.processing_job VALUES ('6ad4d590-4fa3-44d3-9a8f-ddbb472b1b5f', 'test@foo.bar', 1, '{"max_barcode_errors": "1.5", "sequence_max_n": "0", "max_bad_run_length": "3", "phred_offset": "auto", "rev_comp": "False", "phred_quality_threshold": "3", "input_data": "1", "rev_comp_barcode": "False", "rev_comp_mapping_barcodes": "False", "min_per_read_length_fraction": "0.75", "barcode_type": "golay_12"}', 3, NULL, NULL, NULL, NULL, false, NULL);
+INSERT INTO qiita.processing_job VALUES ('8a7a8461-e8a1-4b4e-a428-1bc2f4d3ebd0', 'test@foo.bar', 12, '{"biom_table": "8", "depth": "9000", "subsample_multinomial": "False"}', 3, NULL, NULL, NULL, NULL, false, NULL);
+INSERT INTO qiita.processing_job VALUES ('063e553b-327c-4818-ab4a-adfe58e49860', 'test@foo.bar', 1, '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"input_data":1,"phred_offset":"auto"}', 1, NULL, NULL, NULL, NULL, true, NULL);
+INSERT INTO qiita.processing_job VALUES ('bcc7ebcd-39c1-43e4-af2d-822e3589f14d', 'test@foo.bar', 2, '{"min_seq_len":100,"max_seq_len":1000,"trim_seq_length":false,"min_qual_score":25,"max_ambig":6,"max_homopolymer":6,"max_primer_mismatch":0,"barcode_type":"golay_12","max_barcode_errors":1.5,"disable_bc_correction":false,"qual_score_window":0,"disable_primers":false,"reverse_primers":"disable","reverse_primer_mismatches":0,"truncate_ambi_bases":false,"input_data":1}', 2, NULL, '2015-11-22 21:00:00', 'demultiplexing', NULL, true, NULL);
+INSERT INTO qiita.processing_job VALUES ('d19f76ee-274e-4c1b-b3a2-a12d73507c55', 'shared@foo.bar', 3, '{"reference":1,"sortmerna_e_value":1,"sortmerna_max_pos":10000,"similarity":0.97,"sortmerna_coverage":0.97,"threads":1,"input_data":2}', 4, 1, '2015-11-22 21:30:00', 'generating demux file', NULL, true, NULL);
+INSERT INTO qiita.processing_job VALUES ('ac653cb5-76a6-4a45-929e-eb9b2dee6b63', 'test@foo.bar', 1, '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"input_data":1}', 5, NULL, NULL, NULL, NULL, true, NULL);
+
+
+--
+-- Data for Name: analysis_processing_job; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+
+
+--
+-- Data for Name: study_person; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.study_person VALUES (1, 'LabDude', 'lab_dude@foo.bar', 'knight lab', '123 lab street', '121-222-3333');
+INSERT INTO qiita.study_person VALUES (2, 'empDude', 'emp_dude@foo.bar', 'broad', NULL, '444-222-3333');
+INSERT INTO qiita.study_person VALUES (3, 'PIDude', 'PI_dude@foo.bar', 'Wash U', '123 PI street', NULL);
+
+
+--
+-- Data for Name: timeseries_type; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.timeseries_type VALUES (1, 'None', 'None');
+INSERT INTO qiita.timeseries_type VALUES (2, 'real', 'single intervention');
+INSERT INTO qiita.timeseries_type VALUES (3, 'real', 'multiple intervention');
+INSERT INTO qiita.timeseries_type VALUES (4, 'real', 'combo intervention');
+INSERT INTO qiita.timeseries_type VALUES (5, 'pseudo', 'single intervention');
+INSERT INTO qiita.timeseries_type VALUES (6, 'pseudo', 'multiple intervention');
+INSERT INTO qiita.timeseries_type VALUES (7, 'pseudo', 'combo intervention');
+INSERT INTO qiita.timeseries_type VALUES (8, 'mixed', 'single intervention');
+INSERT INTO qiita.timeseries_type VALUES (9, 'mixed', 'multiple intervention');
+INSERT INTO qiita.timeseries_type VALUES (10, 'mixed', 'combo intervention');
+
+
+--
+-- Data for Name: study; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.study VALUES (1, 'test@foo.bar', '2014-05-19 16:10:00', NULL, 1, 1, true, true, '2014-05-19 16:11:00', 3, false, false, 'Identification of the Microbiomes for Cannabis Soils', 'Cannabis Soils', 'Analysis of the Cannabis Plant Microbiome', 'This is a preliminary study to examine the microbiota associated with the Cannabis plant. Soils samples from the bulk soil, soil associated with the roots, and the rhizosphere were extracted and the DNA sequenced. Roots from three independent plants of different strains were examined. These roots were obtained November 11, 2011 from plants that had been harvested in the summer. Future studies will attempt to analyze the soils and rhizospheres from the same location at different time points in the plant lifecycle.', NULL, 'EBI123456-BB', false, '', false);
+
+
+--
+-- Data for Name: study_sample; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.study_sample VALUES ('1.SKB8.640193', 1, 'ERS000000', 'SAMEA0000000');
+INSERT INTO qiita.study_sample VALUES ('1.SKD8.640184', 1, 'ERS000001', 'SAMEA0000001');
+INSERT INTO qiita.study_sample VALUES ('1.SKB7.640196', 1, 'ERS000002', 'SAMEA0000002');
+INSERT INTO qiita.study_sample VALUES ('1.SKM9.640192', 1, 'ERS000003', 'SAMEA0000003');
+INSERT INTO qiita.study_sample VALUES ('1.SKM4.640180', 1, 'ERS000004', 'SAMEA0000004');
+INSERT INTO qiita.study_sample VALUES ('1.SKM5.640177', 1, 'ERS000005', 'SAMEA0000005');
+INSERT INTO qiita.study_sample VALUES ('1.SKB5.640181', 1, 'ERS000006', 'SAMEA0000006');
+INSERT INTO qiita.study_sample VALUES ('1.SKD6.640190', 1, 'ERS000007', 'SAMEA0000007');
+INSERT INTO qiita.study_sample VALUES ('1.SKB2.640194', 1, 'ERS000008', 'SAMEA0000008');
+INSERT INTO qiita.study_sample VALUES ('1.SKD2.640178', 1, 'ERS000009', 'SAMEA0000009');
+INSERT INTO qiita.study_sample VALUES ('1.SKM7.640188', 1, 'ERS000010', 'SAMEA0000010');
+INSERT INTO qiita.study_sample VALUES ('1.SKB1.640202', 1, 'ERS000011', 'SAMEA0000011');
+INSERT INTO qiita.study_sample VALUES ('1.SKD1.640179', 1, 'ERS000012', 'SAMEA0000012');
+INSERT INTO qiita.study_sample VALUES ('1.SKD3.640198', 1, 'ERS000013', 'SAMEA0000013');
+INSERT INTO qiita.study_sample VALUES ('1.SKM8.640201', 1, 'ERS000014', 'SAMEA0000014');
+INSERT INTO qiita.study_sample VALUES ('1.SKM2.640199', 1, 'ERS000015', 'SAMEA0000015');
+INSERT INTO qiita.study_sample VALUES ('1.SKB9.640200', 1, 'ERS000016', 'SAMEA0000016');
+INSERT INTO qiita.study_sample VALUES ('1.SKD5.640186', 1, 'ERS000017', 'SAMEA0000017');
+INSERT INTO qiita.study_sample VALUES ('1.SKM3.640197', 1, 'ERS000018', 'SAMEA0000018');
+INSERT INTO qiita.study_sample VALUES ('1.SKD9.640182', 1, 'ERS000019', 'SAMEA0000019');
+INSERT INTO qiita.study_sample VALUES ('1.SKB4.640189', 1, 'ERS000020', 'SAMEA0000020');
+INSERT INTO qiita.study_sample VALUES ('1.SKD7.640191', 1, 'ERS000021', 'SAMEA0000021');
+INSERT INTO qiita.study_sample VALUES ('1.SKM6.640187', 1, 'ERS000022', 'SAMEA0000022');
+INSERT INTO qiita.study_sample VALUES ('1.SKD4.640185', 1, 'ERS000023', 'SAMEA0000023');
+INSERT INTO qiita.study_sample VALUES ('1.SKB3.640195', 1, 'ERS000024', 'SAMEA0000024');
+INSERT INTO qiita.study_sample VALUES ('1.SKB6.640176', 1, 'ERS000025', 'SAMEA0000025');
+INSERT INTO qiita.study_sample VALUES ('1.SKM1.640183', 1, 'ERS000026', 'SAMEA0000026');
+
+
+--
+-- Data for Name: analysis_sample; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKB8.640193', 4);
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKD8.640184', 4);
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKB7.640196', 4);
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKM9.640192', 4);
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKM4.640180', 4);
+INSERT INTO qiita.analysis_sample VALUES (2, '1.SKB8.640193', 4);
+INSERT INTO qiita.analysis_sample VALUES (2, '1.SKD8.640184', 4);
+INSERT INTO qiita.analysis_sample VALUES (2, '1.SKB7.640196', 4);
+INSERT INTO qiita.analysis_sample VALUES (2, '1.SKM3.640197', 4);
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKB8.640193', 5);
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKD8.640184', 5);
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKB7.640196', 5);
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKM9.640192', 5);
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKM4.640180', 5);
+INSERT INTO qiita.analysis_sample VALUES (2, '1.SKB8.640193', 5);
+INSERT INTO qiita.analysis_sample VALUES (2, '1.SKD8.640184', 5);
+INSERT INTO qiita.analysis_sample VALUES (2, '1.SKB7.640196', 5);
+INSERT INTO qiita.analysis_sample VALUES (2, '1.SKM3.640197', 5);
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKB8.640193', 6);
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKD8.640184', 6);
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKB7.640196', 6);
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKM9.640192', 6);
+INSERT INTO qiita.analysis_sample VALUES (1, '1.SKM4.640180', 6);
+INSERT INTO qiita.analysis_sample VALUES (2, '1.SKB8.640193', 6);
+INSERT INTO qiita.analysis_sample VALUES (2, '1.SKD8.640184', 6);
+INSERT INTO qiita.analysis_sample VALUES (2, '1.SKB7.640196', 6);
+INSERT INTO qiita.analysis_sample VALUES (2, '1.SKM3.640197', 6);
+INSERT INTO qiita.analysis_sample VALUES (3, '1.SKD8.640184', 4);
+INSERT INTO qiita.analysis_sample VALUES (3, '1.SKB7.640196', 4);
+INSERT INTO qiita.analysis_sample VALUES (3, '1.SKM9.640192', 4);
+INSERT INTO qiita.analysis_sample VALUES (3, '1.SKM4.640180', 4);
+
+
+--
+-- Data for Name: analysis_users; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.analysis_users VALUES (1, 'shared@foo.bar');
+
+
+--
+-- Data for Name: archive_merging_scheme; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+
+
+--
+-- Data for Name: archive_feature_value; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+
+
+--
+-- Data for Name: artifact_filepath; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.artifact_filepath VALUES (1, 1);
+INSERT INTO qiita.artifact_filepath VALUES (1, 2);
+INSERT INTO qiita.artifact_filepath VALUES (2, 3);
+INSERT INTO qiita.artifact_filepath VALUES (2, 4);
+INSERT INTO qiita.artifact_filepath VALUES (2, 5);
+INSERT INTO qiita.artifact_filepath VALUES (4, 9);
+INSERT INTO qiita.artifact_filepath VALUES (5, 9);
+INSERT INTO qiita.artifact_filepath VALUES (6, 12);
+INSERT INTO qiita.artifact_filepath VALUES (7, 22);
+INSERT INTO qiita.artifact_filepath VALUES (8, 22);
+INSERT INTO qiita.artifact_filepath VALUES (9, 15);
+
+
+--
+-- Data for Name: command_output; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.command_output VALUES (1, 'demultiplexed', 1, 6, false);
+INSERT INTO qiita.command_output VALUES (2, 'demultiplexed', 2, 6, false);
+INSERT INTO qiita.command_output VALUES (3, 'OTU table', 3, 7, false);
+INSERT INTO qiita.command_output VALUES (4, 'taxa_summary', 9, 10, false);
+INSERT INTO qiita.command_output VALUES (5, 'distance_matrix', 10, 8, false);
+INSERT INTO qiita.command_output VALUES (6, 'rarefaction_curves', 11, 9, false);
+INSERT INTO qiita.command_output VALUES (7, 'rarefied_table', 12, 7, false);
+
+
+--
+-- Data for Name: artifact_output_processing_job; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.artifact_output_processing_job VALUES (3, '46b76f74-e100-47aa-9bf2-c0208bcea52d', 1);
+INSERT INTO qiita.artifact_output_processing_job VALUES (6, '80bf25f3-5f1d-4e10-9369-315e4244f6d5', 3);
+INSERT INTO qiita.artifact_output_processing_job VALUES (5, '9ba5ae7a-41e1-4202-b396-0259aeaac366', 3);
+INSERT INTO qiita.artifact_output_processing_job VALUES (4, 'e5609746-a985-41a1-babf-6b3ebe9eb5a9', 3);
+INSERT INTO qiita.artifact_output_processing_job VALUES (2, '6ad4d590-4fa3-44d3-9a8f-ddbb472b1b5f', 1);
+INSERT INTO qiita.artifact_output_processing_job VALUES (9, '8a7a8461-e8a1-4b4e-a428-1bc2f4d3ebd0', 7);
+
+
+--
+-- Data for Name: artifact_processing_job; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.artifact_processing_job VALUES (1, '6d368e16-2242-4cf8-87b4-a5dc40bb890b');
+INSERT INTO qiita.artifact_processing_job VALUES (1, '4c7115e8-4c8e-424c-bf25-96c292ca1931');
+INSERT INTO qiita.artifact_processing_job VALUES (2, '3c9991ab-6c14-4368-a48c-841e8837a79c');
+INSERT INTO qiita.artifact_processing_job VALUES (1, '063e553b-327c-4818-ab4a-adfe58e49860');
+INSERT INTO qiita.artifact_processing_job VALUES (1, 'bcc7ebcd-39c1-43e4-af2d-822e3589f14d');
+INSERT INTO qiita.artifact_processing_job VALUES (1, 'b72369f9-a886-4193-8d3d-f7b504168e75');
+INSERT INTO qiita.artifact_processing_job VALUES (2, 'd19f76ee-274e-4c1b-b3a2-a12d73507c55');
+INSERT INTO qiita.artifact_processing_job VALUES (1, '46b76f74-e100-47aa-9bf2-c0208bcea52d');
+INSERT INTO qiita.artifact_processing_job VALUES (2, '80bf25f3-5f1d-4e10-9369-315e4244f6d5');
+INSERT INTO qiita.artifact_processing_job VALUES (2, '9ba5ae7a-41e1-4202-b396-0259aeaac366');
+INSERT INTO qiita.artifact_processing_job VALUES (2, 'e5609746-a985-41a1-babf-6b3ebe9eb5a9');
+INSERT INTO qiita.artifact_processing_job VALUES (1, '6ad4d590-4fa3-44d3-9a8f-ddbb472b1b5f');
+INSERT INTO qiita.artifact_processing_job VALUES (8, '8a7a8461-e8a1-4b4e-a428-1bc2f4d3ebd0');
+
+
+--
+-- Data for Name: artifact_type_filepath_type; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.artifact_type_filepath_type VALUES (1, 17, true);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (2, 18, true);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (3, 1, true);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (3, 2, false);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (3, 3, true);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (4, 18, true);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (4, 19, true);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (5, 1, true);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (5, 2, false);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (6, 4, true);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (6, 5, true);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (6, 6, false);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (6, 13, false);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (7, 7, true);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (7, 8, false);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (7, 13, false);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (8, 8, true);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (9, 8, true);
+INSERT INTO qiita.artifact_type_filepath_type VALUES (10, 8, true);
+
+
+--
+-- Data for Name: controlled_vocab; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+
+
+--
+-- Data for Name: mixs_field_description; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+
+
+--
+-- Data for Name: column_controlled_vocabularies; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+
+
+--
+-- Data for Name: column_ontology; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+
+
+--
+-- Data for Name: command_parameter; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.command_parameter VALUES (1, 'input_data', 'artifact', true, NULL, 1, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (1, 'max_bad_run_length', 'integer', false, '3', 2, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (1, 'min_per_read_length_fraction', 'float', false, '0.75', 3, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (1, 'sequence_max_n', 'integer', false, '0', 4, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (1, 'rev_comp_barcode', 'bool', false, 'False', 5, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (1, 'rev_comp_mapping_barcodes', 'bool', false, 'False', 6, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (1, 'rev_comp', 'bool', false, 'False', 7, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (1, 'phred_quality_threshold', 'integer', false, '3', 8, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (1, 'barcode_type', 'string', false, 'golay_12', 9, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (1, 'max_barcode_errors', 'float', false, '1.5', 10, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'input_data', 'artifact', true, NULL, 11, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'min_seq_len', 'integer', false, '200', 12, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'max_seq_len', 'integer', false, '1000', 13, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'trim_seq_length', 'bool', false, 'False', 14, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'min_qual_score', 'integer', false, '25', 15, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'max_ambig', 'integer', false, '6', 16, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'max_homopolymer', 'integer', false, '6', 17, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'max_primer_mismatch', 'integer', false, '0', 18, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'barcode_type', 'string', false, 'golay_12', 19, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'max_barcode_errors', 'float', false, '1.5', 20, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'disable_bc_correction', 'bool', false, 'False', 21, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'qual_score_window', 'integer', false, '0', 22, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'disable_primers', 'bool', false, 'False', 23, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'reverse_primers', 'choice:["disable", "truncate_only", "truncate_remove"]', false, 'disable', 24, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'reverse_primer_mismatches', 'integer', false, '0', 25, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (2, 'truncate_ambi_bases', 'bool', false, 'False', 26, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (3, 'input_data', 'artifact', true, NULL, 27, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (3, 'reference', 'reference', false, '1', 28, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (3, 'sortmerna_e_value', 'float', false, '1', 29, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (3, 'sortmerna_max_pos', 'integer', false, '10000', 30, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (3, 'similarity', 'float', false, '0.97', 31, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (3, 'sortmerna_coverage', 'float', false, '0.97', 32, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (3, 'threads', 'integer', false, '1', 33, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (4, 'files', 'string', true, NULL, 35, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (4, 'artifact_type', 'string', true, NULL, 36, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (5, 'input_data', 'artifact', true, NULL, 37, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (6, 'template', 'prep_template', true, NULL, 38, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (6, 'files', 'string', true, NULL, 39, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (6, 'artifact_type', 'string', true, NULL, 40, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (7, 'input_data', 'artifact', true, NULL, 41, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (1, 'phred_offset', 'choice:["auto", "33", "64"]', false, 'auto', 42, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (4, 'provenance', 'string', false, NULL, 43, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (6, 'provenance', 'string', false, NULL, 44, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (4, 'analysis', 'analysis', false, NULL, 45, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (4, 'template', 'prep_template', false, NULL, 34, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (8, 'analysis', 'analysis', true, NULL, 46, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (8, 'merge_dup_sample_ids', 'bool', false, 'False', 47, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (9, 'metadata_category', 'string', false, '', 48, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (9, 'sort', 'bool', false, 'False', 49, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (10, 'tree', 'string', false, '', 50, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (10, 'metric', 'choice:["abund_jaccard","binary_chisq","binary_chord","binary_euclidean","binary_hamming","binary_jaccard","binary_lennon","binary_ochiai","binary_otu_gain","binary_pearson","binary_sorensen_dice","bray_curtis","bray_curtis_faith","bray_curtis_magurran","canberra","chisq","chord","euclidean","gower","hellinger","kulczynski","manhattan","morisita_horn","pearson","soergel","spearman_approx","specprof","unifrac","unifrac_g","unifrac_g_full_tree","unweighted_unifrac","unweighted_unifrac_full_tree","weighted_normalized_unifrac","weighted_unifrac"]', false, '"binary_jaccard"', 51, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (11, 'tree', 'string', false, '', 52, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (11, 'num_steps', 'integer', false, '10', 53, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (11, 'min_rare_depth', 'integer', false, '10', 54, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (11, 'max_rare_depth', 'integer', false, 'Default', 55, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (11, 'metrics', 'mchoice:["ace","berger_parker_d","brillouin_d","chao1","chao1_ci","dominance","doubles","enspie","equitability","esty_ci","fisher_alpha","gini_index","goods_coverage","heip_e","kempton_taylor_q","margalef","mcintosh_d","mcintosh_e","menhinick","michaelis_menten_fit","observed_otus","observed_species","osd","simpson_reciprocal","robbins","shannon","simpson","simpson_e","singles","strong","PD_whole_tree"]', false, '["chao1","observed_otus"]', 56, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (12, 'depth', 'integer', true, NULL, 57, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (12, 'subsample_multinomial', 'bool', false, 'False', 58, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (9, 'biom_table', 'artifact', true, NULL, 59, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (10, 'biom_table', 'artifact', true, NULL, 60, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (11, 'biom_table', 'artifact', true, NULL, 61, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (12, 'biom_table', 'artifact', true, NULL, 62, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (13, 'job', 'string', true, NULL, 63, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (14, 'artifact', 'integer', true, NULL, 64, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (15, 'artifact', 'integer', true, NULL, 65, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (15, 'prep_template', 'prep_template', true, NULL, 66, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (16, 'artifact', 'integer', true, NULL, 67, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (16, 'submission_type', 'choice:["ADD", "MODIFY"]', false, 'ADD', 68, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (17, 'artifact', 'integer', true, NULL, 69, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (18, 'fp', 'string', true, NULL, 70, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (18, 'study_id', 'integer', true, NULL, 71, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (18, 'is_mapping_file', 'boolean', false, 'true', 72, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (18, 'data_type', 'string', true, NULL, 73, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (19, 'study', 'integer', true, NULL, 74, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (19, 'template_fp', 'string', true, NULL, 75, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (20, 'study', 'integer', true, NULL, 76, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (21, 'study', 'integer', true, NULL, 77, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (22, 'prep_template', 'integer', true, NULL, 78, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (22, 'template_fp', 'string', true, NULL, 79, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (23, 'obj_class', 'choice:["SampleTemplate", "PrepTemplate"]', true, NULL, 80, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (23, 'obj_id', 'integer', true, NULL, 81, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (23, 'sample_or_col', 'choice:["samples", "columns"]', true, NULL, 82, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (23, 'name', 'string', true, NULL, 83, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (24, 'job_id', 'string', true, NULL, 84, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (24, 'payload', 'string', true, NULL, 85, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (4, 'name', 'string', false, 'default_name', 86, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (6, 'name', 'string', false, 'default_name', 87, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (25, 'analysis_id', 'integer', true, NULL, 88, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (6, 'analysis', 'analysis', false, NULL, 89, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (26, 'url', 'string', true, NULL, 90, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (26, 'private_key', 'string', true, NULL, 91, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (26, 'study_id', 'integer', true, NULL, 92, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (27, 'url', 'string', true, NULL, 93, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (27, 'destination', 'string', true, NULL, 94, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (27, 'private_key', 'string', true, NULL, 95, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (28, 'download_source', 'choice:["EBI-ENA", "SRA"]', false, 'EBI-ENA', 96, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (28, 'accession', 'string', false, 'None', 97, NULL, false);
+INSERT INTO qiita.command_parameter VALUES (8, 'categories', 'mchoice', true, NULL, 98, NULL, false);
+
+
+--
+-- Data for Name: controlled_vocab_values; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+
+
+--
+-- Data for Name: default_parameter_set; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.default_parameter_set VALUES (8, 2, 'Defaults with Golay 12 barcodes', '{"min_seq_len":200,"max_seq_len":1000,"trim_seq_length":false,"min_qual_score":25,"max_ambig":6,"max_homopolymer":6,"max_primer_mismatch":0,"barcode_type":"golay_12","max_barcode_errors":1.5,"disable_bc_correction":false,"qual_score_window":0,"disable_primers":false,"reverse_primers":"disable","reverse_primer_mismatches":0,"truncate_ambi_bases":false}');
+INSERT INTO qiita.default_parameter_set VALUES (9, 2, 'Defaults with Hamming 8 barcodes', '{"min_seq_len":200,"max_seq_len":1000,"trim_seq_length":false,"min_qual_score":25,"max_ambig":6,"max_homopolymer":6,"max_primer_mismatch":0,"barcode_type":"hamming_8","max_barcode_errors":1.5,"disable_bc_correction":false,"qual_score_window":0,"disable_primers":false,"reverse_primers":"disable","reverse_primer_mismatches":0,"truncate_ambi_bases":false}');
+INSERT INTO qiita.default_parameter_set VALUES (10, 3, 'Defaults', '{"reference":1,"sortmerna_e_value":1,"sortmerna_max_pos":10000,"similarity":0.97,"sortmerna_coverage":0.97,"threads":1}');
+INSERT INTO qiita.default_parameter_set VALUES (11, 1, 'per sample FASTQ defaults, phred_offset 33', '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"not-barcoded","max_barcode_errors":1.5,"phred_offset":"33"}');
+INSERT INTO qiita.default_parameter_set VALUES (12, 1, 'per sample FASTQ defaults, phred_offset 64', '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"not-barcoded","max_barcode_errors":1.5,"phred_offset":"64"}');
+INSERT INTO qiita.default_parameter_set VALUES (1, 1, 'Defaults', '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"phred_offset":"auto"}');
+INSERT INTO qiita.default_parameter_set VALUES (2, 1, 'Defaults with reverse complement mapping file barcodes', '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":true,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"phred_offset":"auto"}');
+INSERT INTO qiita.default_parameter_set VALUES (3, 1, 'barcode_type 8, defaults', '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"8","max_barcode_errors":1.5,"phred_offset":"auto"}');
+INSERT INTO qiita.default_parameter_set VALUES (4, 1, 'barcode_type 8, reverse complement mapping file barcodes', '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":true,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"8","max_barcode_errors":1.5,"phred_offset":"auto"}');
+INSERT INTO qiita.default_parameter_set VALUES (5, 1, 'barcode_type 6, defaults', '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"6","max_barcode_errors":1.5,"phred_offset":"auto"}');
+INSERT INTO qiita.default_parameter_set VALUES (6, 1, 'barcode_type 6, reverse complement mapping file barcodes', '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":true,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"6","max_barcode_errors":1.5,"phred_offset":"auto"}');
+INSERT INTO qiita.default_parameter_set VALUES (7, 1, 'per sample FASTQ defaults', '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"not-barcoded","max_barcode_errors":1.5,"phred_offset":"auto"}');
+INSERT INTO qiita.default_parameter_set VALUES (13, 9, 'Defaults', '{"sort": false, "metadata_category": ""}');
+INSERT INTO qiita.default_parameter_set VALUES (14, 10, 'Unweighted UniFrac', '{"metric": "unweighted_unifrac", "tree": ""}');
+INSERT INTO qiita.default_parameter_set VALUES (15, 11, 'Defaults', '{"max_rare_depth": "Default", "tree": "", "num_steps": 10, "min_rare_depth": 10, "metrics": ["chao1", "observed_otus"]}');
+INSERT INTO qiita.default_parameter_set VALUES (16, 12, 'Defaults', '{"subsample_multinomial": "False"}');
+
+
+--
+-- Data for Name: default_workflow; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.default_workflow VALUES (3, 'Per sample FASTQ upstream workflow', true, NULL, 3, '{"prep": {}, "sample": {}}');
+INSERT INTO qiita.default_workflow VALUES (1, 'FASTQ upstream workflow', true, 'This accepts html <a href="https://qiita.ucsd.edu">Qiita!</a><br/>BYE!', 3, '{"prep": {}, "sample": {}}');
+INSERT INTO qiita.default_workflow VALUES (2, 'FASTA upstream workflow', true, 'This is another description', 3, '{"prep": {}, "sample": {}}');
+
+
+--
+-- Data for Name: default_workflow_data_type; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.default_workflow_data_type VALUES (1, 1);
+INSERT INTO qiita.default_workflow_data_type VALUES (1, 2);
+INSERT INTO qiita.default_workflow_data_type VALUES (2, 2);
+INSERT INTO qiita.default_workflow_data_type VALUES (3, 3);
+
+
+--
+-- Data for Name: default_workflow_node; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.default_workflow_node VALUES (1, 1, 1);
+INSERT INTO qiita.default_workflow_node VALUES (2, 1, 10);
+INSERT INTO qiita.default_workflow_node VALUES (3, 2, 8);
+INSERT INTO qiita.default_workflow_node VALUES (4, 2, 10);
+INSERT INTO qiita.default_workflow_node VALUES (5, 3, 7);
+INSERT INTO qiita.default_workflow_node VALUES (6, 3, 10);
+
+
+--
+-- Data for Name: default_workflow_edge; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.default_workflow_edge VALUES (1, 1, 2);
+INSERT INTO qiita.default_workflow_edge VALUES (2, 3, 4);
+INSERT INTO qiita.default_workflow_edge VALUES (3, 5, 6);
+
+
+--
+-- Data for Name: default_workflow_edge_connections; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.default_workflow_edge_connections VALUES (1, 1, 27);
+INSERT INTO qiita.default_workflow_edge_connections VALUES (2, 2, 27);
+INSERT INTO qiita.default_workflow_edge_connections VALUES (3, 1, 27);
+
+
+--
+-- Data for Name: download_link; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+
+
+--
+-- Data for Name: ebi_run_accession; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKB1.640202', 'ERR0000001', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKB2.640194', 'ERR0000002', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKB3.640195', 'ERR0000003', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKB4.640189', 'ERR0000004', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKB5.640181', 'ERR0000005', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKB6.640176', 'ERR0000006', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKB7.640196', 'ERR0000007', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKB8.640193', 'ERR0000008', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKB9.640200', 'ERR0000009', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKD1.640179', 'ERR0000010', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKD2.640178', 'ERR0000011', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKD3.640198', 'ERR0000012', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKD4.640185', 'ERR0000013', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKD5.640186', 'ERR0000014', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKD6.640190', 'ERR0000015', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKD7.640191', 'ERR0000016', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKD8.640184', 'ERR0000017', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKD9.640182', 'ERR0000018', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKM1.640183', 'ERR0000019', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKM2.640199', 'ERR0000020', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKM3.640197', 'ERR0000021', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKM4.640180', 'ERR0000022', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKM5.640177', 'ERR0000023', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKM6.640187', 'ERR0000024', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKM7.640188', 'ERR0000025', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKM8.640201', 'ERR0000026', 2);
+INSERT INTO qiita.ebi_run_accession VALUES ('1.SKM9.640192', 'ERR0000027', 2);
+
+
+--
+-- Data for Name: environmental_package; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.environmental_package VALUES ('air', 'ep_air');
+INSERT INTO qiita.environmental_package VALUES ('built environment', 'ep_built_environment');
+INSERT INTO qiita.environmental_package VALUES ('host-associated', 'ep_host_associated');
+INSERT INTO qiita.environmental_package VALUES ('human-amniotic-fluid', 'ep_human_amniotic_fluid');
+INSERT INTO qiita.environmental_package VALUES ('human-associated', 'ep_human_associated');
+INSERT INTO qiita.environmental_package VALUES ('human-blood', 'ep_human_blood');
+INSERT INTO qiita.environmental_package VALUES ('human-gut', 'ep_human_gut');
+INSERT INTO qiita.environmental_package VALUES ('human-oral', 'ep_human_oral');
+INSERT INTO qiita.environmental_package VALUES ('human-skin', 'ep_human_skin');
+INSERT INTO qiita.environmental_package VALUES ('human-urine', 'ep_human_urine');
+INSERT INTO qiita.environmental_package VALUES ('human-vaginal', 'ep_human_vaginal');
+INSERT INTO qiita.environmental_package VALUES ('microbial mat/biofilm', 'ep_microbial_mat_biofilm');
+INSERT INTO qiita.environmental_package VALUES ('miscellaneous natural or artificial environment', 'ep_misc_artif');
+INSERT INTO qiita.environmental_package VALUES ('plant-associated', 'ep_plant_associated');
+INSERT INTO qiita.environmental_package VALUES ('sediment', 'ep_sediment');
+INSERT INTO qiita.environmental_package VALUES ('soil', 'ep_soil');
+INSERT INTO qiita.environmental_package VALUES ('wastewater/sludge', 'ep_wastewater_sludge');
+INSERT INTO qiita.environmental_package VALUES ('water', 'ep_water');
+
+
+--
+-- Data for Name: investigation; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.investigation VALUES (1, 'TestInvestigation', 'An investigation for testing purposes', 3);
+
+
+--
+-- Data for Name: investigation_study; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.investigation_study VALUES (1, 1);
+
+
+--
+-- Data for Name: message; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.message VALUES (1, 'message 1', '2024-05-03 12:08:36.627074', NULL);
+INSERT INTO qiita.message VALUES (2, 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque sed auctor ex, non placerat sapien. Vestibulum vestibulum massa ut sapien condimentum, cursus consequat diam sodales. Nulla aliquam arcu ut massa auctor, et vehicula mauris tempor. In lacinia viverra ante quis pellentesque. Nunc vel mi accumsan, porttitor eros ut, pharetra elit. Nulla ac nisi quis dui egestas malesuada vitae ut mauris. Morbi blandit non nisl a finibus. In erat velit, congue at ipsum sit amet, venenatis bibendum sem. Curabitur vel odio sed est rutrum rutrum. Quisque efficitur ut purus in ultrices. Pellentesque eu auctor justo.', '2024-05-03 12:08:36.627074', NULL);
+INSERT INTO qiita.message VALUES (3, 'message 3', '2024-05-03 12:08:36.627074', NULL);
+
+
+--
+-- Data for Name: message_user; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.message_user VALUES ('test@foo.bar', 1, false);
+INSERT INTO qiita.message_user VALUES ('shared@foo.bar', 1, false);
+INSERT INTO qiita.message_user VALUES ('test@foo.bar', 2, false);
+INSERT INTO qiita.message_user VALUES ('test@foo.bar', 3, false);
+
+
+--
+-- Data for Name: oauth_identifiers; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.oauth_identifiers VALUES ('8mL2V1gX1kK0gXuKpEhIhzaiVxrhLvJ0OjHkeqJHKjG3d6abU2', 'qbQolcKEJ64I4jUbMxILwuTFb7IOXlYMG78QnqgtvlpIEQdiGWLUmKplz2qfnZwy7d7hqjc73qntzKTONhY27wT6cKohnNuPuKMCTLOQgrJvD6eJ2lKWH1pZeGM2zMLucZcSzlTQjYhiZruUbMeZ13GjsuFBjyVOzF8HP4cQ4xQuA1Fr8N4Yf9yQn5VqcA1byCnMWaPV95FFokdUlFCUGGEeJVRKbEn5t7qAgUlwz0B6quZICHtpiKuVDl8lNZm');
+INSERT INTO qiita.oauth_identifiers VALUES ('ROeSvinuTLAggxQLrsa6ycCw0ZvbYaPk8DYHB5fb8J6CM3CavA', 'vvbBSxs2su0Vcx4Qt4pwgCGkiq7bOemXnxDhsntSTxj9PAIFyDFOG1rNxj9xPhF8ugPxacilgs5PrRj93mYhnKHSTvMM9ksfQ6GmV3GvtCX0gAAjtE29ChyT0DZzOhwumke2ip9lumyZbYZhWAgWyyuzCmsKqvNjAXJfY70juQaGn3ySTmNXtqnVT7HYmSJYsqY07FLuL0CV696dsrbEOBja8Xi6nlhkiQ4g6d2UI55PdqMEz1J0zKnLNiQirGL');
+INSERT INTO qiita.oauth_identifiers VALUES ('CTjfltNkjT7zpR9zvXqyhmaFPsaK4kml2x1gEuxfbv5oBCbFvn', 'uvkbakS8Zwdcd4LQUiC5rUbwAgvN6WIY8wex12Ve3sFEkeplwjxb3lTid76tpPfSGKmm3gGmfXberwtQ9Qjns82NC3x9qXZ1E85M3IXXP7DZQC1kHY24V6ftx7pJCFfTjSJEhHeZLV5Uigz08Oclo3uQCkDBWBeE42QHg9XHgIy7yeW90Z9OFPfucEWnMdodSuGAhoxtkpCK6t1QsVO1cXOrY0Vk3Yay3TrAqOpfW6008FFRzakbOqKRfTVTlrg');
+INSERT INTO qiita.oauth_identifiers VALUES ('DWelYzEYJYcZ4wlqUp0bHGXojrvZVz0CNBJvOqUKcrPQ5p4UqE', NULL);
+INSERT INTO qiita.oauth_identifiers VALUES ('19ndkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAVt8IhK7gZgDaO4', 'J7FfQ7CQdOxuKhQAf1eoGgBAE81Ns8Gu3EKaWFm3IO2JKhAmmCWZuabe0O5Mp28s1');
+INSERT INTO qiita.oauth_identifiers VALUES ('yKDgajoKn5xlOA8tpo48Rq8mWJkH9z4LBCx2SvqWYLIryaan2u', '9xhU5rvzq8dHCEI5sSN95jesUULrZi6pT6Wuc71fDbFbsrnWarcSq56TJLN4kP4hH');
+INSERT INTO qiita.oauth_identifiers VALUES ('dHgaXDwq665ksFPqfIoD3Jt8KRXdSioTRa4lGa5mGDnz6JTIBf', 'xqx61SD4M2EWbaS0WYv3H1nIemkvEAMIn16XMLjy5rTCqi7opCcWbfLINEwtV48bQ');
+INSERT INTO qiita.oauth_identifiers VALUES ('4MOBzUBHBtUmwhaC258H7PS0rBBLyGQrVxGPgc9g305bvVhf6h', 'rFb7jwAb3UmSUN57Bjlsi4DTl2owLwRpwCc0SggRNEVb2Ebae2p5Umnq20rNMhmqN');
+
+
+--
+-- Data for Name: oauth_software; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.oauth_software VALUES (1, 'yKDgajoKn5xlOA8tpo48Rq8mWJkH9z4LBCx2SvqWYLIryaan2u');
+INSERT INTO qiita.oauth_software VALUES (2, 'dHgaXDwq665ksFPqfIoD3Jt8KRXdSioTRa4lGa5mGDnz6JTIBf');
+INSERT INTO qiita.oauth_software VALUES (3, '4MOBzUBHBtUmwhaC258H7PS0rBBLyGQrVxGPgc9g305bvVhf6h');
+
+
+--
+-- Data for Name: ontology; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.ontology VALUES (999999999, 'ENA', true, 'European Nucleotide Archive Submission Ontology', NULL, 'http://www.ebi.ac.uk/embl/Documentation/ENA-Reads.html', 'The ENA CV is to be used to annotate XML submissions to the ENA.', '2009-02-23');
+
+
+--
+-- Data for Name: parameter_artifact_type; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.parameter_artifact_type VALUES (1, 3);
+INSERT INTO qiita.parameter_artifact_type VALUES (1, 5);
+INSERT INTO qiita.parameter_artifact_type VALUES (11, 1);
+INSERT INTO qiita.parameter_artifact_type VALUES (11, 2);
+INSERT INTO qiita.parameter_artifact_type VALUES (11, 4);
+INSERT INTO qiita.parameter_artifact_type VALUES (27, 6);
+INSERT INTO qiita.parameter_artifact_type VALUES (59, 7);
+INSERT INTO qiita.parameter_artifact_type VALUES (60, 7);
+INSERT INTO qiita.parameter_artifact_type VALUES (61, 7);
+INSERT INTO qiita.parameter_artifact_type VALUES (62, 7);
+
+
+--
+-- Data for Name: parent_artifact; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.parent_artifact VALUES (2, 1);
+INSERT INTO qiita.parent_artifact VALUES (3, 1);
+INSERT INTO qiita.parent_artifact VALUES (4, 2);
+INSERT INTO qiita.parent_artifact VALUES (5, 2);
+INSERT INTO qiita.parent_artifact VALUES (6, 2);
+INSERT INTO qiita.parent_artifact VALUES (9, 8);
+
+
+--
+-- Data for Name: parent_processing_job; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.parent_processing_job VALUES ('b72369f9-a886-4193-8d3d-f7b504168e75', 'd19f76ee-274e-4c1b-b3a2-a12d73507c55');
+
+
+--
+-- Data for Name: study_tags; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+
+
+--
+-- Data for Name: per_study_tags; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+
+
+--
+-- Data for Name: prep_1; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.prep_1 VALUES ('qiita_sample_column_names', '{"columns": ["barcode", "library_construction_protocol", "primer", "target_subfragment", "target_gene", "run_center", "run_prefix", "run_date", "experiment_center", "experiment_design_description", "experiment_title", "platform", "instrument_model", "samp_size", "sequencing_meth", "illumina_technology", "sample_center", "pcr_primers", "study_center", "center_name", "center_project_name", "emp_status"]}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKB1.640202', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "GTCCGCAAGTTA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKB2.640194', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CGTAGAGCTCTC", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKB3.640195', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CCTCTGAGAGCT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKB4.640189', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CCTCGATGCAGT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKB5.640181', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "GCGGACTATTCA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKB6.640176', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CGTGCACAATTG", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKB7.640196', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CGGCCTAAGTTC", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKB8.640193', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "AGCGCTCACATC", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKB9.640200', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "TGGTTATGGCAC", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKD1.640179', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CGAGGTTCTGAT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKD2.640178', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "AACTCCTGTGGA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKD3.640198', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "TAATGGTCGTAG", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKD4.640185', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "TTGCACCGTCGA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKD5.640186', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "TGCTACAGACGT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKD6.640190', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "ATGGCCTGACTA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKD7.640191', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "ACGCACATACAA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKD8.640184', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "TGAGTGGTCTGT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKD9.640182', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "GATAGCACTCGT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKM1.640183', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "TAGCGCGAACTT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKM2.640199', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CATACACGCACC", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKM3.640197', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "ACCTCAGTCAAG", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKM4.640180', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "TCGACCAAACAC", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKM5.640177', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CCACCCAGTAAC", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKM6.640187', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "ATATCGCGATGA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKM7.640188', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CGCCGGTAATCT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKM8.640201', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CCGATGCCTTGA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_1 VALUES ('1.SKM9.640192', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "AGCAGGCACGAA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+
+
+--
+-- Data for Name: prep_2; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
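+-- Same layout as qiita.prep_1 above: a 'qiita_sample_column_names' sentinel
+-- row followed by one metadata row per sample.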
+INSERT INTO qiita.prep_2 VALUES ('qiita_sample_column_names', '{"columns": ["barcode", "library_construction_protocol", "primer", "target_subfragment", "target_gene", "run_center", "run_prefix", "run_date", "experiment_center", "experiment_design_description", "experiment_title", "platform", "instrument_model", "samp_size", "sequencing_meth", "illumina_technology", "sample_center", "pcr_primers", "study_center", "center_name", "center_project_name", "emp_status"]}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKB1.640202', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "GTCCGCAAGTTA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKB2.640194', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CGTAGAGCTCTC", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKB3.640195', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CCTCTGAGAGCT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKB4.640189', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CCTCGATGCAGT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKB5.640181', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "GCGGACTATTCA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKB6.640176', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CGTGCACAATTG", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKB7.640196', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CGGCCTAAGTTC", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKB8.640193', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "AGCGCTCACATC", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKB9.640200', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "TGGTTATGGCAC", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKD1.640179', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CGAGGTTCTGAT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKD2.640178', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "AACTCCTGTGGA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKD3.640198', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "TAATGGTCGTAG", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKD4.640185', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "TTGCACCGTCGA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKD5.640186', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "TGCTACAGACGT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKD6.640190', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "ATGGCCTGACTA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKD7.640191', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "ACGCACATACAA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKD8.640184', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "TGAGTGGTCTGT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKD9.640182', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "GATAGCACTCGT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKM1.640183', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "TAGCGCGAACTT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKM2.640199', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CATACACGCACC", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKM3.640197', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "ACCTCAGTCAAG", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKM4.640180', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "TCGACCAAACAC", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKM5.640177', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CCACCCAGTAAC", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKM6.640187', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "ATATCGCGATGA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKM7.640188', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CGCCGGTAATCT", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKM8.640201', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "CCGATGCCTTGA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+INSERT INTO qiita.prep_2 VALUES ('1.SKM9.640192', '{"primer": "GTGCCAGCMGCCGCGGTAA", "barcode": "AGCAGGCACGAA", "platform": "Illumina", "run_date": "8/1/12", "samp_size": ".25,g", "emp_status": "EMP", "run_center": "ANL", "run_prefix": "s_G1_L001_sequences", "center_name": "ANL", "pcr_primers": "FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT", "target_gene": "16S rRNA", "study_center": "CCME", "sample_center": "ANL", "sequencing_meth": "Sequencing by synthesis", "experiment_title": "Cannabis Soil Microbiome", "instrument_model": "Illumina MiSeq", "experiment_center": "ANL", "target_subfragment": "V4", "center_project_name": null, "illumina_technology": "MiSeq", "experiment_design_description": "micro biome of soil and rhizosphere of cannabis plants from CA", "library_construction_protocol": "This analysis was done as in Caporaso et al 2011 Genome research. The PCR primers (F515/R806) were developed against the V4 region of the 16S rRNA (both bacteria and archaea), which we determined would yield optimal community clustering with reads of this length using a procedure similar to that of ref. 15. [For reference, this primer pair amplifies the region 533_786 in the Escherichia coli strain 83972 sequence (greengenes accession no. prokMSA_id:470367).] The reverse PCR primer is barcoded with a 12-base error-correcting Golay code to facilitate multiplexing of up to 1,500 samples per lane, and both PCR primers contain sequencer adapter regions."}');
+
+
+--
+-- Data for Name: prep_template; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.prep_template VALUES (2, 2, 'success', 'Metagenomics', 7, 'Prep information 2', false, '2024-05-03 12:08:37.549542', '2024-05-03 12:08:37.549542', NULL);
+INSERT INTO qiita.prep_template VALUES (1, 2, 'success', 'Metagenomics', 1, 'Prep information 1', false, '1970-01-01 00:00:00', '1970-01-01 00:00:00', NULL);
+
+
+--
+-- Data for Name: prep_template_filepath; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.prep_template_filepath VALUES (1, 18);
+INSERT INTO qiita.prep_template_filepath VALUES (1, 19);
+INSERT INTO qiita.prep_template_filepath VALUES (1, 20);
+INSERT INTO qiita.prep_template_filepath VALUES (1, 21);
+
+
+--
+-- Data for Name: prep_template_processing_job; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+
+
+--
+-- Data for Name: prep_template_sample; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKB8.640193', 'ERX0000000');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKD8.640184', 'ERX0000001');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKB7.640196', 'ERX0000002');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKM9.640192', 'ERX0000003');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKM4.640180', 'ERX0000004');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKM5.640177', 'ERX0000005');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKB5.640181', 'ERX0000006');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKD6.640190', 'ERX0000007');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKB2.640194', 'ERX0000008');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKD2.640178', 'ERX0000009');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKM7.640188', 'ERX0000010');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKB1.640202', 'ERX0000011');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKD1.640179', 'ERX0000012');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKD3.640198', 'ERX0000013');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKM8.640201', 'ERX0000014');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKM2.640199', 'ERX0000015');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKB9.640200', 'ERX0000016');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKD5.640186', 'ERX0000017');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKM3.640197', 'ERX0000018');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKD9.640182', 'ERX0000019');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKB4.640189', 'ERX0000020');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKD7.640191', 'ERX0000021');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKM6.640187', 'ERX0000022');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKD4.640185', 'ERX0000023');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKB3.640195', 'ERX0000024');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKB6.640176', 'ERX0000025');
+INSERT INTO qiita.prep_template_sample VALUES (1, '1.SKM1.640183', 'ERX0000026');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKB8.640193', 'ERX0000000');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKD8.640184', 'ERX0000001');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKB7.640196', 'ERX0000002');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKM9.640192', 'ERX0000003');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKM4.640180', 'ERX0000004');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKM5.640177', 'ERX0000005');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKB5.640181', 'ERX0000006');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKD6.640190', 'ERX0000007');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKB2.640194', 'ERX0000008');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKD2.640178', 'ERX0000009');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKM7.640188', 'ERX0000010');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKB1.640202', 'ERX0000011');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKD1.640179', 'ERX0000012');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKD3.640198', 'ERX0000013');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKM8.640201', 'ERX0000014');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKM2.640199', 'ERX0000015');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKB9.640200', 'ERX0000016');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKD5.640186', 'ERX0000017');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKM3.640197', 'ERX0000018');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKD9.640182', 'ERX0000019');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKB4.640189', 'ERX0000020');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKD7.640191', 'ERX0000021');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKM6.640187', 'ERX0000022');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKD4.640185', 'ERX0000023');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKB3.640195', 'ERX0000024');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKB6.640176', 'ERX0000025');
+INSERT INTO qiita.prep_template_sample VALUES (2, '1.SKM1.640183', 'ERX0000026');
+
+
+--
+-- Data for Name: preparation_artifact; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.preparation_artifact VALUES (1, 1);
+INSERT INTO qiita.preparation_artifact VALUES (1, 2);
+INSERT INTO qiita.preparation_artifact VALUES (1, 3);
+INSERT INTO qiita.preparation_artifact VALUES (1, 4);
+INSERT INTO qiita.preparation_artifact VALUES (1, 5);
+INSERT INTO qiita.preparation_artifact VALUES (1, 6);
+INSERT INTO qiita.preparation_artifact VALUES (2, 7);
+
+
+--
+-- Data for Name: processing_job_resource_allocation; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('REGISTER', 'single-core-8gb', 'REGISTER', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('default', 'single-core-8gb', 'RELEASE_VALIDATORS_RESOURCE_PARAM', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('default', 'single-core-8gb', 'COMPLETE_JOBS_RESOURCE_PARAM', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('default', 'multi-core-vlow', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 5 --mem-per-cpu 8gb --time 168:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('delete_analysis', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Calculate beta correlation', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('delete_sample_template', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('delete_study', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('delete_sample_or_column', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('create_sample_template', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('update_prep_template', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('copy_artifact', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('delete_artifact', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('download_remote_files', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('list_remote_files', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('submit_to_EBI', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Generate HTML summary', 'single-core-8gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem-per-cpu 8gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('update_sample_template', 'single-core-16gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 16gb --time 10:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('build_analysis_files', 'single-core-16gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 16gb --time 10:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Custom-axis Emperor plot', 'single-core-16gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 16gb --time 10:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Calculate alpha correlation', 'single-core-16gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 16gb --time 10:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Summarize taxa', 'single-core-16gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 16gb --time 10:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Perform Principal Coordinates Analysis (PCoA)', 'single-core-16gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 16gb --time 10:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Split libraries', 'single-core-56gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 60gb --time 25:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Calculate alpha diversity', 'single-core-56gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 60gb --time 25:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Calculate beta diversity', 'single-core-56gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 60gb --time 25:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Calculate beta group significance', 'single-core-56gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 60gb --time 25:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Filter samples by metadata', 'single-core-56gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 60gb --time 25:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Rarefy features', 'single-core-56gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 60gb --time 25:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Validate', 'single-core-56gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 60gb --time 25:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Trimming', 'single-core-120gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 120gb --time 80:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Split libraries FASTQ', 'single-core-120gb', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 1 --mem 120gb --time 80:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Deblur', 'multi-core-low', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 5 --mem 96gb --time 130:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Shogun', 'multi-core-low', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 5 --mem 96gb --time 130:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Pick closed-reference OTUs', 'multi-core-high', 'RESOURCE_PARAMS_COMMAND', '-p qiita -N 1 -n 5 --mem 120gb --time 130:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Pick closed-reference OTUs', 'single-core-24gb', 'RELEASE_VALIDATORS_RESOURCE_PARAM', '-p qiita -N 1 -n 1 --mem 24gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Trimming', 'single-core-24gb', 'RELEASE_VALIDATORS_RESOURCE_PARAM', '-p qiita -N 1 -n 1 --mem 24gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Filter samples by metadata', 'single-core-24gb', 'RELEASE_VALIDATORS_RESOURCE_PARAM', '-p qiita -N 1 -n 1 --mem 24gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Rarefy features', 'single-core-24gb', 'RELEASE_VALIDATORS_RESOURCE_PARAM', '-p qiita -N 1 -n 1 --mem 24gb --time 50:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('BIOM', 'single-core-16gb', 'COMPLETE_JOBS_RESOURCE_PARAM', '-p qiita -N 1 -n 1 --mem 16gb --time 10:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('alpha_vector', 'single-core-16gb', 'COMPLETE_JOBS_RESOURCE_PARAM', '-p qiita -N 1 -n 1 --mem 16gb --time 10:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('distance_matrix', 'single-core-16gb', 'COMPLETE_JOBS_RESOURCE_PARAM', '-p qiita -N 1 -n 1 --mem 16gb --time 10:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Demultiplexed', 'single-core-16gb', 'COMPLETE_JOBS_RESOURCE_PARAM', '-p qiita -N 1 -n 1 --mem 16gb --time 10:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('ordination_results', 'single-core-16gb', 'COMPLETE_JOBS_RESOURCE_PARAM', '-p qiita -N 1 -n 1 --mem 16gb --time 10:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('q2_visualization', 'single-core-16gb', 'COMPLETE_JOBS_RESOURCE_PARAM', '-p qiita -N 1 -n 1 --mem 16gb --time 10:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('default', NULL, 'VALIDATOR', '-p qiita -N 1 -n 1 --mem 1gb --time 4:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('per_sample_FASTQ', NULL, 'VALIDATOR', '-p qiita -N 1 -n 5 --mem 2gb --time 10:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('ordination_results', NULL, 'VALIDATOR', '-p qiita -N 1 -n 1 --mem 10gb --time 2:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('Demultiplexed', NULL, 'VALIDATOR', '-p qiita -N 1 -n 5 --mem 25gb --time 150:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('distance_matrix', NULL, 'VALIDATOR', '-p qiita -N 1 -n 1 --mem 42gb --time 150:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('BIOM', NULL, 'VALIDATOR', '-p qiita -N 1 -n 1 --mem 90gb --time 150:00:00');
+INSERT INTO qiita.processing_job_resource_allocation VALUES ('alpha_vector', NULL, 'VALIDATOR', '-p qiita -N 1 -n 1 --mem 10gb --time 70:00:00');
+
+
+--
+-- Data for Name: processing_job_validator; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+
+
+--
+-- Data for Name: processing_job_workflow; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.processing_job_workflow VALUES (1, 'shared@foo.bar', 'Testing processing workflow');
+INSERT INTO qiita.processing_job_workflow VALUES (2, 'test@foo.bar', 'Single node workflow');
+
+
+--
+-- Data for Name: processing_job_workflow_root; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.processing_job_workflow_root VALUES (1, 'b72369f9-a886-4193-8d3d-f7b504168e75');
+INSERT INTO qiita.processing_job_workflow_root VALUES (2, 'ac653cb5-76a6-4a45-929e-eb9b2dee6b63');
+
+
+--
+-- Data for Name: publication; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.publication VALUES ('10.1038/nmeth.f.303', '20383131');
+INSERT INTO qiita.publication VALUES ('10.1186/2047-217X-1-7', '23587224');
+INSERT INTO qiita.publication VALUES ('10.100/123456', '123456');
+INSERT INTO qiita.publication VALUES ('10.100/7891011', '7891011');
+
+
+--
+-- Data for Name: reference; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.reference VALUES (1, 'Greengenes', '13_8', 6, 7, 8);
+INSERT INTO qiita.reference VALUES (2, 'Silva', 'test', 10, 11, NULL);
+
+
+--
+-- Data for Name: restrictions; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.restrictions VALUES ('study_sample', 'env_package', '{air,"built environment",host-associated,human-associated,human-skin,human-oral,human-gut,human-vaginal,"microbial mat/biofilm","misc environment",plant-associated,sediment,soil,wastewater/sludge,water}');
+INSERT INTO qiita.restrictions VALUES ('prep_template_sample', 'target_gene', '{"16S rRNA","18S rRNA",ITS1/2,LSU}');
+INSERT INTO qiita.restrictions VALUES ('prep_template_sample', 'target_subfragment', '{V3,V4,V6,V9,ITS1/2}');
+INSERT INTO qiita.restrictions VALUES ('prep_template_sample', 'instrument_model', '{"454 GS","454 GS 20","454 GS FLX","454 GS FLX+","454 GS FLX Titanium","454 GS Junior","Illumina Genome Analyzer","Illumina Genome Analyzer II","Illumina Genome Analyzer IIx","Illumina HiScanSQ","Illumina HiSeq 1000","Illumina HiSeq 1500","Illumina HiSeq 2000","Illumina HiSeq 2500","Illumina HiSeq 3000","Illumina HiSeq 4000","Illumina MiSeq","Illumina MiniSeq","Illumina NovaSeq 6000","NextSeq 500","NextSeq 550","Ion Torrent PGM","Ion Torrent Proton","Ion Torrent S5","Ion Torrent S5 XL",MinION,GridION,PromethION,unspecified}');
+INSERT INTO qiita.restrictions VALUES ('prep_template_sample', 'platform', '{FASTA,Illumina,Ion_Torrent,LS454,"Oxford Nanopore"}');
+
+
+--
+-- Data for Name: sample_1; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.sample_1 VALUES ('qiita_sample_column_names', '{"columns": ["season_environment", "assigned_from_geo", "texture", "taxon_id", "depth", "host_taxid", "common_name", "water_content_soil", "elevation", "temp", "tot_nitro", "samp_salinity", "altitude", "env_biome", "country", "ph", "anonymized_name", "tot_org_carb", "description_duplicate", "env_feature", "physical_specimen_location", "physical_specimen_remaining", "dna_extracted", "sample_type", "env_package", "collection_timestamp", "host_subject_id", "description", "latitude", "longitude", "scientific_name"]}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKM7.640188', '{"ph": "6.82", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "63.1 sand, 17.7 silt, 19.2 clay", "altitude": "0", "latitude": "60.1102854322", "taxon_id": "1118232", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "74.7123248382", "tot_nitro": "1.3", "host_taxid": "3483", "common_name": "root metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "3.31", "dna_extracted": "true", "samp_salinity": "7.44", "anonymized_name": "SKM7", "host_subject_id": "1001:B6", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.101", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Bucu Roots", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKD9.640182', '{"ph": "6.82", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "66 sand, 16.3 silt, 17.7 clay", "altitude": "0", "latitude": "23.1218032799", "taxon_id": "1118232", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "42.838497795", "tot_nitro": "1.51", "host_taxid": "3483", "common_name": "root metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "4.32", "dna_extracted": "true", "samp_salinity": "7.1", "anonymized_name": "SKD9", "host_subject_id": "1001:D3", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.178", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Diesel Root", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKM8.640201', '{"ph": "6.82", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "63.1 sand, 17.7 silt, 19.2 clay", "altitude": "0", "latitude": "3.21190859967", "taxon_id": "1118232", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "26.8138925876", "tot_nitro": "1.3", "host_taxid": "3483", "common_name": "root metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "3.31", "dna_extracted": "true", "samp_salinity": "7.44", "anonymized_name": "SKM8", "host_subject_id": "1001:D8", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.101", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Bucu Roots", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKB8.640193', '{"ph": "6.94", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "64.6 sand, 17.6 silt, 17.8 clay", "altitude": "0", "latitude": "74.0894932572", "taxon_id": "1118232", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "65.3283470202", "tot_nitro": "1.41", "host_taxid": "3483", "common_name": "root metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "5", "dna_extracted": "true", "samp_salinity": "7.15", "anonymized_name": "SKB8", "host_subject_id": "1001:M7", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.164", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Burmese root", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKD2.640178', '{"ph": "6.8", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "66 sand, 16.3 silt, 17.7 clay", "altitude": "0", "latitude": "53.5050692395", "taxon_id": "410658", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "31.6056761814", "tot_nitro": "1.51", "host_taxid": "3483", "common_name": "soil metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "4.32", "dna_extracted": "true", "samp_salinity": "7.1", "anonymized_name": "SKD2", "host_subject_id": "1001:B5", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.178", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Diesel bulk", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKM3.640197', '{"ph": "6.82", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "63.1 sand, 17.7 silt, 19.2 clay", "altitude": "0", "latitude": "Not applicable", "taxon_id": "410658", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "31.2003474585", "tot_nitro": "1.3", "host_taxid": "3483", "common_name": "soil metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "3.31", "dna_extracted": "true", "samp_salinity": "7.44", "anonymized_name": "SKM3", "host_subject_id": "1001:B7", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.101", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Bucu bulk", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKM4.640180', '{"ph": "6.82", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "63.1 sand, 17.7 silt, 19.2 clay", "altitude": "0", "latitude": "Not applicable", "taxon_id": "939928", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "Not applicable", "tot_nitro": "1.3", "host_taxid": "3483", "common_name": "rhizosphere metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "3.31", "dna_extracted": "true", "samp_salinity": "7.44", "anonymized_name": "SKM4", "host_subject_id": "1001:D2", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.101", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Bucu Rhizo", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKB9.640200', '{"ph": "6.8", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "64.6 sand, 17.6 silt, 17.8 clay", "altitude": "0", "latitude": "12.6245524972", "taxon_id": "1118232", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "96.0693176066", "tot_nitro": "1.41", "host_taxid": "3483", "common_name": "root metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "5", "dna_extracted": "true", "samp_salinity": "7.15", "anonymized_name": "SKB9", "host_subject_id": "1001:B3", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.164", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Burmese root", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKB4.640189', '{"ph": "6.94", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "64.6 sand, 17.6 silt, 17.8 clay", "altitude": "0", "latitude": "43.9614715197", "taxon_id": "939928", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "82.8516734159", "tot_nitro": "1.41", "host_taxid": "3483", "common_name": "rhizosphere metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "5", "dna_extracted": "true", "samp_salinity": "7.15", "anonymized_name": "SKB4", "host_subject_id": "1001:D7", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.164", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Burmese Rhizo", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKB5.640181', '{"ph": "6.94", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "64.6 sand, 17.6 silt, 17.8 clay", "altitude": "0", "latitude": "10.6655599093", "taxon_id": "939928", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "70.784770579", "tot_nitro": "1.41", "host_taxid": "3483", "common_name": "rhizosphere metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "5", "dna_extracted": "true", "samp_salinity": "7.15", "anonymized_name": "SKB5", "host_subject_id": "1001:M4", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.164", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Burmese Rhizo", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKB6.640176', '{"ph": "6.94", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "64.6 sand, 17.6 silt, 17.8 clay", "altitude": "0", "latitude": "78.3634273709", "taxon_id": "939928", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "74.423907894", "tot_nitro": "1.41", "host_taxid": "3483", "common_name": "rhizosphere metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "5", "dna_extracted": "true", "samp_salinity": "7.15", "anonymized_name": "SKB6", "host_subject_id": "1001:D5", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.164", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Burmese Rhizo", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKM2.640199', '{"ph": "6.82", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "63.1 sand, 17.7 silt, 19.2 clay", "altitude": "0", "latitude": "82.8302905615", "taxon_id": "410658", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "86.3615778099", "tot_nitro": "1.3", "host_taxid": "3483", "common_name": "soil metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "3.31", "dna_extracted": "true", "samp_salinity": "7.44", "anonymized_name": "SKM2", "host_subject_id": "1001:D4", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.101", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Bucu bulk", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKM5.640177', '{"ph": "6.82", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "63.1 sand, 17.7 silt, 19.2 clay", "altitude": "0", "latitude": "44.9725384282", "taxon_id": "939928", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "66.1920014699", "tot_nitro": "1.3", "host_taxid": "3483", "common_name": "rhizosphere metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "3.31", "dna_extracted": "true", "samp_salinity": "7.44", "anonymized_name": "SKM5", "host_subject_id": "1001:M3", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.101", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Bucu Rhizo", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKB1.640202', '{"ph": "6.94", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "64.6 sand, 17.6 silt, 17.8 clay", "altitude": "0", "latitude": "4.59216095574", "taxon_id": "410658", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "63.5115213108", "tot_nitro": "1.41", "host_taxid": "3483", "common_name": "soil metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "5", "dna_extracted": "true", "samp_salinity": "7.15", "anonymized_name": "SKB1", "host_subject_id": "1001:M2", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.164", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Burmese bulk", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKD8.640184', '{"ph": "6.8", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "66 sand, 16.3 silt, 17.7 clay", "altitude": "0", "latitude": "57.571893782", "taxon_id": "1118232", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "32.5563076447", "tot_nitro": "1.51", "host_taxid": "3483", "common_name": "root metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "4.32", "dna_extracted": "true", "samp_salinity": "7.1", "anonymized_name": "SKD8", "host_subject_id": "1001:D9", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.178", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Diesel Root", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKD4.640185', '{"ph": "6.8", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "66 sand, 16.3 silt, 17.7 clay", "altitude": "0", "latitude": "40.8623799474", "taxon_id": "939928", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "6.66444220187", "tot_nitro": "1.51", "host_taxid": "3483", "common_name": "rhizosphere metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "4.32", "dna_extracted": "true", "samp_salinity": "7.1", "anonymized_name": "SKD4", "host_subject_id": "1001:M9", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.178", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Diesel Rhizo", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKB3.640195', '{"ph": "6.94", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "64.6 sand, 17.6 silt, 17.8 clay", "altitude": "0", "latitude": "95.2060749748", "taxon_id": "410658", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "27.3592668624", "tot_nitro": "1.41", "host_taxid": "3483", "common_name": "soil metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "5", "dna_extracted": "true", "samp_salinity": "7.15", "anonymized_name": "SKB3", "host_subject_id": "1001:M6", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.164", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Burmese bulk", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKM1.640183', '{"ph": "6.82", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "63.1 sand, 17.7 silt, 19.2 clay", "altitude": "0", "latitude": "38.2627021402", "taxon_id": "410658", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "3.48274264219", "tot_nitro": "1.3", "host_taxid": "3483", "common_name": "soil metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "3.31", "dna_extracted": "true", "samp_salinity": "7.44", "anonymized_name": "SKM1", "host_subject_id": "1001:D1", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.101", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Bucu bulk", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKB7.640196', '{"ph": "6.94", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "64.6 sand, 17.6 silt, 17.8 clay", "altitude": "0", "latitude": "13.089194595", "taxon_id": "1118232", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "92.5274472082", "tot_nitro": "1.41", "host_taxid": "3483", "common_name": "root metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "5", "dna_extracted": "true", "samp_salinity": "7.15", "anonymized_name": "SKB7", "host_subject_id": "1001:M8", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.164", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Burmese root", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKD3.640198', '{"ph": "6.8", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "66 sand, 16.3 silt, 17.7 clay", "altitude": "0", "latitude": "84.0030227585", "taxon_id": "410658", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "66.8954849864", "tot_nitro": "1.51", "host_taxid": "3483", "common_name": "soil metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "4.32", "dna_extracted": "true", "samp_salinity": "7.1", "anonymized_name": "SKD3", "host_subject_id": "1001:B1", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.178", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Diesel bulk", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKD7.640191', '{"ph": "6.8", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "66 sand, 16.3 silt, 17.7 clay", "altitude": "0", "latitude": "68.51099627", "taxon_id": "1118232", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "2.35063674718", "tot_nitro": "1.51", "host_taxid": "3483", "common_name": "root metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "4.32", "dna_extracted": "true", "samp_salinity": "7.1", "anonymized_name": "SKD7", "host_subject_id": "1001:D6", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.178", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Diesel Root", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKD6.640190', '{"ph": "6.8", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "66 sand, 16.3 silt, 17.7 clay", "altitude": "0", "latitude": "29.1499460692", "taxon_id": "939928", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "82.1270418227", "tot_nitro": "1.51", "host_taxid": "3483", "common_name": "rhizosphere metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "4.32", "dna_extracted": "true", "samp_salinity": "7.1", "anonymized_name": "SKD6", "host_subject_id": "1001:B9", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.178", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Diesel Rhizo", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKB2.640194', '{"ph": "6.94", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "64.6 sand, 17.6 silt, 17.8 clay", "altitude": "0", "latitude": "35.2374368957", "taxon_id": "410658", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "68.5041623253", "tot_nitro": "1.41", "host_taxid": "3483", "common_name": "soil metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "5", "dna_extracted": "true", "samp_salinity": "7.15", "anonymized_name": "SKB2", "host_subject_id": "1001:B4", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.164", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Burmese bulk", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKM9.640192', '{"ph": "6.82", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "63.1 sand, 17.7 silt, 19.2 clay", "altitude": "0", "latitude": "12.7065957714", "taxon_id": "1118232", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "84.9722975792", "tot_nitro": "1.3", "host_taxid": "3483", "common_name": "root metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "3.31", "dna_extracted": "true", "samp_salinity": "7.44", "anonymized_name": "SKM9", "host_subject_id": "1001:B8", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.101", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Bucu Roots", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKM6.640187', '{"ph": "6.82", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "63.1 sand, 17.7 silt, 19.2 clay", "altitude": "0", "latitude": "0.291867635913", "taxon_id": "939928", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "68.5945325743", "tot_nitro": "1.3", "host_taxid": "3483", "common_name": "rhizosphere metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "3.31", "dna_extracted": "true", "samp_salinity": "7.44", "anonymized_name": "SKM6", "host_subject_id": "1001:B2", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.101", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Bucu Rhizo", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKD5.640186', '{"ph": "6.8", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "66 sand, 16.3 silt, 17.7 clay", "altitude": "0", "latitude": "85.4121476399", "taxon_id": "939928", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "15.6526750776", "tot_nitro": "1.51", "host_taxid": "3483", "common_name": "rhizosphere metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "4.32", "dna_extracted": "true", "samp_salinity": "7.1", "anonymized_name": "SKD5", "host_subject_id": "1001:M1", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.178", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Diesel Rhizo", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+INSERT INTO qiita.sample_1 VALUES ('1.SKD1.640179', '{"ph": "6.8", "temp": "15", "depth": "0.15", "country": "GAZ:United States of America", "texture": "66 sand, 16.3 silt, 17.7 clay", "altitude": "0", "latitude": "68.0991287718", "taxon_id": "410658", "elevation": "114", "env_biome": "ENVO:Temperate grasslands, savannas, and shrubland biome", "longitude": "34.8360987059", "tot_nitro": "1.51", "host_taxid": "3483", "common_name": "soil metagenome", "description": "Cannabis Soil Microbiome", "env_feature": "ENVO:plant-associated habitat", "env_package": "soil", "sample_type": "ENVO:soil", "tot_org_carb": "4.32", "dna_extracted": "true", "samp_salinity": "7.1", "anonymized_name": "SKD1", "host_subject_id": "1001:M5", "scientific_name": "1118232", "assigned_from_geo": "n", "season_environment": "winter", "water_content_soil": "0.178", "collection_timestamp": "2011-11-11 13:00:00", "description_duplicate": "Diesel bulk", "physical_specimen_location": "ANL", "physical_specimen_remaining": "true"}');
+
+
+--
+-- Data for Name: sample_template_filepath; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
INSERT INTO qiita.sample_template_filepath VALUES (1, 17);
--- Create the new prep_template_filepath
-INSERT INTO qiita.filepath (filepath, filepath_type_id, checksum, checksum_algorithm_id, data_directory_id) VALUES ('1_prep_1_19700101-000000.txt', 15, '3703494589', 1, 9);
-INSERT INTO qiita.filepath (filepath, filepath_type_id, checksum, checksum_algorithm_id, data_directory_id) VALUES ('1_prep_1_qiime_19700101-000000.txt', 16, '3703494589', 1, 9);
-INSERT INTO qiita.prep_template_filepath VALUES (1, 18), (1, 19);
-INSERT INTO qiita.filepath (filepath, filepath_type_id, checksum, checksum_algorithm_id, data_directory_id) VALUES ('1_prep_1_19700101-000000.txt', 15, '3703494589', 1, 9);
-INSERT INTO qiita.filepath (filepath, filepath_type_id, checksum, checksum_algorithm_id, data_directory_id) VALUES ('1_prep_1_qiime_19700101-000000.txt', 16, '3703494589', 1, 9);
-INSERT INTO qiita.prep_template_filepath VALUES (1, 20), (1, 21);
-
-
--- Inserting the BIOM artifact filepath
-INSERT INTO qiita.filepath (filepath, filepath_type_id, checksum, checksum_algorithm_id, data_directory_id) VALUES
- ('biom_table.biom', 7, 3574395811, 1, 16);
-
--- Link the artifacts with the filepaths
-INSERT INTO qiita.artifact_filepath (artifact_id, filepath_id)
- VALUES (1, 1), (1, 2),
- (2, 3), (2, 4), (2, 5),
- (4, 9), (5, 9), (6, 12), (7, 22),
- (8, 22), (9, 15);
-
--- Create some test messages
-INSERT INTO qiita.message (message) VALUES ('message 1'), ('Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque sed auctor ex, non placerat sapien. Vestibulum vestibulum massa ut sapien condimentum, cursus consequat diam sodales. Nulla aliquam arcu ut massa auctor, et vehicula mauris tempor. In lacinia viverra ante quis pellentesque. Nunc vel mi accumsan, porttitor eros ut, pharetra elit. Nulla ac nisi quis dui egestas malesuada vitae ut mauris. Morbi blandit non nisl a finibus. In erat velit, congue at ipsum sit amet, venenatis bibendum sem. Curabitur vel odio sed est rutrum rutrum. Quisque efficitur ut purus in ultrices. Pellentesque eu auctor justo.'), ('message 3');
-INSERT INTO qiita.message_user (message_id, email) VALUES (1, 'test@foo.bar'),(1, 'shared@foo.bar'),(2, 'test@foo.bar'),(3, 'test@foo.bar');
-
--- Create a loggin entry
-INSERT INTO qiita.logging (time, severity_id, msg, information)
- VALUES ('Sun Nov 22 21:29:30 2015', 2, 'Error message', NULL),
- ('Sun Nov 22 21:29:30 2015', 2, 'Error message', '{}');
-
--- Create some processing jobs
-INSERT INTO qiita.processing_job (processing_job_id, email, command_id, command_parameters, processing_job_status_id, logging_id, heartbeat, step, pending, hidden) VALUES
- ('6d368e16-2242-4cf8-87b4-a5dc40bb890b', 'test@foo.bar', 1, '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"input_data":1,"phred_offset":"auto"}', 3, NULL, NULL, NULL, NULL, false),
- ('4c7115e8-4c8e-424c-bf25-96c292ca1931', 'test@foo.bar', 1, '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":true,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"input_data":1,"phred_offset":"auto"}', 3, NULL, NULL, NULL, NULL, false),
- ('3c9991ab-6c14-4368-a48c-841e8837a79c', 'test@foo.bar', 3, '{"reference":1,"sortmerna_e_value":1,"sortmerna_max_pos":10000,"similarity":0.97,"sortmerna_coverage":0.97,"threads":1,"input_data":2}', 3, NULL, NULL, NULL, NULL, false),
- ('b72369f9-a886-4193-8d3d-f7b504168e75', 'shared@foo.bar', 1, '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":true,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"input_data":1,"phred_offset":"auto"}', 3, NULL, '2015-11-22 21:15:00', NULL, NULL, false),
- ('46b76f74-e100-47aa-9bf2-c0208bcea52d', 'test@foo.bar', 1, '{"max_barcode_errors": "1.5", "sequence_max_n": "0", "max_bad_run_length": "3", "phred_offset": "auto", "rev_comp": "False", "phred_quality_threshold": "3", "input_data": "1", "rev_comp_barcode": "False", "rev_comp_mapping_barcodes": "True", "min_per_read_length_fraction": "0.75", "barcode_type": "golay_12"}', 3, NULL, NULL, NULL, NULL, false),
- ('80bf25f3-5f1d-4e10-9369-315e4244f6d5', 'test@foo.bar', 3, '{"reference": "2", "similarity": "0.97", "sortmerna_e_value": "1", "sortmerna_max_pos": "10000", "input_data": "2", "threads": "1", "sortmerna_coverage": "0.97"}', 3, NULL, NULL, NULL, NULL, false),
- ('9ba5ae7a-41e1-4202-b396-0259aeaac366', 'test@foo.bar', 3, '{"reference": "1", "similarity": "0.97", "sortmerna_e_value": "1", "sortmerna_max_pos": "10000", "input_data": "2", "threads": "1", "sortmerna_coverage": "0.97"}', 3, NULL, NULL, NULL, NULL, false),
- ('e5609746-a985-41a1-babf-6b3ebe9eb5a9', 'test@foo.bar', 3, '{"reference": "1", "similarity": "0.97", "sortmerna_e_value": "1", "sortmerna_max_pos": "10000", "input_data": "2", "threads": "1", "sortmerna_coverage": "0.97"}', 3, NULL, NULL, NULL, NULL, false),
- ('6ad4d590-4fa3-44d3-9a8f-ddbb472b1b5f', 'test@foo.bar', 1, '{"max_barcode_errors": "1.5", "sequence_max_n": "0", "max_bad_run_length": "3", "phred_offset": "auto", "rev_comp": "False", "phred_quality_threshold": "3", "input_data": "1", "rev_comp_barcode": "False", "rev_comp_mapping_barcodes": "False", "min_per_read_length_fraction": "0.75", "barcode_type": "golay_12"}', 3, NULL, NULL, NULL, NULL, false),
- ('8a7a8461-e8a1-4b4e-a428-1bc2f4d3ebd0', 'test@foo.bar', 12, '{"biom_table": "8", "depth": "9000", "subsample_multinomial": "False"}', 3, NULL, NULL, NULL, NULL, false),
- ('063e553b-327c-4818-ab4a-adfe58e49860', 'test@foo.bar', 1, '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"input_data":1,"phred_offset":"auto"}', 1, NULL, NULL, NULL, NULL, true),
- ('bcc7ebcd-39c1-43e4-af2d-822e3589f14d', 'test@foo.bar', 2, '{"min_seq_len":100,"max_seq_len":1000,"trim_seq_length":false,"min_qual_score":25,"max_ambig":6,"max_homopolymer":6,"max_primer_mismatch":0,"barcode_type":"golay_12","max_barcode_errors":1.5,"disable_bc_correction":false,"qual_score_window":0,"disable_primers":false,"reverse_primers":"disable","reverse_primer_mismatches":0,"truncate_ambi_bases":false,"input_data":1}', 2, NULL, '2015-11-22 21:00:00', 'demultiplexing', NULL, true),
- ('d19f76ee-274e-4c1b-b3a2-a12d73507c55', 'shared@foo.bar', 3, '{"reference":1,"sortmerna_e_value":1,"sortmerna_max_pos":10000,"similarity":0.97,"sortmerna_coverage":0.97,"threads":1,"input_data":2}', 4, 1, '2015-11-22 21:30:00', 'generating demux file', NULL, true),
- ('ac653cb5-76a6-4a45-929e-eb9b2dee6b63', 'test@foo.bar', 1, '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"input_data":1}', 5, NULL, NULL, NULL, NULL, true);
-
-INSERT INTO qiita.artifact_processing_job (artifact_id, processing_job_id) VALUES
- (1, '6d368e16-2242-4cf8-87b4-a5dc40bb890b'),
- (1, '4c7115e8-4c8e-424c-bf25-96c292ca1931'),
- (2, '3c9991ab-6c14-4368-a48c-841e8837a79c'),
- (1, '063e553b-327c-4818-ab4a-adfe58e49860'),
- (1, 'bcc7ebcd-39c1-43e4-af2d-822e3589f14d'),
- (1, 'b72369f9-a886-4193-8d3d-f7b504168e75'),
- (2, 'd19f76ee-274e-4c1b-b3a2-a12d73507c55'),
- (1, '46b76f74-e100-47aa-9bf2-c0208bcea52d'),
- (2, '80bf25f3-5f1d-4e10-9369-315e4244f6d5'),
- (2, '9ba5ae7a-41e1-4202-b396-0259aeaac366'),
- (2, 'e5609746-a985-41a1-babf-6b3ebe9eb5a9'),
- (1, '6ad4d590-4fa3-44d3-9a8f-ddbb472b1b5f'),
- (8, '8a7a8461-e8a1-4b4e-a428-1bc2f4d3ebd0');
-
-INSERT INTO qiita.artifact_output_processing_job (artifact_id, processing_job_id, command_output_id) VALUES
- (3, '46b76f74-e100-47aa-9bf2-c0208bcea52d', 1),
- (6, '80bf25f3-5f1d-4e10-9369-315e4244f6d5', 3),
- (5, '9ba5ae7a-41e1-4202-b396-0259aeaac366', 3),
- (4, 'e5609746-a985-41a1-babf-6b3ebe9eb5a9', 3),
- (2, '6ad4d590-4fa3-44d3-9a8f-ddbb472b1b5f', 1),
- (9, '8a7a8461-e8a1-4b4e-a428-1bc2f4d3ebd0', 7);
-
--- Add client ids and secrets
-
-INSERT INTO qiita.oauth_identifiers (client_id) VALUES ('DWelYzEYJYcZ4wlqUp0bHGXojrvZVz0CNBJvOqUKcrPQ5p4UqE');
-INSERT INTO qiita.oauth_identifiers (client_id, client_secret) VALUES ('19ndkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAVt8IhK7gZgDaO4', 'J7FfQ7CQdOxuKhQAf1eoGgBAE81Ns8Gu3EKaWFm3IO2JKhAmmCWZuabe0O5Mp28s1');
-INSERT INTO qiita.oauth_identifiers (client_id, client_secret) VALUES ('yKDgajoKn5xlOA8tpo48Rq8mWJkH9z4LBCx2SvqWYLIryaan2u', '9xhU5rvzq8dHCEI5sSN95jesUULrZi6pT6Wuc71fDbFbsrnWarcSq56TJLN4kP4hH');
-INSERT INTO qiita.oauth_identifiers (client_id, client_secret) VALUES ('dHgaXDwq665ksFPqfIoD3Jt8KRXdSioTRa4lGa5mGDnz6JTIBf', 'xqx61SD4M2EWbaS0WYv3H1nIemkvEAMIn16XMLjy5rTCqi7opCcWbfLINEwtV48bQ');
-INSERT INTO qiita.oauth_identifiers (client_id, client_secret) VALUES ('4MOBzUBHBtUmwhaC258H7PS0rBBLyGQrVxGPgc9g305bvVhf6h', 'rFb7jwAb3UmSUN57Bjlsi4DTl2owLwRpwCc0SggRNEVb2Ebae2p5Umnq20rNMhmqN');
-
-UPDATE qiita.oauth_software SET client_id = 'yKDgajoKn5xlOA8tpo48Rq8mWJkH9z4LBCx2SvqWYLIryaan2u' WHERE software_id = 1;
-UPDATE qiita.oauth_software SET client_id = 'dHgaXDwq665ksFPqfIoD3Jt8KRXdSioTRa4lGa5mGDnz6JTIBf' WHERE software_id = 2;
-UPDATE qiita.oauth_software SET client_id = '4MOBzUBHBtUmwhaC258H7PS0rBBLyGQrVxGPgc9g305bvVhf6h' WHERE software_id = 3;
-
--- Add a processing workflow
-INSERT INTO qiita.processing_job_workflow (email, name)
- VALUES ('shared@foo.bar', 'Testing processing workflow'),
- ('test@foo.bar', 'Single node workflow');
-
-INSERT INTO qiita.processing_job_workflow_root (processing_job_workflow_id, processing_job_id)
- VALUES ('1', 'b72369f9-a886-4193-8d3d-f7b504168e75'),
- ('2', 'ac653cb5-76a6-4a45-929e-eb9b2dee6b63');
-
-INSERT INTO qiita.parent_processing_job (parent_id, child_id)
- VALUES ('b72369f9-a886-4193-8d3d-f7b504168e75', 'd19f76ee-274e-4c1b-b3a2-a12d73507c55');
+--
+-- Data for Name: software_artifact_type; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.software_artifact_type VALUES (2, 7);
+INSERT INTO qiita.software_artifact_type VALUES (3, 1);
+INSERT INTO qiita.software_artifact_type VALUES (3, 3);
+INSERT INTO qiita.software_artifact_type VALUES (3, 4);
+INSERT INTO qiita.software_artifact_type VALUES (3, 2);
+INSERT INTO qiita.software_artifact_type VALUES (3, 5);
+INSERT INTO qiita.software_artifact_type VALUES (3, 6);
+
+
+--
+-- Data for Name: software_publication; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.software_publication VALUES (1, '10.1038/nmeth.f.303');
+INSERT INTO qiita.software_publication VALUES (2, '10.1186/2047-217X-1-7');
+
+
+--
+-- Data for Name: stats_daily; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+
+
+--
+-- Data for Name: study_artifact; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.study_artifact VALUES (1, 1);
+INSERT INTO qiita.study_artifact VALUES (1, 2);
+INSERT INTO qiita.study_artifact VALUES (1, 3);
+INSERT INTO qiita.study_artifact VALUES (1, 4);
+INSERT INTO qiita.study_artifact VALUES (1, 5);
+INSERT INTO qiita.study_artifact VALUES (1, 6);
+INSERT INTO qiita.study_artifact VALUES (1, 7);
+
+
+--
+-- Data for Name: study_environmental_package; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.study_environmental_package VALUES (1, 'soil');
+INSERT INTO qiita.study_environmental_package VALUES (1, 'plant-associated');
+
+
+--
+-- Data for Name: study_portal; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.study_portal VALUES (1, 1);
+
+
+--
+-- Data for Name: study_prep_template; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.study_prep_template VALUES (1, 1);
+INSERT INTO qiita.study_prep_template VALUES (1, 2);
+
+
+--
+-- Data for Name: study_publication; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.study_publication VALUES (1, '10.100/123456', true);
+INSERT INTO qiita.study_publication VALUES (1, '123456', false);
+INSERT INTO qiita.study_publication VALUES (1, '10.100/7891011', true);
+INSERT INTO qiita.study_publication VALUES (1, '7891011', false);
+
+
+--
+-- Data for Name: study_users; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.study_users VALUES (1, 'shared@foo.bar');
+
+
+--
+-- Data for Name: term; Type: TABLE DATA; Schema: qiita; Owner: antoniog
+--
+
+INSERT INTO qiita.term VALUES (2052508974, 999999999, NULL, 'WGS', 'ENA:0000059', NULL, NULL, NULL, NULL, NULL, false);
+INSERT INTO qiita.term VALUES (2052508975, 999999999, NULL, 'Metagenomics', 'ENA:0000060', NULL, NULL, NULL, NULL, NULL, false);
+INSERT INTO qiita.term VALUES (2052508976, 999999999, NULL, 'Amplicon', 'ENA:0000061', NULL, NULL, NULL, NULL, NULL, false);
+INSERT INTO qiita.term VALUES (2052508984, 999999999, NULL, 'RNA-Seq', 'ENA:0000070', NULL, NULL, NULL, NULL, NULL, false);
+INSERT INTO qiita.term VALUES (2052508987, 999999999, NULL, 'Other', 'ENA:0000069', NULL, NULL, NULL, NULL, NULL, false);
+
+
+--
+-- Name: analysis_analysis_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.analysis_analysis_id_seq', 10, true);
+
+
+--
+-- Name: archive_merging_scheme_archive_merging_scheme_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.archive_merging_scheme_archive_merging_scheme_id_seq', 1, false);
+
+
+--
+-- Name: artifact_artifact_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.artifact_artifact_id_seq', 9, true);
+
+
+--
+-- Name: checksum_algorithm_checksum_algorithm_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.checksum_algorithm_checksum_algorithm_id_seq', 1, true);
+
+
+--
+-- Name: column_controlled_vocabularies_controlled_vocab_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.column_controlled_vocabularies_controlled_vocab_id_seq', 1, false);
+
+
+--
+-- Name: command_output_command_output_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.command_output_command_output_id_seq', 7, true);
+
+
+--
+-- Name: command_parameter_command_parameter_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.command_parameter_command_parameter_id_seq', 98, true);
+
+
+--
+-- Name: controlled_vocab_controlled_vocab_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.controlled_vocab_controlled_vocab_id_seq', 1, false);
+
+
+--
+-- Name: controlled_vocab_values_vocab_value_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.controlled_vocab_values_vocab_value_id_seq', 1, false);
+
+
+--
+-- Name: data_directory_data_directory_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.data_directory_data_directory_id_seq', 16, true);
+
+
+--
+-- Name: data_type_data_type_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.data_type_data_type_id_seq', 12, true);
+
+
+--
+-- Name: default_parameter_set_default_parameter_set_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.default_parameter_set_default_parameter_set_id_seq', 16, true);
+
+
+--
+-- Name: default_workflow_default_workflow_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.default_workflow_default_workflow_id_seq', 3, true);
+
+
+--
+-- Name: default_workflow_edge_default_workflow_edge_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.default_workflow_edge_default_workflow_edge_id_seq', 3, true);
+
+
+--
+-- Name: default_workflow_node_default_workflow_node_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.default_workflow_node_default_workflow_node_id_seq', 6, true);
+
+
+--
+-- Name: filepath_data_directory_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.filepath_data_directory_id_seq', 1, false);
+
+
+--
+-- Name: filepath_filepath_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.filepath_filepath_id_seq', 22, true);
+
+
+--
+-- Name: filepath_type_filepath_type_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.filepath_type_filepath_type_id_seq', 25, true);
+
+
+--
+-- Name: filetype_filetype_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.filetype_filetype_id_seq', 10, true);
+
+
+--
+-- Name: investigation_investigation_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.investigation_investigation_id_seq', 1, true);
+
+
+--
+-- Name: logging_logging_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.logging_logging_id_seq', 2, true);
+
+
+--
+-- Name: message_message_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.message_message_id_seq', 3, true);
+
+
+--
+-- Name: parameter_artifact_type_command_parameter_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.parameter_artifact_type_command_parameter_id_seq', 1, false);
+
+
+--
+-- Name: portal_type_portal_type_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.portal_type_portal_type_id_seq', 3, true);
+
+
+--
+-- Name: prep_template_prep_template_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.prep_template_prep_template_id_seq', 2, true);
+
+
+--
+-- Name: processing_job_status_processing_job_status_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.processing_job_status_processing_job_status_id_seq', 6, true);
+
+
+--
+-- Name: processing_job_workflow_processing_job_workflow_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.processing_job_workflow_processing_job_workflow_id_seq', 2, true);
+
+
+--
+-- Name: reference_reference_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.reference_reference_id_seq', 2, true);
+
+
+--
+-- Name: severity_severity_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.severity_severity_id_seq', 3, true);
+
+
+--
+-- Name: software_command_command_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.software_command_command_id_seq', 28, true);
+
+
+--
+-- Name: software_software_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.software_software_id_seq', 4, true);
+
+
+--
+-- Name: software_type_software_type_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.software_type_software_type_id_seq', 3, true);
+
+
+--
+-- Name: study_person_study_person_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.study_person_study_person_id_seq', 3, true);
+
+
+--
+-- Name: study_status_study_status_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.study_status_study_status_id_seq', 5, true);
+
+
+--
+-- Name: study_study_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.study_study_id_seq', 1, true);
+
+
+--
+-- Name: term_term_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.term_term_id_seq', 1, false);
+
+
+--
+-- Name: timeseries_type_timeseries_type_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.timeseries_type_timeseries_type_id_seq', 10, true);
+
+
+--
+-- Name: user_level_user_level_id_seq; Type: SEQUENCE SET; Schema: qiita; Owner: antoniog
+--
+
+SELECT pg_catalog.setval('qiita.user_level_user_level_id_seq', 7, true);
+
+
+--
+-- PostgreSQL database dump complete
+--
diff --git a/qiita_db/support_files/qiita-db-settings.sql b/qiita_db/support_files/qiita-db-settings.sql
index 23a16ac70..ff9922da8 100644
--- a/qiita_db/support_files/qiita-db-settings.sql
+++ b/qiita_db/support_files/qiita-db-settings.sql
@@ -2,5 +2,7 @@ CREATE TABLE settings (
test bool DEFAULT True NOT NULL,
base_data_dir varchar NOT NULL,
base_work_dir varchar NOT NULL,
- current_patch varchar DEFAULT 'unpatched' NOT NULL
+ current_patch varchar DEFAULT 'unpatched' NOT NULL,
+ max_preparation_samples INTEGER DEFAULT 800,
+ max_artifacts_in_workflow INTEGER DEFAULT 35
);
diff --git a/qiita_db/support_files/qiita-db-unpatched.sql b/qiita_db/support_files/qiita-db-unpatched.sql
index e542f80ac..1ce86de39 100644
--- a/qiita_db/support_files/qiita-db-unpatched.sql
+++ b/qiita_db/support_files/qiita-db-unpatched.sql
@@ -1,914 +1,5350 @@
+--
+-- PostgreSQL database dump
+--
+
+-- Dumped from database version 13.9
+-- Dumped by pg_dump version 13.9
+
+-- SET statement_timeout = 0;
+-- SET lock_timeout = 0;
+-- SET idle_in_transaction_session_timeout = 0;
+-- SET client_encoding = 'UTF8';
+-- SET standard_conforming_strings = on;
+-- SELECT pg_catalog.set_config('search_path', '', false);
+-- SET check_function_bodies = false;
+-- SET xmloption = content;
+-- SET client_min_messages = warning;
+-- SET row_security = off;
+
+--
+-- Name: qiita; Type: SCHEMA; Schema: -
+--
+
CREATE SCHEMA qiita;
-CREATE TABLE qiita.analysis_status (
- analysis_status_id bigserial NOT NULL,
- status varchar NOT NULL,
- CONSTRAINT pk_analysis_status PRIMARY KEY ( analysis_status_id ),
- CONSTRAINT idx_analysis_status UNIQUE ( status )
- );
-CREATE TABLE qiita.checksum_algorithm (
- checksum_algorithm_id bigserial NOT NULL,
- name varchar NOT NULL,
- CONSTRAINT pk_checksum_algorithm PRIMARY KEY ( checksum_algorithm_id ),
- CONSTRAINT idx_checksum_algorithm UNIQUE ( name )
- );
-CREATE TABLE qiita.command (
- command_id bigserial NOT NULL,
- name varchar NOT NULL,
- command varchar NOT NULL,
- input varchar NOT NULL,
- required varchar NOT NULL,
- optional varchar NOT NULL,
- output varchar NOT NULL,
- CONSTRAINT pk_command PRIMARY KEY ( command_id )
- );
+--
+-- Name: uuid-ossp; Type: EXTENSION; Schema: -; Owner: -
+--
-COMMENT ON TABLE qiita.command IS 'Available commands for jobs';
+CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA public;
-COMMENT ON COLUMN qiita.command.command_id IS 'Unique identifier for function';
-COMMENT ON COLUMN qiita.command.command IS 'What command to call to run this function';
+--
+-- Name: EXTENSION "uuid-ossp"; Type: COMMENT; Schema: -; Owner: -
+--
-COMMENT ON COLUMN qiita.command.input IS 'JSON of input options for the command';
+COMMENT ON EXTENSION "uuid-ossp" IS 'generate universally unique identifiers (UUIDs)';
+
+
+--
+-- Name: archive_upsert(integer, character varying, character varying); Type: FUNCTION; Schema: public
+--
+
+CREATE OR REPLACE FUNCTION public.archive_upsert(amsi integer, af character varying, afv character varying) RETURNS void
+ LANGUAGE plpgsql
+ AS $$
+BEGIN
+ LOOP
+ -- first try to update the key
+ UPDATE qiita.archive_feature_value SET archive_feature_value = afv WHERE archive_merging_scheme_id = amsi AND archive_feature = af;
+ IF found THEN
+ RETURN;
+ END IF;
+ -- not there, so try to insert the key
+ -- if someone else inserts the same key concurrently,
+ -- we could get a unique-key failure
+ BEGIN
+ INSERT INTO qiita.archive_feature_value (archive_merging_scheme_id, archive_feature, archive_feature_value) VALUES (amsi, af, afv);
+ RETURN;
+ EXCEPTION WHEN unique_violation THEN
+ -- Do nothing, and loop to try the UPDATE again.
+ END;
+ END LOOP;
+END;
+$$;
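+
+-- Usage sketch (values are illustrative, not part of this dump):
+--   SELECT public.archive_upsert(1, 'feature-A', '0.97');
+-- updates the row for (merging scheme 1, 'feature-A') when it exists and
+-- inserts it otherwise, retrying if a concurrent insert wins the race.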
+
+
+
+--
+-- Name: isnumeric(text); Type: FUNCTION; Schema: public
+--
+
+CREATE OR REPLACE FUNCTION public.isnumeric(text) RETURNS boolean
+ LANGUAGE plpgsql IMMUTABLE STRICT
+ AS $_$
+DECLARE x NUMERIC;
+BEGIN
+ x = $1::NUMERIC;
+ RETURN TRUE;
+EXCEPTION WHEN others THEN
+ RETURN FALSE;
+END;
+$_$;
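+
+-- Example: reports whether a text value casts cleanly to NUMERIC.
+--   SELECT public.isnumeric('3.14');  -- true
+--   SELECT public.isnumeric('abc');   -- false
+-- Declared STRICT, so a NULL input returns NULL rather than false.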
+
+
+
+SET default_tablespace = '';
+
+SET default_table_access_method = heap;
+
+--
+-- Name: parent_artifact; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.parent_artifact (
+ artifact_id bigint NOT NULL,
+ parent_id bigint NOT NULL
+);
+
+
+
+--
+-- Name: artifact_ancestry(bigint); Type: FUNCTION; Schema: qiita
+--
+
+CREATE OR REPLACE FUNCTION qiita.artifact_ancestry(a_id bigint) RETURNS SETOF qiita.parent_artifact
+ LANGUAGE plpgsql
+ AS $$
+BEGIN
+ IF EXISTS(SELECT * FROM qiita.parent_artifact WHERE artifact_id = a_id) THEN
+ RETURN QUERY WITH RECURSIVE root AS (
+ SELECT artifact_id, parent_id
+ FROM qiita.parent_artifact
+ WHERE artifact_id = a_id
+ UNION
+ SELECT p.artifact_id, p.parent_id
+ FROM qiita.parent_artifact p
+ JOIN root r ON (r.parent_id = p.artifact_id)
+ )
+ SELECT DISTINCT artifact_id, parent_id
+ FROM root;
+ END IF;
+END
+$$;
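+
+-- Usage sketch (artifact id illustrative):
+--   SELECT * FROM qiita.artifact_ancestry(4);
+-- yields every (artifact_id, parent_id) edge from artifact 4 up to its
+-- root(s); an artifact with no parents yields no rows.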
+
+
+
+--
+-- Name: artifact_descendants(bigint); Type: FUNCTION; Schema: qiita
+--
+
+CREATE OR REPLACE FUNCTION qiita.artifact_descendants(a_id bigint) RETURNS SETOF qiita.parent_artifact
+ LANGUAGE plpgsql
+ AS $$
+BEGIN
+ IF EXISTS(SELECT * FROM qiita.parent_artifact WHERE parent_id = a_id) THEN
+ RETURN QUERY WITH RECURSIVE root AS (
+ SELECT artifact_id, parent_id
+ FROM qiita.parent_artifact
+ WHERE parent_id = a_id
+ UNION
+ SELECT p.artifact_id, p.parent_id
+ FROM qiita.parent_artifact p
+ JOIN root r ON (r.artifact_id = p.parent_id)
+ )
+ SELECT DISTINCT artifact_id, parent_id
+ FROM root;
+ END IF;
+END
+$$;
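+
+-- Usage sketch: the downward counterpart of artifact_ancestry.
+--   SELECT * FROM qiita.artifact_descendants(1);
+-- yields every (artifact_id, parent_id) edge in the subtree rooted at
+-- artifact 1 (id illustrative).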
+
+
+
+--
+-- Name: artifact_descendants_with_jobs(bigint); Type: FUNCTION; Schema: qiita
+--
+
+CREATE OR REPLACE FUNCTION qiita.artifact_descendants_with_jobs(a_id bigint) RETURNS TABLE(processing_job_id uuid, input_id bigint, output_id bigint)
+ LANGUAGE plpgsql
+ AS $$
+BEGIN
+ IF EXISTS(SELECT * FROM qiita.artifact WHERE artifact_id = a_id) THEN
+ RETURN QUERY WITH RECURSIVE root AS (
+ SELECT qiita.artifact_processing_job.processing_job_id AS processing_job_id,
+ qiita.artifact_processing_job.artifact_id AS input_id,
+ qiita.artifact_output_processing_job.artifact_id AS output_id
+ FROM qiita.artifact_processing_job
+ LEFT JOIN qiita.artifact_output_processing_job USING (processing_job_id)
+ WHERE qiita.artifact_processing_job.artifact_id = a_id
+ UNION
+ SELECT apj.processing_job_id AS processing_job_id,
+ apj.artifact_id AS input_id,
+ aopj.artifact_id AS output_id
+ FROM qiita.artifact_processing_job apj
+ LEFT JOIN qiita.artifact_output_processing_job aopj USING (processing_job_id)
+ JOIN root r ON (r.output_id = apj.artifact_id)
+ )
+ SELECT DISTINCT root.processing_job_id, root.input_id, root.output_id
+ FROM root
+ WHERE root.output_id IS NOT NULL
+ ORDER BY root.input_id ASC, root.output_id ASC;
+ END IF;
+END
+$$;
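+
+-- Usage sketch (artifact id illustrative):
+--   SELECT * FROM qiita.artifact_descendants_with_jobs(1);
+-- yields one (processing_job_id, input_id, output_id) row per job edge
+-- reachable from artifact 1, omitting jobs without an output artifact.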
+
+
+
+--
+-- Name: bioms_from_preparation_artifacts(bigint); Type: FUNCTION; Schema: qiita
+--
+
+CREATE OR REPLACE FUNCTION qiita.bioms_from_preparation_artifacts(prep_id bigint) RETURNS text
+ LANGUAGE plpgsql
+ AS $$
+DECLARE
+ artifacts TEXT := NULL;
+BEGIN
+ SELECT array_to_string(array_agg(artifact_id), ',') INTO artifacts
+ FROM qiita.preparation_artifact
+ LEFT JOIN qiita.artifact USING (artifact_id)
+ LEFT JOIN qiita.artifact_type USING (artifact_type_id)
+ LEFT JOIN qiita.software_command USING (command_id)
+ LEFT JOIN qiita.software USING (software_id)
+ LEFT JOIN qiita.visibility USING (visibility_id)
+ WHERE
+ prep_template_id = prep_id AND
+ artifact_type = 'BIOM' AND
+ NOT deprecated AND
+ visibility != 'archived';
+ RETURN artifacts;
+END
+$$;
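+
+-- Usage sketch (prep id illustrative):
+--   SELECT qiita.bioms_from_preparation_artifacts(1);
+-- returns the preparation's BIOM artifact ids as comma-separated text,
+-- skipping artifacts from deprecated commands and archived artifacts.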
+
+
+
+--
+-- Name: check_collection_access(); Type: FUNCTION; Schema: qiita
+--
+
+CREATE OR REPLACE FUNCTION qiita.check_collection_access() RETURNS trigger
+ LANGUAGE plpgsql STABLE
+ AS $$
+ BEGIN
+ IF NOT EXISTS (
+ SELECT aj.* FROM qiita.analysis_job aj
+ LEFT JOIN qiita.collection_analysis ca
+ ON aj.analysis_id = ca.analysis_id
+ WHERE aj.job_id = NEW.job_id and ca.collection_id = NEW.collection_id
+ ) THEN
+            RAISE EXCEPTION 'Jobs inserted that do not belong to the collection' USING ERRCODE = 'unique_violation';
+ RETURN OLD;
+ ELSE
+ RETURN NEW;
+ END IF;
+ RETURN NULL;
+ END;
+ $$;
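+
+-- Trigger sketch: meant to guard the collection/job link table so a job
+-- can only join a collection whose analyses include it. The table name
+-- below is illustrative; the actual trigger is defined elsewhere in the
+-- dump.
+--   CREATE TRIGGER verify_collection_access
+--     BEFORE INSERT OR UPDATE ON qiita.collection_job
+--     FOR EACH ROW EXECUTE PROCEDURE qiita.check_collection_access();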
+
+
+
+--
+-- Name: find_artifact_roots(bigint); Type: FUNCTION; Schema: qiita
+--
+
+CREATE OR REPLACE FUNCTION qiita.find_artifact_roots(a_id bigint) RETURNS SETOF bigint
+ LANGUAGE plpgsql
+ AS $$
+BEGIN
+ IF EXISTS(SELECT * FROM qiita.parent_artifact WHERE artifact_id = a_id) THEN
+ RETURN QUERY WITH RECURSIVE root AS (
+ SELECT artifact_id, parent_id
+ FROM qiita.parent_artifact
+ WHERE artifact_id = a_id
+ UNION
+ SELECT p.artifact_id, p.parent_id
+ FROM qiita.parent_artifact p
+ JOIN root r ON (r.parent_id = p.artifact_id)
+ )
+ SELECT DISTINCT parent_id
+ FROM root
+ WHERE parent_id NOT IN (SELECT artifact_id
+ FROM qiita.parent_artifact);
+ ELSE
+ RETURN QUERY SELECT a_id;
+ END IF;
+END
+$$;
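+
+-- Usage sketch (artifact id illustrative):
+--   SELECT * FROM qiita.find_artifact_roots(4);
+-- returns the root artifact id(s) of artifact 4's lineage; an artifact
+-- without parents is reported as its own root.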
+
+
+
+--
+-- Name: parent_processing_job; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.parent_processing_job (
+ parent_id uuid NOT NULL,
+ child_id uuid NOT NULL
+);
+
+
+
+--
+-- Name: get_processing_workflow_edges(bigint); Type: FUNCTION; Schema: qiita
+--
+
+CREATE OR REPLACE FUNCTION qiita.get_processing_workflow_edges(wf_id bigint) RETURNS SETOF qiita.parent_processing_job
+ LANGUAGE plpgsql
+ AS $$
+BEGIN
+ RETURN QUERY WITH RECURSIVE edges AS (
+ SELECT parent_id, child_id
+ FROM qiita.parent_processing_job
+ WHERE parent_id IN (SELECT processing_job_id
+ FROM qiita.processing_job_workflow_root
+ WHERE processing_job_workflow_id = wf_id)
+ UNION
+ SELECT p.parent_id, p.child_id
+ FROM qiita.parent_processing_job p
+ JOIN edges e ON (e.child_id = p.parent_id)
+ )
+ SELECT DISTINCT parent_id, child_id
+ FROM edges;
+END
+$$;
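+
+-- Usage sketch (workflow id illustrative):
+--   SELECT * FROM qiita.get_processing_workflow_edges(1);
+-- returns the distinct (parent_id, child_id) job pairs reachable from
+-- the workflow's root jobs.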
+
+
+
+--
+-- Name: get_processing_workflow_roots(uuid); Type: FUNCTION; Schema: qiita
+--
+
+CREATE OR REPLACE FUNCTION qiita.get_processing_workflow_roots(job_id uuid) RETURNS SETOF uuid
+ LANGUAGE plpgsql
+ AS $$
+BEGIN
+ IF EXISTS(SELECT * FROM qiita.processing_job_workflow_root WHERE processing_job_id = job_id) THEN
+ RETURN QUERY SELECT job_id;
+ ELSE
+ RETURN QUERY WITH RECURSIVE root AS (
+ SELECT child_id, parent_id
+ FROM qiita.parent_processing_job
+ WHERE child_id = job_id
+ UNION
+ SELECT p.child_id, p.parent_id
+ FROM qiita.parent_processing_job p
+ JOIN root r ON (r.parent_id = p.child_id)
+ )
+ SELECT DISTINCT parent_id
+ FROM root
+ WHERE parent_id NOT IN (SELECT child_id FROM qiita.parent_processing_job);
+ END IF;
+END
+$$;
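+
+-- Usage sketch (uuid illustrative):
+--   SELECT * FROM qiita.get_processing_workflow_roots(
+--     'b72369f9-a886-4193-8d3d-f7b504168e75');
+-- returns the job itself when it is already a workflow root; otherwise
+-- it climbs parent edges and returns the root job id(s).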
+
+
+
+--
+-- Name: json_object_set_key(json, text, anyelement); Type: FUNCTION; Schema: qiita
+--
+
+CREATE OR REPLACE FUNCTION qiita.json_object_set_key(json json, key_to_set text, value_to_set anyelement) RETURNS json
+ LANGUAGE sql IMMUTABLE STRICT
+ AS $$
+SELECT concat('{', string_agg(to_json("key") || ':' || "value", ','), '}')::json
+ FROM (SELECT *
+ FROM json_each("json")
+ WHERE "key" <> "key_to_set"
+ UNION ALL
+ SELECT "key_to_set", to_json("value_to_set")) AS "fields"
+$$;
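+
+-- Example: set (or replace) a single key in a json value.
+--   SELECT qiita.json_object_set_key('{"a": 1}'::json, 'b', 2);
+--   -- -> {"a":1,"b":2}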
-COMMENT ON COLUMN qiita.command.required IS 'JSON of required options for the command';
-COMMENT ON COLUMN qiita.command.optional IS 'JSON of optional options for command';
-COMMENT ON COLUMN qiita.command.output IS 'JSON of output options for the command';
-CREATE TABLE qiita.controlled_vocab (
- controlled_vocab_id bigserial NOT NULL,
- controlled_vocab varchar NOT NULL,
- CONSTRAINT pk_controlled_vocabularies PRIMARY KEY ( controlled_vocab_id )
- );
+CREATE TABLE qiita.analysis (
+ analysis_id bigint NOT NULL,
+ email character varying NOT NULL,
+ name character varying NOT NULL,
+ description character varying NOT NULL,
+ pmid character varying,
+ "timestamp" timestamp with time zone DEFAULT CURRENT_TIMESTAMP,
+ dflt boolean DEFAULT false NOT NULL,
+ logging_id bigint,
+ slurm_reservation character varying DEFAULT ''::character varying NOT NULL
+);
+
+
+--
+-- Name: TABLE analysis; Type: COMMENT; Schema: qiita
+--
-CREATE TABLE qiita.controlled_vocab_values (
- vocab_value_id bigserial NOT NULL,
- controlled_vocab_id bigint NOT NULL,
- term varchar NOT NULL,
- order_by varchar NOT NULL,
- default_item varchar ,
- CONSTRAINT pk_controlled_vocab_values PRIMARY KEY ( vocab_value_id ),
- CONSTRAINT fk_controlled_vocab_values FOREIGN KEY ( controlled_vocab_id ) REFERENCES qiita.controlled_vocab( controlled_vocab_id ) ON DELETE CASCADE ON UPDATE CASCADE
- );
+COMMENT ON TABLE qiita.analysis IS 'Holds analysis information';
-CREATE INDEX idx_controlled_vocab_values ON qiita.controlled_vocab_values ( controlled_vocab_id );
-CREATE TABLE qiita.data_directory (
- data_directory_id bigserial NOT NULL,
- data_type varchar NOT NULL,
- mountpoint varchar NOT NULL,
- subdirectory varchar NOT NULL,
- active bool NOT NULL,
- CONSTRAINT pk_data_directory PRIMARY KEY ( data_directory_id )
- );
+--
+-- Name: COLUMN analysis.analysis_id; Type: COMMENT; Schema: qiita
+--
-CREATE TABLE qiita.data_type (
- data_type_id bigserial NOT NULL,
- data_type varchar NOT NULL,
- CONSTRAINT pk_data_type PRIMARY KEY ( data_type_id ),
- CONSTRAINT idx_data_type UNIQUE ( data_type )
- );
+COMMENT ON COLUMN qiita.analysis.analysis_id IS 'Unique identifier for analysis';
-COMMENT ON COLUMN qiita.data_type.data_type IS 'Data type (16S, metabolome, etc) the job will use';
-CREATE TABLE qiita.emp_status (
- emp_status_id bigserial NOT NULL,
- emp_status varchar NOT NULL,
- CONSTRAINT pk_emp_status PRIMARY KEY ( emp_status_id ),
- CONSTRAINT idx_emp_status UNIQUE ( emp_status )
- );
+--
+-- Name: COLUMN analysis.email; Type: COMMENT; Schema: qiita
+--
-COMMENT ON TABLE qiita.emp_status IS 'All possible statuses for projects relating to EMP. Whether they are part of, processed in accordance to, or not part of EMP.';
+COMMENT ON COLUMN qiita.analysis.email IS 'Email for user who owns the analysis';
-CREATE TABLE qiita.filepath_type (
- filepath_type_id bigserial NOT NULL,
- filepath_type varchar ,
- CONSTRAINT pk_filepath_type PRIMARY KEY ( filepath_type_id ),
- CONSTRAINT idx_filepath_type UNIQUE ( filepath_type )
- );
-
-CREATE TABLE qiita.filetype (
- filetype_id bigserial NOT NULL,
- type varchar NOT NULL,
- CONSTRAINT pk_filetype PRIMARY KEY ( filetype_id ),
- CONSTRAINT idx_filetype UNIQUE ( type )
- );
-
-COMMENT ON TABLE qiita.filetype IS 'Type of file (FASTA, FASTQ, SPECTRA, etc)';
-
-CREATE TABLE qiita.job_status (
- job_status_id bigserial NOT NULL,
- status varchar NOT NULL,
- CONSTRAINT pk_job_status PRIMARY KEY ( job_status_id ),
- CONSTRAINT idx_job_status_0 UNIQUE ( status )
- );
-CREATE TABLE qiita.mixs_field_description (
- column_name varchar NOT NULL,
- data_type varchar NOT NULL,
- desc_or_value varchar NOT NULL,
- definition varchar NOT NULL,
- min_length integer ,
- active integer NOT NULL,
- CONSTRAINT pk_mixs_field_description PRIMARY KEY ( column_name )
- );
+--
+-- Name: COLUMN analysis.name; Type: COMMENT; Schema: qiita
+--
-CREATE TABLE qiita.ontology (
- ontology_id bigint NOT NULL,
- ontology varchar NOT NULL,
- fully_loaded bool NOT NULL,
- fullname varchar ,
- query_url varchar ,
- source_url varchar ,
- definition text ,
- load_date date NOT NULL,
- CONSTRAINT pk_ontology PRIMARY KEY ( ontology_id ),
- CONSTRAINT idx_ontology UNIQUE ( ontology )
- );
+COMMENT ON COLUMN qiita.analysis.name IS 'Name of the analysis';
-CREATE TABLE qiita.portal_type (
- portal_type_id bigserial NOT NULL,
- portal varchar NOT NULL,
- description varchar NOT NULL,
- CONSTRAINT pk_portal_type PRIMARY KEY ( portal_type_id )
- );
-COMMENT ON TABLE qiita.portal_type IS 'What portals are available to show a study in';
+--
+-- Name: COLUMN analysis.pmid; Type: COMMENT; Schema: qiita
+--
-CREATE TABLE qiita.preprocessed_data (
- preprocessed_data_id bigserial NOT NULL,
- preprocessed_params_table varchar NOT NULL,
- preprocessed_params_id bigint NOT NULL,
- submitted_to_insdc_status varchar DEFAULT 'not submitted' NOT NULL,
- ebi_submission_accession varchar ,
- ebi_study_accession varchar ,
- data_type_id bigint NOT NULL,
- link_filepaths_status varchar DEFAULT 'idle' NOT NULL,
- CONSTRAINT pk_preprocessed_data PRIMARY KEY ( preprocessed_data_id ),
- CONSTRAINT fk_preprocessed_data FOREIGN KEY ( data_type_id ) REFERENCES qiita.data_type( data_type_id )
- );
-
-CREATE INDEX idx_preprocessed_data ON qiita.preprocessed_data ( data_type_id );
-
-COMMENT ON COLUMN qiita.preprocessed_data.preprocessed_params_table IS 'Name of table holding the params';
-
-CREATE TABLE qiita.preprocessed_sequence_454_params (
- preprocessed_params_id bigserial NOT NULL,
- trim_length integer NOT NULL,
- CONSTRAINT pk_preprocessed_sequence_454_params PRIMARY KEY ( preprocessed_params_id )
- );
-
-COMMENT ON TABLE qiita.preprocessed_sequence_454_params IS 'Parameters used for processing sequence data.';
-
-CREATE TABLE qiita.preprocessed_sequence_illumina_params (
- preprocessed_params_id bigserial NOT NULL,
- max_bad_run_length integer DEFAULT 3 NOT NULL,
- min_per_read_length_fraction real DEFAULT 0.75 NOT NULL,
- sequence_max_n integer DEFAULT 0 NOT NULL,
- rev_comp_barcode bool DEFAULT FALSE NOT NULL,
- rev_comp_mapping_barcodes bool DEFAULT FALSE NOT NULL,
- rev_comp bool DEFAULT FALSE NOT NULL,
- phred_quality_threshold integer DEFAULT 3 NOT NULL,
- barcode_type varchar DEFAULT 'golay_12' NOT NULL,
- max_barcode_errors real DEFAULT 1.5 NOT NULL,
- CONSTRAINT pk_preprocessed_sequence_illumina_params PRIMARY KEY ( preprocessed_params_id )
- );
-
-COMMENT ON TABLE qiita.preprocessed_sequence_illumina_params IS 'Parameters used for processing illumina sequence data.';
-
-CREATE TABLE qiita.preprocessed_spectra_params (
- preprocessed_params_id bigserial NOT NULL,
- col varchar ,
- CONSTRAINT pk_preprocessed_spectra_params PRIMARY KEY ( preprocessed_params_id )
- );
-
-COMMENT ON TABLE qiita.preprocessed_spectra_params IS 'Parameters used for processing spectra data.';
-
-CREATE TABLE qiita.processed_data (
- processed_data_id bigserial NOT NULL,
- processed_params_table varchar NOT NULL,
- processed_params_id bigint NOT NULL,
- processed_date timestamp NOT NULL,
- data_type_id bigint NOT NULL,
- link_filepaths_status varchar DEFAULT 'idle' NOT NULL,
- CONSTRAINT pk_processed_data PRIMARY KEY ( processed_data_id ),
- CONSTRAINT fk_processed_data FOREIGN KEY ( data_type_id ) REFERENCES qiita.data_type( data_type_id )
- );
-
-CREATE INDEX idx_processed_data ON qiita.processed_data ( data_type_id );
-
-COMMENT ON COLUMN qiita.processed_data.processed_params_table IS 'Name of table holding processing params';
-
-COMMENT ON COLUMN qiita.processed_data.processed_params_id IS 'Link to a table with the parameters used to generate processed data';
-
-CREATE TABLE qiita.raw_data (
- raw_data_id bigserial NOT NULL,
- filetype_id bigint NOT NULL,
- link_filepaths_status varchar DEFAULT 'idle' NOT NULL,
- CONSTRAINT pk_raw_data UNIQUE ( raw_data_id ) ,
- CONSTRAINT pk_raw_data_0 PRIMARY KEY ( raw_data_id ),
- CONSTRAINT fk_raw_data_filetype FOREIGN KEY ( filetype_id ) REFERENCES qiita.filetype( filetype_id )
- );
-
-CREATE INDEX idx_raw_data ON qiita.raw_data ( filetype_id );
-
-CREATE TABLE qiita.required_sample_info_status (
- required_sample_info_status_id bigserial NOT NULL,
- status varchar ,
- CONSTRAINT pk_sample_status PRIMARY KEY ( required_sample_info_status_id ),
- CONSTRAINT idx_required_sample_info_status UNIQUE ( status )
- );
+COMMENT ON COLUMN qiita.analysis.pmid IS 'PMID of paper from the analysis';
-CREATE TABLE qiita.severity (
- severity_id serial NOT NULL,
- severity varchar NOT NULL,
- CONSTRAINT pk_severity PRIMARY KEY ( severity_id ),
- CONSTRAINT idx_severity UNIQUE ( severity )
- );
-CREATE TABLE qiita.study_person (
- study_person_id bigserial NOT NULL,
- name varchar NOT NULL,
- email varchar NOT NULL,
- affiliation varchar NOT NULL,
- address varchar(100) ,
- phone varchar ,
- CONSTRAINT pk_study_person PRIMARY KEY ( study_person_id ),
- CONSTRAINT idx_study_person UNIQUE ( name, affiliation )
- );
+--
+-- Name: analysis_analysis_id_seq; Type: SEQUENCE; Schema: qiita
+--
-COMMENT ON TABLE qiita.study_person IS 'Contact information for the various people involved in a study';
+CREATE SEQUENCE qiita.analysis_analysis_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
-COMMENT ON COLUMN qiita.study_person.affiliation IS 'The institution with which this person is affiliated';
-CREATE TABLE qiita.study_status (
- study_status_id bigserial NOT NULL,
- status varchar NOT NULL,
- description varchar NOT NULL,
- CONSTRAINT pk_study_status PRIMARY KEY ( study_status_id ),
- CONSTRAINT idx_study_status UNIQUE ( status )
- );
+--
+-- Name: analysis_analysis_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
-CREATE TABLE qiita.term (
- term_id bigserial NOT NULL,
- ontology_id bigint NOT NULL,
- old_term_id bigint DEFAULT NULL ,
- term varchar NOT NULL,
- identifier varchar ,
- definition varchar ,
- namespace varchar ,
- is_obsolete bool DEFAULT 'false' ,
- is_root_term bool ,
- is_leaf bool ,
- user_defined bool DEFAULT False NOT NULL,
- CONSTRAINT pk_term PRIMARY KEY ( term_id ),
- CONSTRAINT fk_term_ontology FOREIGN KEY ( ontology_id ) REFERENCES qiita.ontology( ontology_id )
- );
-
-CREATE INDEX idx_term ON qiita.term ( ontology_id );
+ALTER SEQUENCE qiita.analysis_analysis_id_seq OWNED BY qiita.analysis.analysis_id;
-COMMENT ON COLUMN qiita.term.old_term_id IS 'Identifier used in the old system, we are keeping this for consistency';
-COMMENT ON COLUMN qiita.term.user_defined IS 'Whether or not this term was defined by a user';
+--
+-- Name: analysis_artifact; Type: TABLE; Schema: qiita
+--
-CREATE TABLE qiita.timeseries_type (
- timeseries_type_id bigserial NOT NULL,
- timeseries_type varchar NOT NULL,
- CONSTRAINT pk_timeseries_type PRIMARY KEY ( timeseries_type_id ),
- CONSTRAINT idx_timeseries_type UNIQUE ( timeseries_type )
- );
+CREATE TABLE qiita.analysis_artifact (
+ analysis_id bigint NOT NULL,
+ artifact_id bigint NOT NULL
+);
-CREATE TABLE qiita.user_level (
- user_level_id serial NOT NULL,
- name varchar NOT NULL,
- description text NOT NULL,
- CONSTRAINT pk_user_level PRIMARY KEY ( user_level_id ),
- CONSTRAINT idx_user_level UNIQUE ( name )
- );
-COMMENT ON TABLE qiita.user_level IS 'Holds available user levels';
+--
+-- Name: analysis_filepath; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.analysis_filepath (
+ analysis_id bigint NOT NULL,
+ filepath_id bigint NOT NULL,
+ data_type_id bigint
+);
+
+
+
+--
+-- Name: TABLE analysis_filepath; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.analysis_filepath IS 'Stores the link between an analysis and the data files used for the analysis.';
+
+
+--
+-- Name: analysis_portal; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.analysis_portal (
+ analysis_id bigint NOT NULL,
+ portal_type_id bigint NOT NULL
+);
+
+
+
+--
+-- Name: TABLE analysis_portal; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.analysis_portal IS 'Controls what analyses are visible on what portals';
+
+
+--
+-- Name: analysis_processing_job; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.analysis_processing_job (
+ analysis_id bigint NOT NULL,
+ processing_job_id uuid NOT NULL
+);
+
+
+
+--
+-- Name: analysis_sample; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.analysis_sample (
+ analysis_id bigint NOT NULL,
+ sample_id character varying NOT NULL,
+ artifact_id bigint NOT NULL
+);
+
+
+
+--
+-- Name: analysis_users; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.analysis_users (
+ analysis_id bigint NOT NULL,
+ email character varying NOT NULL
+);
+
+
+
+--
+-- Name: TABLE analysis_users; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.analysis_users IS 'Links analyses to the users they are shared with';
+
+
+--
+-- Name: archive_feature_value; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.archive_feature_value (
+ archive_merging_scheme_id bigint NOT NULL,
+ archive_feature character varying NOT NULL,
+ archive_feature_value character varying NOT NULL
+);
+
+
+
+--
+-- Name: archive_merging_scheme; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.archive_merging_scheme (
+ archive_merging_scheme_id bigint NOT NULL,
+ archive_merging_scheme character varying NOT NULL
+);
+
+
+
+--
+-- Name: archive_merging_scheme_archive_merging_scheme_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.archive_merging_scheme_archive_merging_scheme_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: archive_merging_scheme_archive_merging_scheme_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.archive_merging_scheme_archive_merging_scheme_id_seq OWNED BY qiita.archive_merging_scheme.archive_merging_scheme_id;
+
+
+--
+-- Name: artifact; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.artifact (
+ artifact_id bigint NOT NULL,
+ generated_timestamp timestamp without time zone NOT NULL,
+ command_id bigint,
+ command_parameters json,
+ visibility_id bigint NOT NULL,
+ artifact_type_id integer,
+ data_type_id bigint NOT NULL,
+ submitted_to_vamps boolean DEFAULT false NOT NULL,
+ name character varying DEFAULT 'noname'::character varying NOT NULL,
+ archive_data jsonb
+);
+
+
+
+--
+-- Name: TABLE artifact; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.artifact IS 'Represents data in the system';
+
+
+--
+-- Name: COLUMN artifact.visibility_id; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.artifact.visibility_id IS 'If the artifact is sandbox, awaiting_for_approval, private or public';
+
+
+--
+-- Name: artifact_artifact_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.artifact_artifact_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: artifact_artifact_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.artifact_artifact_id_seq OWNED BY qiita.artifact.artifact_id;
+
+
+--
+-- Name: artifact_filepath; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.artifact_filepath (
+ artifact_id bigint NOT NULL,
+ filepath_id bigint NOT NULL
+);
+
+
+
+--
+-- Name: artifact_output_processing_job; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.artifact_output_processing_job (
+ artifact_id bigint NOT NULL,
+ processing_job_id uuid NOT NULL,
+ command_output_id bigint NOT NULL
+);
+
+
+
+--
+-- Name: artifact_processing_job; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.artifact_processing_job (
+ artifact_id bigint NOT NULL,
+ processing_job_id uuid NOT NULL
+);
+
+
+
+--
+-- Name: artifact_type; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.artifact_type (
+ artifact_type_id bigint NOT NULL,
+ artifact_type character varying NOT NULL,
+ description character varying,
+ can_be_submitted_to_ebi boolean DEFAULT false NOT NULL,
+ can_be_submitted_to_vamps boolean DEFAULT false NOT NULL,
+ is_user_uploadable boolean DEFAULT false
+);
+
+
+
+--
+-- Name: TABLE artifact_type; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.artifact_type IS 'Type of file (FASTA, FASTQ, SPECTRA, etc)';
+
+
+--
+-- Name: artifact_type_filepath_type; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.artifact_type_filepath_type (
+ artifact_type_id bigint NOT NULL,
+ filepath_type_id bigint NOT NULL,
+ required boolean DEFAULT true NOT NULL
+);
+
+
+
+--
+-- Name: checksum_algorithm; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.checksum_algorithm (
+ checksum_algorithm_id bigint NOT NULL,
+ name character varying NOT NULL
+);
+
+
+
+--
+-- Name: checksum_algorithm_checksum_algorithm_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.checksum_algorithm_checksum_algorithm_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: checksum_algorithm_checksum_algorithm_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.checksum_algorithm_checksum_algorithm_id_seq OWNED BY qiita.checksum_algorithm.checksum_algorithm_id;
-COMMENT ON COLUMN qiita.user_level.name IS 'One of the user levels (admin, user, guest, etc)';
+
+--
+-- Name: column_controlled_vocabularies; Type: TABLE; Schema: qiita
+--
CREATE TABLE qiita.column_controlled_vocabularies (
- controlled_vocab_id bigserial NOT NULL,
- column_name varchar NOT NULL,
- CONSTRAINT idx_column_controlled_vocabularies PRIMARY KEY ( controlled_vocab_id, column_name ),
- CONSTRAINT fk_column_controlled_vocabularies FOREIGN KEY ( column_name ) REFERENCES qiita.mixs_field_description( column_name ) ,
- CONSTRAINT fk_column_controlled_vocab2 FOREIGN KEY ( controlled_vocab_id ) REFERENCES qiita.controlled_vocab( controlled_vocab_id )
- );
+ controlled_vocab_id bigint NOT NULL,
+ column_name character varying NOT NULL
+);
+
-CREATE INDEX idx_column_controlled_vocabularies_0 ON qiita.column_controlled_vocabularies ( column_name );
-CREATE INDEX idx_column_controlled_vocabularies_1 ON qiita.column_controlled_vocabularies ( controlled_vocab_id );
+--
+-- Name: TABLE column_controlled_vocabularies; Type: COMMENT; Schema: qiita
+--
COMMENT ON TABLE qiita.column_controlled_vocabularies IS 'Table relates a column with a controlled vocabulary.';
+
+--
+-- Name: column_controlled_vocabularies_controlled_vocab_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.column_controlled_vocabularies_controlled_vocab_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: column_controlled_vocabularies_controlled_vocab_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.column_controlled_vocabularies_controlled_vocab_id_seq OWNED BY qiita.column_controlled_vocabularies.controlled_vocab_id;
+
+
+--
+-- Name: column_ontology; Type: TABLE; Schema: qiita
+--
+
CREATE TABLE qiita.column_ontology (
- column_name varchar NOT NULL,
- ontology_short_name varchar NOT NULL,
- bioportal_id integer NOT NULL,
- ontology_branch_id varchar ,
- CONSTRAINT idx_column_ontology PRIMARY KEY ( column_name, ontology_short_name ),
- CONSTRAINT fk_column_ontology FOREIGN KEY ( column_name ) REFERENCES qiita.mixs_field_description( column_name )
- );
+ column_name character varying NOT NULL,
+ ontology_short_name character varying NOT NULL,
+ bioportal_id integer NOT NULL,
+ ontology_branch_id character varying
+);
+
-CREATE INDEX idx_column_ontology_0 ON qiita.column_ontology ( column_name );
+
+--
+-- Name: TABLE column_ontology; Type: COMMENT; Schema: qiita
+--
COMMENT ON TABLE qiita.column_ontology IS 'This table relates a column with an ontology.';
-CREATE TABLE qiita.command_data_type (
- command_id bigint NOT NULL,
- data_type_id bigint NOT NULL,
- CONSTRAINT idx_command_data_type PRIMARY KEY ( command_id, data_type_id ),
- CONSTRAINT fk_command_data_type FOREIGN KEY ( command_id ) REFERENCES qiita.command( command_id ) ,
- CONSTRAINT fk_command_data_type_0 FOREIGN KEY ( data_type_id ) REFERENCES qiita.data_type( data_type_id )
- );
-CREATE INDEX idx_command_data_type_0 ON qiita.command_data_type ( command_id );
-CREATE INDEX idx_command_data_type_1 ON qiita.command_data_type ( data_type_id );
+--
+-- Name: command_output; Type: TABLE; Schema: qiita
+--
+CREATE TABLE qiita.command_output (
+ command_output_id bigint NOT NULL,
+ name character varying NOT NULL,
+ command_id bigint NOT NULL,
+ artifact_type_id bigint NOT NULL,
+ check_biom_merge boolean DEFAULT false NOT NULL
+);
-CREATE TABLE qiita.filepath (
- filepath_id bigserial NOT NULL,
- filepath varchar NOT NULL,
- filepath_type_id bigint NOT NULL,
- checksum varchar NOT NULL,
- checksum_algorithm_id bigint NOT NULL,
- data_directory_id bigserial ,
- CONSTRAINT pk_filepath PRIMARY KEY ( filepath_id ),
- CONSTRAINT fk_filepath FOREIGN KEY ( filepath_type_id ) REFERENCES qiita.filepath_type( filepath_type_id ) ,
- CONSTRAINT fk_filepath_0 FOREIGN KEY ( checksum_algorithm_id ) REFERENCES qiita.checksum_algorithm( checksum_algorithm_id ) ,
- CONSTRAINT fk_filepath_data_directory FOREIGN KEY ( data_directory_id ) REFERENCES qiita.data_directory( data_directory_id ) ON DELETE RESTRICT ON UPDATE RESTRICT
- );
-
-CREATE INDEX idx_filepath ON qiita.filepath ( filepath_type_id );
-
-CREATE INDEX idx_filepath_0 ON qiita.filepath ( data_directory_id );
-CREATE TABLE qiita.investigation (
- investigation_id bigserial NOT NULL,
- name varchar NOT NULL,
- description varchar NOT NULL,
- contact_person_id bigint ,
- CONSTRAINT pk_investigation PRIMARY KEY ( investigation_id ),
- CONSTRAINT fk_investigation_study_person FOREIGN KEY ( contact_person_id ) REFERENCES qiita.study_person( study_person_id )
- );
-CREATE INDEX idx_investigation ON qiita.investigation ( contact_person_id );
-COMMENT ON TABLE qiita.investigation IS 'Overarching investigation information.An investigation comprises one or more individual studies.';
+--
+-- Name: command_output_command_output_id_seq; Type: SEQUENCE; Schema: qiita
+--
+CREATE SEQUENCE qiita.command_output_command_output_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
-COMMENT ON COLUMN qiita.investigation.description IS 'Describes the overarching goal of the investigation';
-CREATE TABLE qiita.logging (
- logging_id bigserial NOT NULL,
- time timestamp NOT NULL,
- severity_id integer NOT NULL,
- msg varchar NOT NULL,
- information varchar ,
- CONSTRAINT pk_logging PRIMARY KEY ( logging_id ),
- CONSTRAINT fk_logging_severity FOREIGN KEY ( severity_id ) REFERENCES qiita.severity( severity_id )
- );
-CREATE INDEX idx_logging_0 ON qiita.logging ( severity_id );
-COMMENT ON COLUMN qiita.logging.time IS 'Time the error was thrown';
+--
+-- Name: command_output_command_output_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+ALTER SEQUENCE qiita.command_output_command_output_id_seq OWNED BY qiita.command_output.command_output_id;
-COMMENT ON COLUMN qiita.logging.msg IS 'Error message thrown';
-COMMENT ON COLUMN qiita.logging.information IS 'Other applicable information (depending on error)';
-CREATE TABLE qiita.prep_template (
- prep_template_id bigserial NOT NULL,
- data_type_id bigint NOT NULL,
- raw_data_id bigint NOT NULL,
- preprocessing_status varchar DEFAULT 'not_preprocessed' NOT NULL,
- investigation_type varchar ,
- CONSTRAINT pk_prep_template PRIMARY KEY ( prep_template_id ),
- CONSTRAINT fk_prep_template_data_type FOREIGN KEY ( data_type_id ) REFERENCES qiita.data_type( data_type_id ) ,
- CONSTRAINT fk_prep_template_raw_data FOREIGN KEY ( raw_data_id ) REFERENCES qiita.raw_data( raw_data_id )
- );
+--
+-- Name: command_parameter; Type: TABLE; Schema: qiita
+--
+CREATE TABLE qiita.command_parameter (
+ command_id bigint NOT NULL,
+ parameter_name character varying NOT NULL,
+ parameter_type character varying NOT NULL,
+ required boolean NOT NULL,
+ default_value character varying,
+ command_parameter_id bigint NOT NULL,
+ name_order integer,
+ check_biom_merge boolean DEFAULT false NOT NULL
+);
-CREATE INDEX idx_prep_template ON qiita.prep_template ( data_type_id );
-CREATE INDEX idx_prep_template_0 ON qiita.prep_template ( raw_data_id );
-COMMENT ON COLUMN qiita.prep_template.investigation_type IS 'The investigation type (e.g., one of the values from EBI`s set of known types)';
-CREATE TABLE qiita.prep_template_preprocessed_data (
- prep_template_id bigint NOT NULL,
- preprocessed_data_id bigint NOT NULL,
- CONSTRAINT idx_prep_template_preprocessed_data PRIMARY KEY ( prep_template_id, preprocessed_data_id ),
- CONSTRAINT fk_prep_template_id FOREIGN KEY ( prep_template_id ) REFERENCES qiita.prep_template( prep_template_id ) ,
- CONSTRAINT fk_prep_template_preprocessed_data FOREIGN KEY ( preprocessed_data_id ) REFERENCES qiita.preprocessed_data( preprocessed_data_id )
- );
+--
+-- Name: command_parameter_command_parameter_id_seq; Type: SEQUENCE; Schema: qiita
+--
+CREATE SEQUENCE qiita.command_parameter_command_parameter_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
-CREATE INDEX idx_prep_template_preprocessed_data_0 ON qiita.prep_template_preprocessed_data ( prep_template_id );
-CREATE INDEX idx_prep_template_preprocessed_data_1 ON qiita.prep_template_preprocessed_data ( preprocessed_data_id );
-CREATE TABLE qiita.preprocessed_filepath (
- preprocessed_data_id bigint NOT NULL,
- filepath_id bigint NOT NULL,
- CONSTRAINT idx_preprocessed_filepath PRIMARY KEY ( preprocessed_data_id, filepath_id ),
- CONSTRAINT fk_preprocessed_filepath FOREIGN KEY ( preprocessed_data_id ) REFERENCES qiita.preprocessed_data( preprocessed_data_id ) ,
- CONSTRAINT fk_preprocessed_filepath_0 FOREIGN KEY ( filepath_id ) REFERENCES qiita.filepath( filepath_id )
- );
-CREATE INDEX idx_preprocessed_filepath_0 ON qiita.preprocessed_filepath ( preprocessed_data_id );
+--
+-- Name: command_parameter_command_parameter_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+ALTER SEQUENCE qiita.command_parameter_command_parameter_id_seq OWNED BY qiita.command_parameter.command_parameter_id;
-CREATE INDEX idx_preprocessed_filepath_1 ON qiita.preprocessed_filepath ( filepath_id );
-CREATE TABLE qiita.preprocessed_processed_data (
- preprocessed_data_id bigint NOT NULL,
- processed_data_id bigint NOT NULL,
- CONSTRAINT idx_preprocessed_processed_data PRIMARY KEY ( preprocessed_data_id, processed_data_id ),
- CONSTRAINT fk_preprocessed_processed_data FOREIGN KEY ( preprocessed_data_id ) REFERENCES qiita.preprocessed_data( preprocessed_data_id ) ,
- CONSTRAINT fk_preprocessed_processed_data_0 FOREIGN KEY ( processed_data_id ) REFERENCES qiita.processed_data( processed_data_id )
- );
-CREATE INDEX idx_preprocessed_processed_data_0 ON qiita.preprocessed_processed_data ( preprocessed_data_id );
+--
+-- Name: controlled_vocab; Type: TABLE; Schema: qiita
+--
+CREATE TABLE qiita.controlled_vocab (
+ controlled_vocab_id bigint NOT NULL,
+ controlled_vocab character varying NOT NULL
+);
-CREATE INDEX idx_preprocessed_processed_data_1 ON qiita.preprocessed_processed_data ( processed_data_id );
-CREATE TABLE qiita.processed_filepath (
- processed_data_id bigint NOT NULL,
- filepath_id bigint NOT NULL,
- CONSTRAINT idx_processed_filepath PRIMARY KEY ( processed_data_id, filepath_id ),
- CONSTRAINT fk_processed_data_filepath FOREIGN KEY ( processed_data_id ) REFERENCES qiita.processed_data( processed_data_id ) ,
- CONSTRAINT fk_processed_data_filepath_0 FOREIGN KEY ( filepath_id ) REFERENCES qiita.filepath( filepath_id )
- );
-CREATE TABLE qiita.qiita_user (
- email varchar NOT NULL,
- user_level_id integer DEFAULT 5 NOT NULL,
- password varchar NOT NULL,
- name varchar ,
- affiliation varchar ,
- address varchar ,
- phone varchar ,
- user_verify_code varchar ,
- pass_reset_code varchar ,
- pass_reset_timestamp timestamp ,
- CONSTRAINT pk_user PRIMARY KEY ( email ),
- CONSTRAINT fk_user_user_level FOREIGN KEY ( user_level_id ) REFERENCES qiita.user_level( user_level_id ) ON UPDATE RESTRICT
- );
-
-CREATE INDEX idx_user ON qiita.qiita_user ( user_level_id );
-COMMENT ON TABLE qiita.qiita_user IS 'Holds all user information';
+--
+-- Name: controlled_vocab_controlled_vocab_id_seq; Type: SEQUENCE; Schema: qiita
+--
+CREATE SEQUENCE qiita.controlled_vocab_controlled_vocab_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
-COMMENT ON COLUMN qiita.qiita_user.user_level_id IS 'user level';
-COMMENT ON COLUMN qiita.qiita_user.user_verify_code IS 'Code for initial user email verification';
-COMMENT ON COLUMN qiita.qiita_user.pass_reset_code IS 'Randomly generated code for password reset';
-COMMENT ON COLUMN qiita.qiita_user.pass_reset_timestamp IS 'Time the reset code was generated';
+--
+-- Name: controlled_vocab_controlled_vocab_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+ALTER SEQUENCE qiita.controlled_vocab_controlled_vocab_id_seq OWNED BY qiita.controlled_vocab.controlled_vocab_id;
-CREATE TABLE qiita.raw_filepath (
- raw_data_id bigint NOT NULL,
- filepath_id bigint NOT NULL,
- CONSTRAINT idx_raw_filepath PRIMARY KEY ( raw_data_id, filepath_id ),
- CONSTRAINT fk_raw_filepath FOREIGN KEY ( filepath_id ) REFERENCES qiita.filepath( filepath_id ) ,
- CONSTRAINT fk_raw_filepath_0 FOREIGN KEY ( raw_data_id ) REFERENCES qiita.raw_data( raw_data_id )
- );
-CREATE INDEX idx_raw_filepath_0 ON qiita.raw_filepath ( filepath_id );
-CREATE INDEX idx_raw_filepath_1 ON qiita.raw_filepath ( raw_data_id );
+--
+-- Name: controlled_vocab_values; Type: TABLE; Schema: qiita
+--
+CREATE TABLE qiita.controlled_vocab_values (
+ vocab_value_id bigint NOT NULL,
+ controlled_vocab_id bigint NOT NULL,
+ term character varying NOT NULL,
+ order_by character varying NOT NULL,
+ default_item character varying
+);
-CREATE TABLE qiita.reference (
- reference_id bigserial NOT NULL,
- reference_name varchar NOT NULL,
- reference_version varchar ,
- sequence_filepath bigint NOT NULL,
- taxonomy_filepath bigint ,
- tree_filepath bigint ,
- CONSTRAINT pk_reference PRIMARY KEY ( reference_id ),
- CONSTRAINT fk_reference_sequence_filepath FOREIGN KEY ( sequence_filepath ) REFERENCES qiita.filepath( filepath_id ) ,
- CONSTRAINT fk_reference_taxonomy_filepath FOREIGN KEY ( taxonomy_filepath ) REFERENCES qiita.filepath( filepath_id ) ,
- CONSTRAINT fk_reference_tree_filepath FOREIGN KEY ( tree_filepath ) REFERENCES qiita.filepath( filepath_id )
- );
-CREATE INDEX idx_reference ON qiita.reference ( sequence_filepath );
-CREATE INDEX idx_reference_0 ON qiita.reference ( taxonomy_filepath );
-CREATE INDEX idx_reference_1 ON qiita.reference ( tree_filepath );
+--
+-- Name: controlled_vocab_values_vocab_value_id_seq; Type: SEQUENCE; Schema: qiita
+--
+CREATE SEQUENCE qiita.controlled_vocab_values_vocab_value_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
-CREATE TABLE qiita.study (
- study_id bigserial NOT NULL,
- email varchar NOT NULL,
- study_status_id bigint NOT NULL,
- emp_person_id bigint ,
- first_contact timestamp DEFAULT current_timestamp NOT NULL,
- funding varchar ,
- timeseries_type_id bigint NOT NULL,
- lab_person_id bigint ,
- metadata_complete bool NOT NULL,
- mixs_compliant bool NOT NULL,
- most_recent_contact timestamp ,
- portal_type_id bigint NOT NULL,
- principal_investigator_id bigint NOT NULL,
- reprocess bool NOT NULL,
- spatial_series bool ,
- study_title varchar NOT NULL,
- study_alias varchar NOT NULL,
- study_description text NOT NULL,
- study_abstract text NOT NULL,
- vamps_id varchar ,
- CONSTRAINT pk_study PRIMARY KEY ( study_id ),
- CONSTRAINT unique_study_title UNIQUE ( study_title ) ,
- CONSTRAINT fk_study_user FOREIGN KEY ( email ) REFERENCES qiita.qiita_user( email ) ,
- CONSTRAINT fk_study_study_status FOREIGN KEY ( study_status_id ) REFERENCES qiita.study_status( study_status_id ) ,
- CONSTRAINT fk_study_study_emp_person FOREIGN KEY ( emp_person_id ) REFERENCES qiita.study_person( study_person_id ) ,
- CONSTRAINT fk_study_study_lab_person FOREIGN KEY ( lab_person_id ) REFERENCES qiita.study_person( study_person_id ) ,
- CONSTRAINT fk_study_study_pi_person FOREIGN KEY ( principal_investigator_id ) REFERENCES qiita.study_person( study_person_id ) ,
- CONSTRAINT fk_study_timeseries_type FOREIGN KEY ( timeseries_type_id ) REFERENCES qiita.timeseries_type( timeseries_type_id ) ,
- CONSTRAINT fk_study FOREIGN KEY ( portal_type_id ) REFERENCES qiita.portal_type( portal_type_id )
- );
-
-CREATE INDEX idx_study ON qiita.study ( email );
-
-CREATE INDEX idx_study_0 ON qiita.study ( study_status_id );
-
-CREATE INDEX idx_study_1 ON qiita.study ( emp_person_id );
-
-CREATE INDEX idx_study_2 ON qiita.study ( lab_person_id );
-
-CREATE INDEX idx_study_3 ON qiita.study ( principal_investigator_id );
-
-CREATE INDEX idx_study_4 ON qiita.study ( timeseries_type_id );
-
-CREATE INDEX idx_study_5 ON qiita.study ( portal_type_id );
-COMMENT ON COLUMN qiita.study.study_id IS 'Unique name for study';
-COMMENT ON COLUMN qiita.study.email IS 'Email of study owner';
-COMMENT ON COLUMN qiita.study.timeseries_type_id IS 'What type of timeseries this study is (or is not)
-Controlled Vocabulary';
+--
+-- Name: controlled_vocab_values_vocab_value_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+ALTER SEQUENCE qiita.controlled_vocab_values_vocab_value_id_seq OWNED BY qiita.controlled_vocab_values.vocab_value_id;
-CREATE TABLE qiita.study_experimental_factor (
- study_id bigint NOT NULL,
- efo_id bigint NOT NULL,
- CONSTRAINT idx_study_experimental_factor PRIMARY KEY ( study_id, efo_id ),
- CONSTRAINT fk_study_experimental_factor FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id )
- );
-CREATE INDEX idx_study_experimental_factor_0 ON qiita.study_experimental_factor ( study_id );
-COMMENT ON TABLE qiita.study_experimental_factor IS 'EFO ontological link of experimental factors to studies';
+--
+-- Name: data_directory; Type: TABLE; Schema: qiita
+--
+CREATE TABLE qiita.data_directory (
+ data_directory_id bigint NOT NULL,
+ data_type character varying NOT NULL,
+ mountpoint character varying NOT NULL,
+ subdirectory boolean DEFAULT false NOT NULL,
+ active boolean NOT NULL
+);
-CREATE TABLE qiita.study_pmid (
- study_id bigint NOT NULL,
- pmid varchar NOT NULL,
- CONSTRAINT idx_study_pmid PRIMARY KEY ( study_id, pmid ),
- CONSTRAINT fk_study_pmid_study FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id )
- );
-CREATE INDEX idx_study_pmid_0 ON qiita.study_pmid ( study_id );
-COMMENT ON TABLE qiita.study_pmid IS 'Links a study to all PMIDs for papers created from study';
-CREATE TABLE qiita.study_preprocessed_data (
- study_id bigint NOT NULL,
- preprocessed_data_id bigint NOT NULL,
- CONSTRAINT idx_study_preprocessed_data PRIMARY KEY ( study_id, preprocessed_data_id ),
- CONSTRAINT fk_study_preprocessed_data FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id ) ,
- CONSTRAINT fk_study_preprocessed_data_0 FOREIGN KEY ( preprocessed_data_id ) REFERENCES qiita.preprocessed_data( preprocessed_data_id )
- );
+--
+-- Name: data_directory_data_directory_id_seq; Type: SEQUENCE; Schema: qiita
+--
+CREATE SEQUENCE qiita.data_directory_data_directory_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
-CREATE INDEX idx_study_preprocessed_data_0 ON qiita.study_preprocessed_data ( study_id );
-CREATE INDEX idx_study_preprocessed_data_1 ON qiita.study_preprocessed_data ( preprocessed_data_id );
-CREATE TABLE qiita.study_processed_data (
- study_id bigint NOT NULL,
- processed_data_id bigint NOT NULL,
- CONSTRAINT idx_study_processed_data PRIMARY KEY ( study_id, processed_data_id ),
- CONSTRAINT pk_study_processed_data UNIQUE ( processed_data_id ) ,
- CONSTRAINT fk_study_processed_data FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id ) ,
- CONSTRAINT fk_study_processed_data_0 FOREIGN KEY ( processed_data_id ) REFERENCES qiita.processed_data( processed_data_id )
- );
-CREATE INDEX idx_study_processed_data_0 ON qiita.study_processed_data ( study_id );
+--
+-- Name: data_directory_data_directory_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+ALTER SEQUENCE qiita.data_directory_data_directory_id_seq OWNED BY qiita.data_directory.data_directory_id;
-CREATE TABLE qiita.study_raw_data (
- study_id bigint NOT NULL,
- raw_data_id bigint NOT NULL,
- CONSTRAINT idx_study_raw_data_0 PRIMARY KEY ( study_id, raw_data_id ),
- CONSTRAINT fk_study_raw_data_study FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id ) ,
- CONSTRAINT fk_study_raw_data_raw_data FOREIGN KEY ( raw_data_id ) REFERENCES qiita.raw_data( raw_data_id )
- );
-CREATE INDEX idx_study_raw_data ON qiita.study_raw_data ( study_id );
-COMMENT ON TABLE qiita.study_raw_data IS 'links study to its raw data';
+--
+-- Name: data_type; Type: TABLE; Schema: qiita
+--
+CREATE TABLE qiita.data_type (
+ data_type_id bigint NOT NULL,
+ data_type character varying NOT NULL
+);
-CREATE TABLE qiita.study_sample_columns (
- study_id bigint NOT NULL,
- column_name varchar(100) NOT NULL,
- column_type varchar NOT NULL,
- CONSTRAINT idx_study_mapping_columns PRIMARY KEY ( study_id, column_name, column_type ),
- CONSTRAINT fk_study_mapping_columns_study FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id )
- );
-CREATE INDEX idx_study_mapping_columns_study_id ON qiita.study_sample_columns ( study_id );
-COMMENT ON TABLE qiita.study_sample_columns IS 'Holds information on which metadata columns are available for the study sample template';
-CREATE TABLE qiita.study_users (
- study_id bigint NOT NULL,
- email varchar NOT NULL,
- CONSTRAINT idx_study_users PRIMARY KEY ( study_id, email ),
- CONSTRAINT fk_study_users_study FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id ) ,
- CONSTRAINT fk_study_users_user FOREIGN KEY ( email ) REFERENCES qiita.qiita_user( email )
- );
+--
+-- Name: COLUMN data_type.data_type; Type: COMMENT; Schema: qiita
+--
+COMMENT ON COLUMN qiita.data_type.data_type IS 'Data type (16S, metabolome, etc) the job will use';
-CREATE INDEX idx_study_users_0 ON qiita.study_users ( study_id );
-CREATE INDEX idx_study_users_1 ON qiita.study_users ( email );
-COMMENT ON TABLE qiita.study_users IS 'Links shared studies to users they are shared with';
+--
+-- Name: data_type_data_type_id_seq; Type: SEQUENCE; Schema: qiita
+--
+CREATE SEQUENCE qiita.data_type_data_type_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
-CREATE TABLE qiita.analysis (
- analysis_id bigserial NOT NULL,
- email varchar NOT NULL,
- name varchar NOT NULL,
- description varchar NOT NULL,
- analysis_status_id bigint NOT NULL,
- pmid varchar ,
- timestamp timestamptz DEFAULT current_timestamp ,
- CONSTRAINT pk_analysis PRIMARY KEY ( analysis_id ),
- CONSTRAINT fk_analysis_user FOREIGN KEY ( email ) REFERENCES qiita.qiita_user( email ) ,
- CONSTRAINT fk_analysis_analysis_status FOREIGN KEY ( analysis_status_id ) REFERENCES qiita.analysis_status( analysis_status_id )
- );
-
-CREATE INDEX idx_analysis_email ON qiita.analysis ( email );
-
-CREATE INDEX idx_analysis_status_id ON qiita.analysis ( analysis_status_id );
-COMMENT ON TABLE qiita.analysis IS 'hHolds analysis information';
-COMMENT ON COLUMN qiita.analysis.analysis_id IS 'Unique identifier for analysis';
-COMMENT ON COLUMN qiita.analysis.email IS 'Email for user who owns the analysis';
+--
+-- Name: data_type_data_type_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+ALTER SEQUENCE qiita.data_type_data_type_id_seq OWNED BY qiita.data_type.data_type_id;
-COMMENT ON COLUMN qiita.analysis.name IS 'Name of the analysis';
-COMMENT ON COLUMN qiita.analysis.pmid IS 'PMID of paper from the analysis';
-CREATE TABLE qiita.analysis_chain (
- parent_id bigint NOT NULL,
- child_id bigint NOT NULL,
- CONSTRAINT idx_analysis_chain_1 PRIMARY KEY ( parent_id, child_id ),
- CONSTRAINT fk_analysis_chain FOREIGN KEY ( parent_id ) REFERENCES qiita.analysis( analysis_id ) ,
- CONSTRAINT fk_analysis_chain_0 FOREIGN KEY ( child_id ) REFERENCES qiita.analysis( analysis_id )
- );
+--
+-- Name: default_parameter_set; Type: TABLE; Schema: qiita
+--
+CREATE TABLE qiita.default_parameter_set (
+ default_parameter_set_id bigint NOT NULL,
+ command_id bigint NOT NULL,
+ parameter_set_name character varying NOT NULL,
+ parameter_set json NOT NULL
+);
-CREATE INDEX idx_analysis_chain ON qiita.analysis_chain ( parent_id );
-CREATE INDEX idx_analysis_chain_0 ON qiita.analysis_chain ( child_id );
-COMMENT ON TABLE qiita.analysis_chain IS 'Keeps track of the chain of analysis edits. Tracks what previous analysis a given analysis came from.If a given analysis is not in child_id, it is the root of the chain. ';
-CREATE TABLE qiita.analysis_filepath (
- analysis_id bigint NOT NULL,
- filepath_id bigint NOT NULL,
- data_type_id bigint ,
- CONSTRAINT idx_analysis_filepath_1 PRIMARY KEY ( analysis_id, filepath_id ),
- CONSTRAINT fk_analysis_filepath FOREIGN KEY ( analysis_id ) REFERENCES qiita.analysis( analysis_id ) ,
- CONSTRAINT fk_analysis_filepath_0 FOREIGN KEY ( filepath_id ) REFERENCES qiita.filepath( filepath_id ) ,
- CONSTRAINT fk_analysis_filepath_1 FOREIGN KEY ( data_type_id ) REFERENCES qiita.data_type( data_type_id )
- );
+--
+-- Name: default_parameter_set_default_parameter_set_id_seq; Type: SEQUENCE; Schema: qiita
+--
+CREATE SEQUENCE qiita.default_parameter_set_default_parameter_set_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
-CREATE INDEX idx_analysis_filepath ON qiita.analysis_filepath ( analysis_id );
-CREATE INDEX idx_analysis_filepath_0 ON qiita.analysis_filepath ( filepath_id );
-CREATE INDEX idx_analysis_filepath_2 ON qiita.analysis_filepath ( data_type_id );
-COMMENT ON TABLE qiita.analysis_filepath IS 'Stores link between analysis and the data file used for the analysis.';
+--
+-- Name: default_parameter_set_default_parameter_set_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+ALTER SEQUENCE qiita.default_parameter_set_default_parameter_set_id_seq OWNED BY qiita.default_parameter_set.default_parameter_set_id;
-CREATE TABLE qiita.analysis_users (
- analysis_id bigint NOT NULL,
- email varchar NOT NULL,
- CONSTRAINT idx_analysis_users PRIMARY KEY ( analysis_id, email ),
- CONSTRAINT fk_analysis_users_analysis FOREIGN KEY ( analysis_id ) REFERENCES qiita.analysis( analysis_id ) ON DELETE CASCADE ON UPDATE CASCADE,
- CONSTRAINT fk_analysis_users_user FOREIGN KEY ( email ) REFERENCES qiita.qiita_user( email ) ON DELETE CASCADE ON UPDATE CASCADE
- );
-CREATE INDEX idx_analysis_users_analysis ON qiita.analysis_users ( analysis_id );
-CREATE INDEX idx_analysis_users_email ON qiita.analysis_users ( email );
+--
+-- Name: default_workflow; Type: TABLE; Schema: qiita
+--
+CREATE TABLE qiita.default_workflow (
+ default_workflow_id bigint NOT NULL,
+ name character varying NOT NULL,
+ active boolean DEFAULT true,
+ description text,
+ artifact_type_id bigint DEFAULT 3 NOT NULL,
+ parameters jsonb DEFAULT '{"prep": {}, "sample": {}}'::jsonb NOT NULL
+);
-COMMENT ON TABLE qiita.analysis_users IS 'Links analyses to the users they are shared with';
-CREATE TABLE qiita.analysis_workflow (
- analysis_id bigint NOT NULL,
- step integer NOT NULL,
- CONSTRAINT pk_analysis_workflow PRIMARY KEY ( analysis_id ),
- CONSTRAINT fk_analysis_workflow FOREIGN KEY ( analysis_id ) REFERENCES qiita.analysis( analysis_id )
- );
-COMMENT ON TABLE qiita.analysis_workflow IS 'Stores what step in_production analyses are on.';
-CREATE TABLE qiita.investigation_study (
- investigation_id bigint NOT NULL,
- study_id bigint NOT NULL,
- CONSTRAINT idx_investigation_study PRIMARY KEY ( investigation_id, study_id ),
- CONSTRAINT fk_investigation_study FOREIGN KEY ( investigation_id ) REFERENCES qiita.investigation( investigation_id ) ,
- CONSTRAINT fk_investigation_study_study FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id )
- );
+--
+-- Name: default_workflow_data_type; Type: TABLE; Schema: qiita
+--
+CREATE TABLE qiita.default_workflow_data_type (
+ default_workflow_id bigint NOT NULL,
+ data_type_id bigint NOT NULL
+);
-CREATE INDEX idx_investigation_study_investigation ON qiita.investigation_study ( investigation_id );
-CREATE INDEX idx_investigation_study_study ON qiita.investigation_study ( study_id );
-CREATE TABLE qiita.job (
- job_id bigserial NOT NULL,
- data_type_id bigint NOT NULL,
- job_status_id bigint NOT NULL,
- command_id bigint NOT NULL,
- options varchar ,
- log_id bigint ,
- CONSTRAINT pk_job PRIMARY KEY ( job_id ),
- CONSTRAINT fk_job_function FOREIGN KEY ( command_id ) REFERENCES qiita.command( command_id ) ,
- CONSTRAINT fk_job_job_status_id FOREIGN KEY ( job_status_id ) REFERENCES qiita.job_status( job_status_id ) ,
- CONSTRAINT fk_job_data_type FOREIGN KEY ( data_type_id ) REFERENCES qiita.data_type( data_type_id ) ,
- CONSTRAINT fk_job FOREIGN KEY ( log_id ) REFERENCES qiita.logging( logging_id )
- );
-CREATE INDEX idx_job_command ON qiita.job ( command_id );
+--
+-- Name: default_workflow_default_workflow_id_seq; Type: SEQUENCE; Schema: qiita
+--
+CREATE SEQUENCE qiita.default_workflow_default_workflow_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
-CREATE INDEX idx_job_status ON qiita.job ( job_status_id );
-CREATE INDEX idx_job_type ON qiita.job ( data_type_id );
-CREATE INDEX idx_job ON qiita.job ( log_id );
-COMMENT ON COLUMN qiita.job.job_id IS 'Unique identifier for job';
+--
+-- Name: default_workflow_default_workflow_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+ALTER SEQUENCE qiita.default_workflow_default_workflow_id_seq OWNED BY qiita.default_workflow.default_workflow_id;
-COMMENT ON COLUMN qiita.job.data_type_id IS 'What datatype (16s, metabolome, etc) job is run on.';
-COMMENT ON COLUMN qiita.job.command_id IS 'The Qiime or other function being run (alpha diversity, etc)';
-COMMENT ON COLUMN qiita.job.options IS 'Holds all options set for the job as a json string';
+--
+-- Name: default_workflow_edge; Type: TABLE; Schema: qiita
+--
+CREATE TABLE qiita.default_workflow_edge (
+ default_workflow_edge_id bigint NOT NULL,
+ parent_id bigint NOT NULL,
+ child_id bigint NOT NULL
+);
-COMMENT ON COLUMN qiita.job.log_id IS 'Reference to error if status is error';
-CREATE TABLE qiita.job_results_filepath (
- job_id bigint NOT NULL,
- filepath_id bigint NOT NULL,
- CONSTRAINT idx_job_results_filepath PRIMARY KEY ( job_id, filepath_id ),
- CONSTRAINT fk_job_results_filepath FOREIGN KEY ( job_id ) REFERENCES qiita.job( job_id ) ,
- CONSTRAINT fk_job_results_filepath_0 FOREIGN KEY ( filepath_id ) REFERENCES qiita.filepath( filepath_id )
- );
-CREATE INDEX idx_job_results_filepath_0 ON qiita.job_results_filepath ( job_id );
-CREATE INDEX idx_job_results_filepath_1 ON qiita.job_results_filepath ( filepath_id );
+--
+-- Name: default_workflow_edge_connections; Type: TABLE; Schema: qiita
+--
+CREATE TABLE qiita.default_workflow_edge_connections (
+ default_workflow_edge_id bigint NOT NULL,
+ parent_output_id bigint NOT NULL,
+ child_input_id bigint NOT NULL
+);
-COMMENT ON TABLE qiita.job_results_filepath IS 'Holds connection between jobs and the result filepaths';
-CREATE TABLE qiita.prep_columns (
- prep_template_id bigint NOT NULL,
- column_name varchar NOT NULL,
- column_type varchar NOT NULL,
- CONSTRAINT idx_prep_columns_0 PRIMARY KEY ( prep_template_id, column_name, column_type ),
- CONSTRAINT fk_prep_columns_prep_template FOREIGN KEY ( prep_template_id ) REFERENCES qiita.prep_template( prep_template_id )
- );
-CREATE INDEX idx_prep_columns_1 ON qiita.prep_columns ( prep_template_id );
-CREATE TABLE qiita.processed_params_sortmerna (
- processed_params_id bigserial NOT NULL,
- reference_id bigint NOT NULL,
- evalue float8 NOT NULL,
- max_pos integer NOT NULL,
- similarity float8 NOT NULL,
- coverage float8 NOT NULL,
- threads integer NOT NULL,
- CONSTRAINT pk_processed_params_sortmerna PRIMARY KEY ( processed_params_id ),
- CONSTRAINT fk_processed_params_sortmerna FOREIGN KEY ( reference_id ) REFERENCES qiita.reference( reference_id )
- );
+--
+-- Name: default_workflow_edge_default_workflow_edge_id_seq; Type: SEQUENCE; Schema: qiita
+--
+CREATE SEQUENCE qiita.default_workflow_edge_default_workflow_edge_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
-CREATE INDEX idx_processed_params_sortmerna ON qiita.processed_params_sortmerna ( reference_id );
-COMMENT ON TABLE qiita.processed_params_sortmerna IS 'Parameters used for processing data using method sortmerna';
-COMMENT ON COLUMN qiita.processed_params_sortmerna.reference_id IS 'What version of reference or type of reference used';
-CREATE TABLE qiita.processed_params_uclust (
- processed_params_id bigserial NOT NULL,
- reference_id bigint NOT NULL,
- similarity float8 DEFAULT 0.97 NOT NULL,
- enable_rev_strand_match bool DEFAULT TRUE NOT NULL,
- suppress_new_clusters bool DEFAULT TRUE NOT NULL,
- CONSTRAINT pk_processed_params_uclust PRIMARY KEY ( processed_params_id ),
- CONSTRAINT fk_processed_params_uclust FOREIGN KEY ( reference_id ) REFERENCES qiita.reference( reference_id )
- );
+--
+-- Name: default_workflow_edge_default_workflow_edge_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+ALTER SEQUENCE qiita.default_workflow_edge_default_workflow_edge_id_seq OWNED BY qiita.default_workflow_edge.default_workflow_edge_id;
-CREATE INDEX idx_processed_params_uclust ON qiita.processed_params_uclust ( reference_id );
-COMMENT ON TABLE qiita.processed_params_uclust IS 'Parameters used for processing data using method uclust';
-COMMENT ON COLUMN qiita.processed_params_uclust.reference_id IS 'What version of reference or type of reference used';
+--
+-- Name: default_workflow_node; Type: TABLE; Schema: qiita
+--
+CREATE TABLE qiita.default_workflow_node (
+ default_workflow_node_id bigint NOT NULL,
+ default_workflow_id bigint NOT NULL,
+ default_parameter_set_id bigint NOT NULL
+);
-CREATE TABLE qiita.required_sample_info (
- sample_id varchar NOT NULL,
- study_id bigint NOT NULL,
- physical_location varchar NOT NULL,
- has_physical_specimen bool NOT NULL,
- has_extracted_data bool NOT NULL,
- sample_type varchar NOT NULL,
- required_sample_info_status_id bigint NOT NULL,
- collection_timestamp timestamp NOT NULL,
- host_subject_id varchar NOT NULL,
- description varchar NOT NULL,
- latitude float8 NOT NULL,
- longitude float8 NOT NULL,
- CONSTRAINT idx_required_sample_info_1 PRIMARY KEY ( sample_id ),
- CONSTRAINT fk_required_sample_info FOREIGN KEY ( required_sample_info_status_id ) REFERENCES qiita.required_sample_info_status( required_sample_info_status_id ) ,
- CONSTRAINT fk_required_sample_info_study FOREIGN KEY ( study_id ) REFERENCES qiita.study( study_id )
- );
-CREATE INDEX idx_required_sample_info_0 ON qiita.required_sample_info ( required_sample_info_status_id );
-CREATE INDEX idx_required_sample_info ON qiita.required_sample_info ( study_id );
-COMMENT ON TABLE qiita.required_sample_info IS 'Required info for each sample. One row is one sample.';
+--
+-- Name: default_workflow_node_default_workflow_node_id_seq; Type: SEQUENCE; Schema: qiita
+--
+CREATE SEQUENCE qiita.default_workflow_node_default_workflow_node_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
-COMMENT ON COLUMN qiita.required_sample_info.physical_location IS 'Where the sample itself is stored';
-COMMENT ON COLUMN qiita.required_sample_info.has_physical_specimen IS 'Whether we have the full speciment or just DNA';
-COMMENT ON COLUMN qiita.required_sample_info.sample_type IS 'Controlled vocabulary of sample types';
-COMMENT ON COLUMN qiita.required_sample_info.required_sample_info_status_id IS 'What step of the pipeline the samples are in';
+--
+-- Name: default_workflow_node_default_workflow_node_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+ALTER SEQUENCE qiita.default_workflow_node_default_workflow_node_id_seq OWNED BY qiita.default_workflow_node.default_workflow_node_id;
-COMMENT ON COLUMN qiita.required_sample_info.latitude IS 'Latitude of the collection site';
-COMMENT ON COLUMN qiita.required_sample_info.longitude IS 'Longitude of the collection site';
-CREATE TABLE qiita.analysis_job (
- analysis_id bigint NOT NULL,
- job_id bigint NOT NULL,
- CONSTRAINT idx_analysis_jobs PRIMARY KEY ( analysis_id, job_id ),
- CONSTRAINT fk_analysis_job_analysis FOREIGN KEY ( analysis_id ) REFERENCES qiita.analysis( analysis_id ) ON DELETE CASCADE ON UPDATE CASCADE,
- CONSTRAINT fk_analysis_job_job FOREIGN KEY ( job_id ) REFERENCES qiita.job( job_id )
- );
+--
+-- Name: download_link; Type: TABLE; Schema: qiita
+--
+CREATE TABLE qiita.download_link (
+ jti character varying(32) NOT NULL,
+ jwt text NOT NULL,
+ exp timestamp without time zone NOT NULL
+);
-CREATE INDEX idx_analysis_job ON qiita.analysis_job ( analysis_id );
-CREATE INDEX idx_analysis_job_0 ON qiita.analysis_job ( job_id );
-COMMENT ON TABLE qiita.analysis_job IS 'Holds information for a one-to-many relation of analysis to the jobs in it';
-COMMENT ON COLUMN qiita.analysis_job.analysis_id IS 'Id of the analysis';
+--
+-- Name: ebi_run_accession; Type: TABLE; Schema: qiita
+--
+CREATE TABLE qiita.ebi_run_accession (
+ sample_id character varying NOT NULL,
+ ebi_run_accession character varying NOT NULL,
+ artifact_id bigint NOT NULL
+);
-COMMENT ON COLUMN qiita.analysis_job.job_id IS 'Id for a job that is part of the analysis';
-CREATE TABLE qiita.analysis_sample (
- analysis_id bigint NOT NULL,
- processed_data_id bigint NOT NULL,
- sample_id varchar NOT NULL,
- CONSTRAINT fk_analysis_sample_analysis FOREIGN KEY ( analysis_id ) REFERENCES qiita.analysis( analysis_id ) ,
- CONSTRAINT fk_analysis_processed_data FOREIGN KEY ( processed_data_id ) REFERENCES qiita.processed_data( processed_data_id ) ,
- CONSTRAINT fk_analysis_sample FOREIGN KEY ( sample_id ) REFERENCES qiita.required_sample_info( sample_id )
- );
-CREATE INDEX idx_analysis_sample ON qiita.analysis_sample ( analysis_id );
+--
+-- Name: environmental_package; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.environmental_package (
+ environmental_package_name character varying NOT NULL,
+ metadata_table character varying NOT NULL
+);
+
+
+
+--
+-- Name: COLUMN environmental_package.environmental_package_name; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.environmental_package.environmental_package_name IS 'The name of the environmental package';
+
+
+--
+-- Name: COLUMN environmental_package.metadata_table; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.environmental_package.metadata_table IS 'The name of the table that holds the pre-defined metadata columns for the environmental package';
+
+
+--
+-- Name: filepath; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.filepath (
+ filepath_id bigint NOT NULL,
+ filepath character varying NOT NULL,
+ filepath_type_id bigint NOT NULL,
+ checksum character varying NOT NULL,
+ checksum_algorithm_id bigint NOT NULL,
+ data_directory_id bigint NOT NULL,
+ fp_size bigint DEFAULT 0 NOT NULL
+);
+
+
+
+--
+-- Name: filepath_data_directory_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.filepath_data_directory_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: filepath_data_directory_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.filepath_data_directory_id_seq OWNED BY qiita.filepath.data_directory_id;
+
+
+--
+-- Name: filepath_filepath_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.filepath_filepath_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: filepath_filepath_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.filepath_filepath_id_seq OWNED BY qiita.filepath.filepath_id;
+
+
+--
+-- Name: filepath_type; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.filepath_type (
+ filepath_type_id bigint NOT NULL,
+ filepath_type character varying
+);
+
+
+
+--
+-- Name: filepath_type_filepath_type_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.filepath_type_filepath_type_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: filepath_type_filepath_type_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.filepath_type_filepath_type_id_seq OWNED BY qiita.filepath_type.filepath_type_id;
+
+
+--
+-- Name: filetype_filetype_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.filetype_filetype_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: filetype_filetype_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.filetype_filetype_id_seq OWNED BY qiita.artifact_type.artifact_type_id;
+
+
+--
+-- Name: investigation; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.investigation (
+ investigation_id bigint NOT NULL,
+ investigation_name character varying NOT NULL,
+ investigation_description character varying NOT NULL,
+ contact_person_id bigint
+);
+
+
+
+--
+-- Name: TABLE investigation; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.investigation IS 'Overarching investigation information. An investigation comprises one or more individual studies.';
+
+
+--
+-- Name: COLUMN investigation.investigation_description; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.investigation.investigation_description IS 'Describes the overarching goal of the investigation';
+
+
+--
+-- Name: investigation_investigation_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.investigation_investigation_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: investigation_investigation_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.investigation_investigation_id_seq OWNED BY qiita.investigation.investigation_id;
+
+
+--
+-- Name: investigation_study; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.investigation_study (
+ investigation_id bigint NOT NULL,
+ study_id bigint NOT NULL
+);
+
+
+
+--
+-- Name: logging; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.logging (
+ logging_id bigint NOT NULL,
+ "time" timestamp without time zone NOT NULL,
+ severity_id integer NOT NULL,
+ msg character varying NOT NULL,
+ information character varying
+);
+
+
+
+--
+-- Name: COLUMN logging."time"; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.logging."time" IS 'Time the error was thrown';
+
+
+--
+-- Name: COLUMN logging.msg; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.logging.msg IS 'Error message thrown';
+
+
+--
+-- Name: COLUMN logging.information; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.logging.information IS 'Other applicable information (depending on error)';
+
+
+--
+-- Name: logging_logging_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.logging_logging_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: logging_logging_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.logging_logging_id_seq OWNED BY qiita.logging.logging_id;
+
+
+--
+-- Name: message; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.message (
+ message_id bigint NOT NULL,
+ message character varying NOT NULL,
+ message_time timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
+ expiration timestamp without time zone
+);
+
+
+
+--
+-- Name: message_message_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.message_message_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: message_message_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.message_message_id_seq OWNED BY qiita.message.message_id;
+
+
+--
+-- Name: message_user; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.message_user (
+ email character varying NOT NULL,
+ message_id bigint NOT NULL,
+ read boolean DEFAULT false NOT NULL
+);
+
+
+
+--
+-- Name: COLUMN message_user.read; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.message_user.read IS 'Whether the message has been read or not.';
+
+
+--
+-- Name: mixs_field_description; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.mixs_field_description (
+ column_name character varying NOT NULL,
+ data_type character varying NOT NULL,
+ desc_or_value character varying NOT NULL,
+ definition character varying NOT NULL,
+ min_length integer,
+ active integer NOT NULL
+);
+
+
+
+--
+-- Name: oauth_identifiers; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.oauth_identifiers (
+ client_id character varying(50) NOT NULL,
+ client_secret character varying(255)
+);
+
+
+
+--
+-- Name: oauth_software; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.oauth_software (
+ software_id bigint NOT NULL,
+ client_id character varying NOT NULL
+);
+
+
+
+--
+-- Name: ontology; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.ontology (
+ ontology_id bigint NOT NULL,
+ ontology character varying NOT NULL,
+ fully_loaded boolean NOT NULL,
+ fullname character varying,
+ query_url character varying,
+ source_url character varying,
+ definition text,
+ load_date date NOT NULL
+);
+
+
+
+--
+-- Name: parameter_artifact_type; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.parameter_artifact_type (
+ command_parameter_id bigint NOT NULL,
+ artifact_type_id bigint NOT NULL
+);
+
+
+
+--
+-- Name: parameter_artifact_type_command_parameter_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.parameter_artifact_type_command_parameter_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: parameter_artifact_type_command_parameter_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.parameter_artifact_type_command_parameter_id_seq OWNED BY qiita.parameter_artifact_type.command_parameter_id;
+
+
+--
+-- Name: per_study_tags; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.per_study_tags (
+ study_id bigint NOT NULL,
+ study_tag character varying NOT NULL
+);
+
+
+
+--
+-- Name: portal_type; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.portal_type (
+ portal_type_id bigint NOT NULL,
+ portal character varying NOT NULL,
+ portal_description character varying NOT NULL
+);
+
+
+
+--
+-- Name: TABLE portal_type; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.portal_type IS 'What portals are available to show a study in';
+
+
+--
+-- Name: portal_type_portal_type_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.portal_type_portal_type_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: portal_type_portal_type_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.portal_type_portal_type_id_seq OWNED BY qiita.portal_type.portal_type_id;
+
+
+--
+-- Name: prep_1; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.prep_1 (
+ sample_id character varying NOT NULL,
+ sample_values jsonb
+);
+
+
+
+--
+-- Name: prep_2; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.prep_2 (
+ sample_id character varying NOT NULL,
+ sample_values jsonb
+);
+
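+-- NOTE: prep_1 and prep_2 (and sample_1 further below) appear to be the
+-- dynamic per-template metadata tables created for the test data: one row
+-- per sample, with every metadata column flattened into the sample_values
+-- jsonb. A minimal sketch of pulling one field back out, assuming a
+-- hypothetical 'platform' key inside sample_values:
+--
+--   SELECT sample_id, sample_values->>'platform' AS platform
+--     FROM qiita.prep_1;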
+
+
+--
+-- Name: prep_template; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.prep_template (
+ prep_template_id bigint NOT NULL,
+ data_type_id bigint NOT NULL,
+ preprocessing_status character varying DEFAULT 'not_preprocessed'::character varying NOT NULL,
+ investigation_type character varying,
+ artifact_id bigint,
+ name character varying DEFAULT 'Default Name'::character varying NOT NULL,
+ deprecated boolean DEFAULT false,
+ creation_timestamp timestamp without time zone DEFAULT CURRENT_TIMESTAMP,
+ modification_timestamp timestamp without time zone DEFAULT CURRENT_TIMESTAMP,
+ creation_job_id uuid
+);
+
+
+
+--
+-- Name: COLUMN prep_template.investigation_type; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.prep_template.investigation_type IS 'The investigation type (e.g., one of the values from EBI''s set of known types)';
+
+
+--
+-- Name: prep_template_filepath; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.prep_template_filepath (
+ prep_template_id bigint NOT NULL,
+ filepath_id bigint NOT NULL
+);
+
+
+
+--
+-- Name: prep_template_prep_template_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.prep_template_prep_template_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: prep_template_prep_template_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.prep_template_prep_template_id_seq OWNED BY qiita.prep_template.prep_template_id;
+
+
+--
+-- Name: prep_template_processing_job; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.prep_template_processing_job (
+ prep_template_id bigint NOT NULL,
+ processing_job_id uuid NOT NULL
+);
+
+
+
+--
+-- Name: prep_template_sample; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.prep_template_sample (
+ prep_template_id bigint NOT NULL,
+ sample_id character varying NOT NULL,
+ ebi_experiment_accession character varying
+);
+
+
+
+--
+-- Name: COLUMN prep_template_sample.prep_template_id; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.prep_template_sample.prep_template_id IS 'The prep template identifier';
+
+
+--
+-- Name: preparation_artifact; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.preparation_artifact (
+ prep_template_id bigint NOT NULL,
+ artifact_id bigint NOT NULL
+);
+
+
+
+--
+-- Name: processing_job; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.processing_job (
+ processing_job_id uuid DEFAULT public.uuid_generate_v4() NOT NULL,
+ email character varying NOT NULL,
+ command_id bigint NOT NULL,
+ command_parameters json NOT NULL,
+ processing_job_status_id bigint NOT NULL,
+ logging_id bigint,
+ heartbeat timestamp without time zone,
+ step character varying,
+ pending json,
+ hidden boolean DEFAULT false,
+ external_job_id character varying
+);
+
+
+
+--
+-- Name: COLUMN processing_job.email; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.processing_job.email IS 'The user that launched the job';
+
+
+--
+-- Name: COLUMN processing_job.command_id; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.processing_job.command_id IS 'The command launched';
+
+
+--
+-- Name: COLUMN processing_job.command_parameters; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.processing_job.command_parameters IS 'The parameters used in the command';
+
+
+--
+-- Name: COLUMN processing_job.logging_id; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.processing_job.logging_id IS 'In case of failure, point to the log entry that holds more information about the error';
+
+
+--
+-- Name: COLUMN processing_job.heartbeat; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.processing_job.heartbeat IS 'The last heartbeat received by this job';
+
+
+--
+-- Name: COLUMN processing_job.external_job_id; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.processing_job.external_job_id IS 'Store an external job ID (e.g. Torque job ID) associated with this Qiita job.';
+
+
+--
+-- Name: processing_job_resource_allocation; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.processing_job_resource_allocation (
+ name character varying NOT NULL,
+ description character varying,
+ job_type character varying NOT NULL,
+ allocation character varying
+);
+
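+-- NOTE: allocation holds the raw resource string handed to the job
+-- scheduler for the given job_type. A sketch of a row, with an illustrative
+-- (not real) allocation value:
+--
+--   INSERT INTO qiita.processing_job_resource_allocation
+--     (name, description, job_type, allocation)
+--   VALUES ('REGISTER', 'single-core step', 'REGISTER',
+--           '-q qiita -l nodes=1:ppn=1');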
+
+
+--
+-- Name: processing_job_status; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.processing_job_status (
+ processing_job_status_id bigint NOT NULL,
+ processing_job_status character varying NOT NULL,
+ processing_job_status_description character varying NOT NULL
+);
+
+
+
+--
+-- Name: processing_job_status_processing_job_status_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.processing_job_status_processing_job_status_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: processing_job_status_processing_job_status_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.processing_job_status_processing_job_status_id_seq OWNED BY qiita.processing_job_status.processing_job_status_id;
+
+
+--
+-- Name: processing_job_validator; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.processing_job_validator (
+ processing_job_id uuid NOT NULL,
+ validator_id uuid NOT NULL,
+ artifact_info json
+);
+
+
+
+--
+-- Name: processing_job_workflow; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.processing_job_workflow (
+ processing_job_workflow_id bigint NOT NULL,
+ email character varying NOT NULL,
+ name character varying
+);
+
+
+
+--
+-- Name: processing_job_workflow_processing_job_workflow_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.processing_job_workflow_processing_job_workflow_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: processing_job_workflow_processing_job_workflow_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.processing_job_workflow_processing_job_workflow_id_seq OWNED BY qiita.processing_job_workflow.processing_job_workflow_id;
+
+
+--
+-- Name: processing_job_workflow_root; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.processing_job_workflow_root (
+ processing_job_workflow_id bigint NOT NULL,
+ processing_job_id uuid NOT NULL
+);
+
+
+
+--
+-- Name: publication; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.publication (
+ doi character varying NOT NULL,
+ pubmed_id character varying
+);
+
+
+
+--
+-- Name: qiita_user; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.qiita_user (
+ email character varying NOT NULL,
+ user_level_id integer DEFAULT 5 NOT NULL,
+ password character varying NOT NULL,
+ name character varying,
+ affiliation character varying,
+ address character varying,
+ phone character varying,
+ user_verify_code character varying,
+ pass_reset_code character varying,
+ pass_reset_timestamp timestamp without time zone,
+ receive_processing_job_emails boolean DEFAULT false
+);
+
+
+
+--
+-- Name: TABLE qiita_user; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.qiita_user IS 'Holds all user information';
+
+
+--
+-- Name: COLUMN qiita_user.user_level_id; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.qiita_user.user_level_id IS 'user level';
+
+
+--
+-- Name: COLUMN qiita_user.user_verify_code; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.qiita_user.user_verify_code IS 'Code for initial user email verification';
+
+
+--
+-- Name: COLUMN qiita_user.pass_reset_code; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.qiita_user.pass_reset_code IS 'Randomly generated code for password reset';
+
+
+--
+-- Name: COLUMN qiita_user.pass_reset_timestamp; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.qiita_user.pass_reset_timestamp IS 'Time the reset code was generated';
+
+
+--
+-- Name: reference; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.reference (
+ reference_id bigint NOT NULL,
+ reference_name character varying NOT NULL,
+ reference_version character varying,
+ sequence_filepath bigint NOT NULL,
+ taxonomy_filepath bigint,
+ tree_filepath bigint
+);
+
+
+
+--
+-- Name: reference_reference_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.reference_reference_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: reference_reference_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.reference_reference_id_seq OWNED BY qiita.reference.reference_id;
+
+
+--
+-- Name: restrictions; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.restrictions (
+ table_name character varying,
+ name character varying,
+ valid_values character varying[]
+);
+
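+-- NOTE: restrictions appears to act as a lightweight controlled vocabulary
+-- for selected columns of other tables, with valid_values listing the
+-- allowed terms. A sketch of expanding it, using a hypothetical
+-- 'target_subfragment' restriction name:
+--
+--   SELECT table_name, name, unnest(valid_values) AS allowed_value
+--     FROM qiita.restrictions
+--    WHERE name = 'target_subfragment';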
+
+
+--
+-- Name: sample_1; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.sample_1 (
+ sample_id character varying NOT NULL,
+ sample_values jsonb
+);
+
+
+
+--
+-- Name: sample_template_filepath; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.sample_template_filepath (
+ study_id bigint NOT NULL,
+ filepath_id bigint NOT NULL
+);
+
+
+
+--
+-- Name: severity; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.severity (
+ severity_id integer NOT NULL,
+ severity character varying NOT NULL
+);
+
+
+
+--
+-- Name: severity_severity_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.severity_severity_id_seq
+ AS integer
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: severity_severity_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.severity_severity_id_seq OWNED BY qiita.severity.severity_id;
+
+
+--
+-- Name: software; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.software (
+ software_id bigint NOT NULL,
+ name character varying NOT NULL,
+ version character varying NOT NULL,
+ description character varying NOT NULL,
+ environment_script character varying NOT NULL,
+ start_script character varying NOT NULL,
+ software_type_id bigint NOT NULL,
+ active boolean DEFAULT false NOT NULL,
+ deprecated boolean DEFAULT false
+);
+
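+-- Illustrative note (assumption, not stated in the dump): environment_script
+-- and start_script presumably hold the shell snippets used to activate a
+-- plugin's environment and launch it, while active/deprecated gate whether
+-- the plugin is offered.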
+
+
+--
+-- Name: software_artifact_type; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.software_artifact_type (
+ software_id bigint NOT NULL,
+ artifact_type_id bigint NOT NULL
+);
+
+
+
+--
+-- Name: TABLE software_artifact_type; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.software_artifact_type IS 'If the software is a "type plugin", this table holds the artifact types that the software can validate and generate summaries for.';
+
+
+--
+-- Name: software_command; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.software_command (
+ command_id bigint NOT NULL,
+ name character varying NOT NULL,
+ software_id bigint NOT NULL,
+ description character varying NOT NULL,
+ active boolean DEFAULT true NOT NULL,
+ is_analysis boolean DEFAULT false NOT NULL,
+ ignore_parent_command boolean DEFAULT false NOT NULL,
+ post_processing_cmd character varying
+);
+
+
+
+--
+-- Name: COLUMN software_command.post_processing_cmd; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.software_command.post_processing_cmd IS 'Stores information on additional post-processing steps for merged BIOMs, if any.';
+
+
+--
+-- Name: software_command_command_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.software_command_command_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: software_command_command_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.software_command_command_id_seq OWNED BY qiita.software_command.command_id;
+
+
+--
+-- Name: software_publication; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.software_publication (
+ software_id bigint NOT NULL,
+ publication_doi character varying NOT NULL
+);
+
+
+
+--
+-- Name: software_software_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.software_software_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: software_software_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.software_software_id_seq OWNED BY qiita.software.software_id;
+
+
+--
+-- Name: software_type; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.software_type (
+ software_type_id bigint NOT NULL,
+ software_type character varying NOT NULL,
+ description character varying NOT NULL
+);
+
+
+
+--
+-- Name: software_type_software_type_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.software_type_software_type_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: software_type_software_type_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.software_type_software_type_id_seq OWNED BY qiita.software_type.software_type_id;
+
+
+--
+-- Name: stats_daily; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.stats_daily (
+ stats jsonb NOT NULL,
+ stats_timestamp timestamp without time zone NOT NULL
+);
+
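+-- Illustrative query (the key name is hypothetical): the newest snapshot is
+-- the row with the latest stats_timestamp, e.g.
+--   SELECT stats->>'num_studies' FROM qiita.stats_daily
+--       ORDER BY stats_timestamp DESC LIMIT 1;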
+
+
+--
+-- Name: study; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.study (
+ study_id bigint NOT NULL,
+ email character varying NOT NULL,
+ first_contact timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
+ funding character varying,
+ timeseries_type_id bigint NOT NULL,
+ lab_person_id bigint,
+ metadata_complete boolean NOT NULL,
+ mixs_compliant boolean NOT NULL,
+ most_recent_contact timestamp without time zone,
+ principal_investigator_id bigint NOT NULL,
+ reprocess boolean NOT NULL,
+ spatial_series boolean,
+ study_title character varying NOT NULL,
+ study_alias character varying NOT NULL,
+ study_description text NOT NULL,
+ study_abstract text NOT NULL,
+ vamps_id character varying,
+ ebi_study_accession character varying,
+ public_raw_download boolean DEFAULT false,
+ notes text DEFAULT ''::text NOT NULL,
+ autoloaded boolean DEFAULT false NOT NULL
+);
+
+
+
+--
+-- Name: COLUMN study.study_id; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.study.study_id IS 'Unique identifier for the study';
+
+
+--
+-- Name: COLUMN study.email; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.study.email IS 'Email of study owner';
+
+
+--
+-- Name: COLUMN study.timeseries_type_id; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.study.timeseries_type_id IS 'What type of timeseries this study is (or is not); controlled vocabulary';
+
+
+--
+-- Name: study_artifact; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.study_artifact (
+ study_id bigint NOT NULL,
+ artifact_id bigint NOT NULL
+);
+
+
+
+--
+-- Name: study_environmental_package; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.study_environmental_package (
+ study_id bigint NOT NULL,
+ environmental_package_name character varying NOT NULL
+);
+
+
+
+--
+-- Name: TABLE study_environmental_package; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.study_environmental_package IS 'Holds the one-to-many relationship between study and environmental_package';
+
+
+--
+-- Name: study_person; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.study_person (
+ study_person_id bigint NOT NULL,
+ name character varying NOT NULL,
+ email character varying NOT NULL,
+ affiliation character varying NOT NULL,
+ address character varying(100),
+ phone character varying
+);
+
+
+
+--
+-- Name: TABLE study_person; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.study_person IS 'Contact information for the various people involved in a study';
+
+
+--
+-- Name: COLUMN study_person.affiliation; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.study_person.affiliation IS 'The institution with which this person is affiliated';
+
+
+--
+-- Name: study_person_study_person_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.study_person_study_person_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: study_person_study_person_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.study_person_study_person_id_seq OWNED BY qiita.study_person.study_person_id;
+
+
+--
+-- Name: study_portal; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.study_portal (
+ study_id bigint NOT NULL,
+ portal_type_id bigint NOT NULL
+);
+
+
+
+--
+-- Name: TABLE study_portal; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.study_portal IS 'Controls which studies are visible on which portals';
+
+
+--
+-- Name: study_prep_template; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.study_prep_template (
+ study_id bigint NOT NULL,
+ prep_template_id bigint NOT NULL
+);
+
+
+
+--
+-- Name: TABLE study_prep_template; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.study_prep_template IS 'Links a study to its prep templates';
+
+
+--
+-- Name: study_publication; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.study_publication (
+ study_id bigint NOT NULL,
+ publication character varying NOT NULL,
+ is_doi boolean
+);
+
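+-- Illustrative query (the study id is hypothetical): publication stores
+-- either a DOI or a PubMed identifier, with is_doi telling the two apart:
+--   SELECT publication FROM qiita.study_publication
+--       WHERE study_id = 1 AND is_doi;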
+
+
+--
+-- Name: study_sample; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.study_sample (
+ sample_id character varying NOT NULL,
+ study_id bigint NOT NULL,
+ ebi_sample_accession character varying,
+ biosample_accession character varying
+);
+
+
+
+--
+-- Name: TABLE study_sample; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.study_sample IS 'Required info for each sample. One row is one sample.';
+
+
+--
+-- Name: visibility; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.visibility (
+ visibility_id bigint NOT NULL,
+ visibility character varying NOT NULL,
+ visibility_description character varying NOT NULL
+);
+
+
+
+--
+-- Name: study_status_study_status_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.study_status_study_status_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: study_status_study_status_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.study_status_study_status_id_seq OWNED BY qiita.visibility.visibility_id;
+
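+-- Note: the visibility table appears to have formerly been study_status
+-- (see also idx_study_status/pk_study_status below), so this sequence keeps
+-- its historical name; the matching SET DEFAULT for visibility_id appears
+-- further down.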
+
+--
+-- Name: study_study_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.study_study_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: study_study_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.study_study_id_seq OWNED BY qiita.study.study_id;
+
+
+--
+-- Name: study_tags; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.study_tags (
+ email character varying NOT NULL,
+ study_tag character varying NOT NULL
+);
+
+
+
+--
+-- Name: study_users; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.study_users (
+ study_id bigint NOT NULL,
+ email character varying NOT NULL
+);
+
+
+
+--
+-- Name: TABLE study_users; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.study_users IS 'Links shared studies to users they are shared with';
+
+
+--
+-- Name: term; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.term (
+ term_id bigint NOT NULL,
+ ontology_id bigint NOT NULL,
+ old_term_id bigint,
+ term character varying NOT NULL,
+ identifier character varying,
+ definition character varying,
+ namespace character varying,
+ is_obsolete boolean DEFAULT false,
+ is_root_term boolean,
+ is_leaf boolean,
+ user_defined boolean DEFAULT false NOT NULL
+);
+
+
+
+--
+-- Name: COLUMN term.old_term_id; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.term.old_term_id IS 'Identifier used in the old system; kept for consistency';
+
+
+--
+-- Name: COLUMN term.user_defined; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.term.user_defined IS 'Whether or not this term was defined by a user';
+
+
+--
+-- Name: term_term_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.term_term_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: term_term_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.term_term_id_seq OWNED BY qiita.term.term_id;
+
+
+--
+-- Name: timeseries_type; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.timeseries_type (
+ timeseries_type_id bigint NOT NULL,
+ timeseries_type character varying NOT NULL,
+ intervention_type character varying DEFAULT 'None'::character varying NOT NULL
+);
+
+
+
+--
+-- Name: timeseries_type_timeseries_type_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.timeseries_type_timeseries_type_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: timeseries_type_timeseries_type_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.timeseries_type_timeseries_type_id_seq OWNED BY qiita.timeseries_type.timeseries_type_id;
+
+
+--
+-- Name: user_level; Type: TABLE; Schema: qiita
+--
+
+CREATE TABLE qiita.user_level (
+ user_level_id integer NOT NULL,
+ name character varying NOT NULL,
+ description text NOT NULL,
+ slurm_parameters character varying DEFAULT '--nice=10000'::character varying NOT NULL
+);
+
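+-- Illustrative query: qiita_user.user_level_id defaults to 5, so new accounts
+-- take whichever level holds that id, e.g.
+--   SELECT name, slurm_parameters FROM qiita.user_level
+--       WHERE user_level_id = 5;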
+
+
+--
+-- Name: TABLE user_level; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON TABLE qiita.user_level IS 'Holds available user levels';
+
+
+--
+-- Name: COLUMN user_level.name; Type: COMMENT; Schema: qiita
+--
+
+COMMENT ON COLUMN qiita.user_level.name IS 'One of the user levels (admin, user, guest, etc.)';
+
+
+--
+-- Name: user_level_user_level_id_seq; Type: SEQUENCE; Schema: qiita
+--
+
+CREATE SEQUENCE qiita.user_level_user_level_id_seq
+ AS integer
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+
+--
+-- Name: user_level_user_level_id_seq; Type: SEQUENCE OWNED BY; Schema: qiita
+--
+
+ALTER SEQUENCE qiita.user_level_user_level_id_seq OWNED BY qiita.user_level.user_level_id;
+
+
+--
+-- Name: analysis analysis_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis ALTER COLUMN analysis_id SET DEFAULT nextval('qiita.analysis_analysis_id_seq'::regclass);
+
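+-- Note: pg_dump splits each serial-style id column into a CREATE SEQUENCE, an
+-- ALTER SEQUENCE ... OWNED BY, and a SET DEFAULT like the one above, so
+-- INSERTs that omit the id column receive nextval() automatically.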
+
+--
+-- Name: archive_merging_scheme archive_merging_scheme_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.archive_merging_scheme ALTER COLUMN archive_merging_scheme_id SET DEFAULT nextval('qiita.archive_merging_scheme_archive_merging_scheme_id_seq'::regclass);
+
+
+--
+-- Name: artifact artifact_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact ALTER COLUMN artifact_id SET DEFAULT nextval('qiita.artifact_artifact_id_seq'::regclass);
+
+
+--
+-- Name: artifact_type artifact_type_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_type ALTER COLUMN artifact_type_id SET DEFAULT nextval('qiita.filetype_filetype_id_seq'::regclass);
+
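+-- Note: artifact_type appears to have formerly been named filetype (see also
+-- idx_filetype/pk_filetype below), so its default still draws from
+-- qiita.filetype_filetype_id_seq.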
+
+--
+-- Name: checksum_algorithm checksum_algorithm_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.checksum_algorithm ALTER COLUMN checksum_algorithm_id SET DEFAULT nextval('qiita.checksum_algorithm_checksum_algorithm_id_seq'::regclass);
+
+
+--
+-- Name: column_controlled_vocabularies controlled_vocab_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.column_controlled_vocabularies ALTER COLUMN controlled_vocab_id SET DEFAULT nextval('qiita.column_controlled_vocabularies_controlled_vocab_id_seq'::regclass);
+
+
+--
+-- Name: command_output command_output_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.command_output ALTER COLUMN command_output_id SET DEFAULT nextval('qiita.command_output_command_output_id_seq'::regclass);
+
+
+--
+-- Name: command_parameter command_parameter_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.command_parameter ALTER COLUMN command_parameter_id SET DEFAULT nextval('qiita.command_parameter_command_parameter_id_seq'::regclass);
+
+
+--
+-- Name: controlled_vocab controlled_vocab_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.controlled_vocab ALTER COLUMN controlled_vocab_id SET DEFAULT nextval('qiita.controlled_vocab_controlled_vocab_id_seq'::regclass);
+
+
+--
+-- Name: controlled_vocab_values vocab_value_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.controlled_vocab_values ALTER COLUMN vocab_value_id SET DEFAULT nextval('qiita.controlled_vocab_values_vocab_value_id_seq'::regclass);
+
+
+--
+-- Name: data_directory data_directory_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.data_directory ALTER COLUMN data_directory_id SET DEFAULT nextval('qiita.data_directory_data_directory_id_seq'::regclass);
+
+
+--
+-- Name: data_type data_type_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.data_type ALTER COLUMN data_type_id SET DEFAULT nextval('qiita.data_type_data_type_id_seq'::regclass);
+
+
+--
+-- Name: default_parameter_set default_parameter_set_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_parameter_set ALTER COLUMN default_parameter_set_id SET DEFAULT nextval('qiita.default_parameter_set_default_parameter_set_id_seq'::regclass);
+
+
+--
+-- Name: default_workflow default_workflow_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow ALTER COLUMN default_workflow_id SET DEFAULT nextval('qiita.default_workflow_default_workflow_id_seq'::regclass);
+
+
+--
+-- Name: default_workflow_edge default_workflow_edge_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_edge ALTER COLUMN default_workflow_edge_id SET DEFAULT nextval('qiita.default_workflow_edge_default_workflow_edge_id_seq'::regclass);
+
+
+--
+-- Name: default_workflow_node default_workflow_node_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_node ALTER COLUMN default_workflow_node_id SET DEFAULT nextval('qiita.default_workflow_node_default_workflow_node_id_seq'::regclass);
+
+
+--
+-- Name: filepath filepath_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.filepath ALTER COLUMN filepath_id SET DEFAULT nextval('qiita.filepath_filepath_id_seq'::regclass);
+
+
+--
+-- Name: filepath data_directory_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.filepath ALTER COLUMN data_directory_id SET DEFAULT nextval('qiita.filepath_data_directory_id_seq'::regclass);
+
+
+--
+-- Name: filepath_type filepath_type_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.filepath_type ALTER COLUMN filepath_type_id SET DEFAULT nextval('qiita.filepath_type_filepath_type_id_seq'::regclass);
+
+
+--
+-- Name: investigation investigation_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.investigation ALTER COLUMN investigation_id SET DEFAULT nextval('qiita.investigation_investigation_id_seq'::regclass);
+
+
+--
+-- Name: logging logging_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.logging ALTER COLUMN logging_id SET DEFAULT nextval('qiita.logging_logging_id_seq'::regclass);
+
+
+--
+-- Name: message message_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.message ALTER COLUMN message_id SET DEFAULT nextval('qiita.message_message_id_seq'::regclass);
+
+
+--
+-- Name: parameter_artifact_type command_parameter_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.parameter_artifact_type ALTER COLUMN command_parameter_id SET DEFAULT nextval('qiita.parameter_artifact_type_command_parameter_id_seq'::regclass);
+
+
+--
+-- Name: portal_type portal_type_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.portal_type ALTER COLUMN portal_type_id SET DEFAULT nextval('qiita.portal_type_portal_type_id_seq'::regclass);
+
+
+--
+-- Name: prep_template prep_template_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_template ALTER COLUMN prep_template_id SET DEFAULT nextval('qiita.prep_template_prep_template_id_seq'::regclass);
+
+
+--
+-- Name: processing_job_status processing_job_status_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job_status ALTER COLUMN processing_job_status_id SET DEFAULT nextval('qiita.processing_job_status_processing_job_status_id_seq'::regclass);
+
+
+--
+-- Name: processing_job_workflow processing_job_workflow_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job_workflow ALTER COLUMN processing_job_workflow_id SET DEFAULT nextval('qiita.processing_job_workflow_processing_job_workflow_id_seq'::regclass);
+
+
+--
+-- Name: reference reference_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.reference ALTER COLUMN reference_id SET DEFAULT nextval('qiita.reference_reference_id_seq'::regclass);
+
+
+--
+-- Name: severity severity_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.severity ALTER COLUMN severity_id SET DEFAULT nextval('qiita.severity_severity_id_seq'::regclass);
+
+
+--
+-- Name: software software_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.software ALTER COLUMN software_id SET DEFAULT nextval('qiita.software_software_id_seq'::regclass);
+
+
+--
+-- Name: software_command command_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.software_command ALTER COLUMN command_id SET DEFAULT nextval('qiita.software_command_command_id_seq'::regclass);
+
+
+--
+-- Name: software_type software_type_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.software_type ALTER COLUMN software_type_id SET DEFAULT nextval('qiita.software_type_software_type_id_seq'::regclass);
+
+
+--
+-- Name: study study_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study ALTER COLUMN study_id SET DEFAULT nextval('qiita.study_study_id_seq'::regclass);
+
+
+--
+-- Name: study_person study_person_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_person ALTER COLUMN study_person_id SET DEFAULT nextval('qiita.study_person_study_person_id_seq'::regclass);
+
+
+--
+-- Name: term term_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.term ALTER COLUMN term_id SET DEFAULT nextval('qiita.term_term_id_seq'::regclass);
+
+
+--
+-- Name: timeseries_type timeseries_type_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.timeseries_type ALTER COLUMN timeseries_type_id SET DEFAULT nextval('qiita.timeseries_type_timeseries_type_id_seq'::regclass);
+
+
+--
+-- Name: user_level user_level_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.user_level ALTER COLUMN user_level_id SET DEFAULT nextval('qiita.user_level_user_level_id_seq'::regclass);
+
+
+--
+-- Name: visibility visibility_id; Type: DEFAULT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.visibility ALTER COLUMN visibility_id SET DEFAULT nextval('qiita.study_status_study_status_id_seq'::regclass);
+
+
+--
+-- Name: default_workflow_data_type default_workflow_data_type_pkey; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_data_type
+ ADD CONSTRAINT default_workflow_data_type_pkey PRIMARY KEY (default_workflow_id, data_type_id);
+
+
+--
+-- Name: download_link download_link_pkey; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.download_link
+ ADD CONSTRAINT download_link_pkey PRIMARY KEY (jti);
+
+
+--
+-- Name: analysis_artifact idx_analysis_artifact_0; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_artifact
+ ADD CONSTRAINT idx_analysis_artifact_0 PRIMARY KEY (analysis_id, artifact_id);
+
+
+--
+-- Name: analysis_filepath idx_analysis_filepath_1; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_filepath
+ ADD CONSTRAINT idx_analysis_filepath_1 PRIMARY KEY (analysis_id, filepath_id);
+
+
+--
+-- Name: analysis_processing_job idx_analysis_processing_job; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_processing_job
+ ADD CONSTRAINT idx_analysis_processing_job PRIMARY KEY (analysis_id, processing_job_id);
+
+
+--
+-- Name: analysis_users idx_analysis_users; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_users
+ ADD CONSTRAINT idx_analysis_users PRIMARY KEY (analysis_id, email);
+
+
+--
+-- Name: archive_feature_value idx_archive_feature_value; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.archive_feature_value
+ ADD CONSTRAINT idx_archive_feature_value PRIMARY KEY (archive_merging_scheme_id, archive_feature);
+
+
+--
+-- Name: artifact_filepath idx_artifact_filepath; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_filepath
+ ADD CONSTRAINT idx_artifact_filepath PRIMARY KEY (artifact_id, filepath_id);
+
+
+--
+-- Name: artifact_processing_job idx_artifact_processing_job; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_processing_job
+ ADD CONSTRAINT idx_artifact_processing_job PRIMARY KEY (artifact_id, processing_job_id);
+
+
+--
+-- Name: artifact_type_filepath_type idx_artifact_type_filepath_type; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_type_filepath_type
+ ADD CONSTRAINT idx_artifact_type_filepath_type PRIMARY KEY (artifact_type_id, filepath_type_id);
+
+
+--
+-- Name: checksum_algorithm idx_checksum_algorithm; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.checksum_algorithm
+ ADD CONSTRAINT idx_checksum_algorithm UNIQUE (name);
+
+
+--
+-- Name: column_controlled_vocabularies idx_column_controlled_vocabularies; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.column_controlled_vocabularies
+ ADD CONSTRAINT idx_column_controlled_vocabularies PRIMARY KEY (controlled_vocab_id, column_name);
+
+
+--
+-- Name: column_ontology idx_column_ontology; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.column_ontology
+ ADD CONSTRAINT idx_column_ontology PRIMARY KEY (column_name, ontology_short_name);
+
+
+--
+-- Name: command_output idx_command_output; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.command_output
+ ADD CONSTRAINT idx_command_output UNIQUE (name, command_id);
+
+
+--
+-- Name: command_parameter idx_command_parameter_0; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.command_parameter
+ ADD CONSTRAINT idx_command_parameter_0 UNIQUE (command_id, parameter_name);
+
+
+--
+-- Name: prep_template_sample idx_common_prep_info; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_template_sample
+ ADD CONSTRAINT idx_common_prep_info PRIMARY KEY (prep_template_id, sample_id);
+
+
+--
+-- Name: data_type idx_data_type; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.data_type
+ ADD CONSTRAINT idx_data_type UNIQUE (data_type);
+
+
+--
+-- Name: default_parameter_set idx_default_parameter_set_0; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_parameter_set
+ ADD CONSTRAINT idx_default_parameter_set_0 UNIQUE (command_id, parameter_set_name);
+
+
+--
+-- Name: default_workflow_edge_connections idx_default_workflow_edge_connections; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_edge_connections
+ ADD CONSTRAINT idx_default_workflow_edge_connections PRIMARY KEY (default_workflow_edge_id, parent_output_id, child_input_id);
+
+
+--
+-- Name: filepath_type idx_filepath_type; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.filepath_type
+ ADD CONSTRAINT idx_filepath_type UNIQUE (filepath_type);
+
+
+--
+-- Name: artifact_type idx_filetype; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_type
+ ADD CONSTRAINT idx_filetype UNIQUE (artifact_type);
+
+
+--
+-- Name: investigation_study idx_investigation_study; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.investigation_study
+ ADD CONSTRAINT idx_investigation_study PRIMARY KEY (investigation_id, study_id);
+
+
+--
+-- Name: message_user idx_message_user; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.message_user
+ ADD CONSTRAINT idx_message_user PRIMARY KEY (email, message_id);
+
+
+--
+-- Name: oauth_software idx_oauth_software; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.oauth_software
+ ADD CONSTRAINT idx_oauth_software PRIMARY KEY (software_id, client_id);
+
+
+--
+-- Name: ontology idx_ontology; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.ontology
+ ADD CONSTRAINT idx_ontology UNIQUE (ontology);
+
+
+--
+-- Name: parameter_artifact_type idx_parameter_artifact_type; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.parameter_artifact_type
+ ADD CONSTRAINT idx_parameter_artifact_type PRIMARY KEY (command_parameter_id, artifact_type_id);
+
+
+--
+-- Name: parent_artifact idx_parent_artifact; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.parent_artifact
+ ADD CONSTRAINT idx_parent_artifact PRIMARY KEY (artifact_id, parent_id);
+
+
+--
+-- Name: parent_processing_job idx_parent_processing_job; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.parent_processing_job
+ ADD CONSTRAINT idx_parent_processing_job PRIMARY KEY (parent_id, child_id);
+
+
+--
+-- Name: prep_template_filepath idx_prep_template_filepath; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_template_filepath
+ ADD CONSTRAINT idx_prep_template_filepath PRIMARY KEY (prep_template_id, filepath_id);
+
+
+--
+-- Name: prep_template_processing_job idx_prep_template_processing_job; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_template_processing_job
+ ADD CONSTRAINT idx_prep_template_processing_job PRIMARY KEY (prep_template_id, processing_job_id);
+
+
+--
+-- Name: processing_job_validator idx_processing_job_validator; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job_validator
+ ADD CONSTRAINT idx_processing_job_validator PRIMARY KEY (processing_job_id, validator_id);
+
+
+--
+-- Name: processing_job_workflow_root idx_processing_job_workflow_root_0; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job_workflow_root
+ ADD CONSTRAINT idx_processing_job_workflow_root_0 PRIMARY KEY (processing_job_workflow_id, processing_job_id);
+
+
+--
+-- Name: study_sample idx_required_sample_info_1; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_sample
+ ADD CONSTRAINT idx_required_sample_info_1 PRIMARY KEY (sample_id);
+
+
+--
+-- Name: sample_template_filepath idx_sample_template_filepath; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.sample_template_filepath
+ ADD CONSTRAINT idx_sample_template_filepath PRIMARY KEY (study_id, filepath_id);
+
+
+--
+-- Name: severity idx_severity; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.severity
+ ADD CONSTRAINT idx_severity UNIQUE (severity);
+
+
+--
+-- Name: software_artifact_type idx_software_artifact_type; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.software_artifact_type
+ ADD CONSTRAINT idx_software_artifact_type PRIMARY KEY (software_id, artifact_type_id);
+
+
+--
+-- Name: software_publication idx_software_publication_0; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.software_publication
+ ADD CONSTRAINT idx_software_publication_0 PRIMARY KEY (software_id, publication_doi);
+
+
+--
+-- Name: study_artifact idx_study_artifact; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_artifact
+ ADD CONSTRAINT idx_study_artifact PRIMARY KEY (study_id, artifact_id);
+
+
+--
+-- Name: study_person idx_study_person; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_person
+ ADD CONSTRAINT idx_study_person UNIQUE (name, affiliation);
+
+
+--
+-- Name: study_prep_template idx_study_prep_template; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_prep_template
+ ADD CONSTRAINT idx_study_prep_template PRIMARY KEY (study_id, prep_template_id);
+
+
+--
+-- Name: visibility idx_study_status; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.visibility
+ ADD CONSTRAINT idx_study_status UNIQUE (visibility);
+
+
+--
+-- Name: study_users idx_study_users; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_users
+ ADD CONSTRAINT idx_study_users PRIMARY KEY (study_id, email);
+
+
+--
+-- Name: timeseries_type idx_timeseries_type; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.timeseries_type
+ ADD CONSTRAINT idx_timeseries_type UNIQUE (timeseries_type, intervention_type);
+
+
+--
+-- Name: user_level idx_user_level; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.user_level
+ ADD CONSTRAINT idx_user_level UNIQUE (name);
+
+
+--
+-- Name: analysis pk_analysis; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis
+ ADD CONSTRAINT pk_analysis PRIMARY KEY (analysis_id);
+
+
+--
+-- Name: analysis_portal pk_analysis_portal; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_portal
+ ADD CONSTRAINT pk_analysis_portal PRIMARY KEY (analysis_id, portal_type_id);
+
+
+--
+-- Name: analysis_sample pk_analysis_sample; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_sample
+ ADD CONSTRAINT pk_analysis_sample PRIMARY KEY (analysis_id, artifact_id, sample_id);
+
+
+--
+-- Name: artifact pk_artifact; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact
+ ADD CONSTRAINT pk_artifact PRIMARY KEY (artifact_id);
+
+
+--
+-- Name: checksum_algorithm pk_checksum_algorithm; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.checksum_algorithm
+ ADD CONSTRAINT pk_checksum_algorithm PRIMARY KEY (checksum_algorithm_id);
+
+
+--
+-- Name: command_output pk_command_output; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.command_output
+ ADD CONSTRAINT pk_command_output PRIMARY KEY (command_output_id);
+
+
+--
+-- Name: command_parameter pk_command_parameter; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.command_parameter
+ ADD CONSTRAINT pk_command_parameter PRIMARY KEY (command_parameter_id);
+
+
+--
+-- Name: controlled_vocab_values pk_controlled_vocab_values; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.controlled_vocab_values
+ ADD CONSTRAINT pk_controlled_vocab_values PRIMARY KEY (vocab_value_id);
+
+
+--
+-- Name: controlled_vocab pk_controlled_vocabularies; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.controlled_vocab
+ ADD CONSTRAINT pk_controlled_vocabularies PRIMARY KEY (controlled_vocab_id);
+
+
+--
+-- Name: data_directory pk_data_directory; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.data_directory
+ ADD CONSTRAINT pk_data_directory PRIMARY KEY (data_directory_id);
+
+
+--
+-- Name: data_type pk_data_type; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.data_type
+ ADD CONSTRAINT pk_data_type PRIMARY KEY (data_type_id);
+
+
+--
+-- Name: default_parameter_set pk_default_parameter_set; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_parameter_set
+ ADD CONSTRAINT pk_default_parameter_set PRIMARY KEY (default_parameter_set_id);
+
+
+--
+-- Name: default_workflow pk_default_workflow; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow
+ ADD CONSTRAINT pk_default_workflow PRIMARY KEY (default_workflow_id);
+
+
+--
+-- Name: default_workflow_node pk_default_workflow_command; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_node
+ ADD CONSTRAINT pk_default_workflow_command PRIMARY KEY (default_workflow_node_id);
+
+
+--
+-- Name: default_workflow_edge pk_default_workflow_edge; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_edge
+ ADD CONSTRAINT pk_default_workflow_edge PRIMARY KEY (default_workflow_edge_id);
+
+
+--
+-- Name: environmental_package pk_environmental_package; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.environmental_package
+ ADD CONSTRAINT pk_environmental_package PRIMARY KEY (environmental_package_name);
+
+
+--
+-- Name: filepath pk_filepath; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.filepath
+ ADD CONSTRAINT pk_filepath PRIMARY KEY (filepath_id);
+
+
+--
+-- Name: filepath_type pk_filepath_type; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.filepath_type
+ ADD CONSTRAINT pk_filepath_type PRIMARY KEY (filepath_type_id);
+
+
+--
+-- Name: artifact_type pk_filetype; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_type
+ ADD CONSTRAINT pk_filetype PRIMARY KEY (artifact_type_id);
+
+
+--
+-- Name: investigation pk_investigation; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.investigation
+ ADD CONSTRAINT pk_investigation PRIMARY KEY (investigation_id);
+
+
+--
+-- Name: prep_1 pk_jsonb_prep_1; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_1
+ ADD CONSTRAINT pk_jsonb_prep_1 PRIMARY KEY (sample_id);
+
+
+--
+-- Name: prep_2 pk_jsonb_prep_2; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_2
+ ADD CONSTRAINT pk_jsonb_prep_2 PRIMARY KEY (sample_id);
+
+
+--
+-- Name: sample_1 pk_jsonb_sample_1; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.sample_1
+ ADD CONSTRAINT pk_jsonb_sample_1 PRIMARY KEY (sample_id);
+
+
+--
+-- Name: logging pk_logging; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.logging
+ ADD CONSTRAINT pk_logging PRIMARY KEY (logging_id);
+
+
+--
+-- Name: archive_merging_scheme pk_merging_scheme; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.archive_merging_scheme
+ ADD CONSTRAINT pk_merging_scheme PRIMARY KEY (archive_merging_scheme_id);
+
+
+--
+-- Name: message pk_message; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.message
+ ADD CONSTRAINT pk_message PRIMARY KEY (message_id);
+
+
+--
+-- Name: mixs_field_description pk_mixs_field_description; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.mixs_field_description
+ ADD CONSTRAINT pk_mixs_field_description PRIMARY KEY (column_name);
+
+
+--
+-- Name: oauth_identifiers pk_oauth2; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.oauth_identifiers
+ ADD CONSTRAINT pk_oauth2 PRIMARY KEY (client_id);
+
+
+--
+-- Name: ontology pk_ontology; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.ontology
+ ADD CONSTRAINT pk_ontology PRIMARY KEY (ontology_id);
+
+
+--
+-- Name: per_study_tags pk_per_study_tags; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.per_study_tags
+ ADD CONSTRAINT pk_per_study_tags PRIMARY KEY (study_tag, study_id);
+
+
+--
+-- Name: portal_type pk_portal_type; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.portal_type
+ ADD CONSTRAINT pk_portal_type PRIMARY KEY (portal_type_id);
+
+
+--
+-- Name: prep_template pk_prep_template; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_template
+ ADD CONSTRAINT pk_prep_template PRIMARY KEY (prep_template_id);
+
+
+--
+-- Name: processing_job pk_processing_job; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job
+ ADD CONSTRAINT pk_processing_job PRIMARY KEY (processing_job_id);
+
+
+--
+-- Name: processing_job_status pk_processing_job_status; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job_status
+ ADD CONSTRAINT pk_processing_job_status PRIMARY KEY (processing_job_status_id);
+
+
+--
+-- Name: processing_job_workflow pk_processing_job_workflow; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job_workflow
+ ADD CONSTRAINT pk_processing_job_workflow PRIMARY KEY (processing_job_workflow_id);
+
+
+--
+-- Name: publication pk_publication; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.publication
+ ADD CONSTRAINT pk_publication PRIMARY KEY (doi);
+
+
+--
+-- Name: reference pk_reference; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.reference
+ ADD CONSTRAINT pk_reference PRIMARY KEY (reference_id);
+
+
+--
+-- Name: severity pk_severity; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.severity
+ ADD CONSTRAINT pk_severity PRIMARY KEY (severity_id);
+
+
+--
+-- Name: software pk_software; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.software
+ ADD CONSTRAINT pk_software PRIMARY KEY (software_id);
+
+
+--
+-- Name: software_command pk_software_command; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.software_command
+ ADD CONSTRAINT pk_software_command PRIMARY KEY (command_id);
+
+
+--
+-- Name: software_type pk_software_type; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.software_type
+ ADD CONSTRAINT pk_software_type PRIMARY KEY (software_type_id);
+
+
+--
+-- Name: study pk_study; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study
+ ADD CONSTRAINT pk_study PRIMARY KEY (study_id);
+
+
+--
+-- Name: study_environmental_package pk_study_environmental_package; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_environmental_package
+ ADD CONSTRAINT pk_study_environmental_package PRIMARY KEY (study_id, environmental_package_name);
+
+
+--
+-- Name: study_person pk_study_person; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_person
+ ADD CONSTRAINT pk_study_person PRIMARY KEY (study_person_id);
+
+
+--
+-- Name: study_portal pk_study_portal; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_portal
+ ADD CONSTRAINT pk_study_portal PRIMARY KEY (study_id, portal_type_id);
+
+
+--
+-- Name: visibility pk_study_status; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.visibility
+ ADD CONSTRAINT pk_study_status PRIMARY KEY (visibility_id);
+
+
+--
+-- Name: study_tags pk_study_tags; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_tags
+ ADD CONSTRAINT pk_study_tags PRIMARY KEY (study_tag);
+
+
+--
+-- Name: term pk_term; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.term
+ ADD CONSTRAINT pk_term PRIMARY KEY (term_id);
+
+
+--
+-- Name: timeseries_type pk_timeseries_type; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.timeseries_type
+ ADD CONSTRAINT pk_timeseries_type PRIMARY KEY (timeseries_type_id);
+
+
+--
+-- Name: qiita_user pk_user; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.qiita_user
+ ADD CONSTRAINT pk_user PRIMARY KEY (email);
+
+
+--
+-- Name: user_level pk_user_level; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.user_level
+ ADD CONSTRAINT pk_user_level PRIMARY KEY (user_level_id);
+
+
+--
+-- Name: preparation_artifact preparation_artifact_pkey; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.preparation_artifact
+ ADD CONSTRAINT preparation_artifact_pkey PRIMARY KEY (prep_template_id, artifact_id);
+
+
+--
+-- Name: processing_job_resource_allocation processing_job_resource_allocation_pkey; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job_resource_allocation
+ ADD CONSTRAINT processing_job_resource_allocation_pkey PRIMARY KEY (name, job_type);
+
+
+--
+-- Name: study unique_study_title; Type: CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study
+ ADD CONSTRAINT unique_study_title UNIQUE (study_title);
+
+
+--
+-- Name: idx_analysis_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_0 ON qiita.analysis USING btree (logging_id);
+
+
+--
+-- Name: idx_analysis_artifact_analysis; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_artifact_analysis ON qiita.analysis_artifact USING btree (analysis_id);
+
+
+--
+-- Name: idx_analysis_artifact_artifact; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_artifact_artifact ON qiita.analysis_artifact USING btree (artifact_id);
+
+
+--
+-- Name: idx_analysis_email; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_email ON qiita.analysis USING btree (email);
+
+
+--
+-- Name: idx_analysis_filepath; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_filepath ON qiita.analysis_filepath USING btree (analysis_id);
+
+
+--
+-- Name: idx_analysis_filepath_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_filepath_0 ON qiita.analysis_filepath USING btree (filepath_id);
+
+
+--
+-- Name: idx_analysis_filepath_2; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_filepath_2 ON qiita.analysis_filepath USING btree (data_type_id);
+
+
+--
+-- Name: idx_analysis_portal; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_portal ON qiita.analysis_portal USING btree (analysis_id);
+
+
+--
+-- Name: idx_analysis_portal_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_portal_0 ON qiita.analysis_portal USING btree (portal_type_id);
+
+
+--
+-- Name: idx_analysis_processing_job_analysis; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_processing_job_analysis ON qiita.analysis_processing_job USING btree (analysis_id);
+
+
+--
+-- Name: idx_analysis_processing_job_pj; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_processing_job_pj ON qiita.analysis_processing_job USING btree (processing_job_id);
+
+
+--
+-- Name: idx_analysis_sample; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_sample ON qiita.analysis_sample USING btree (analysis_id);
+
+
+--
+-- Name: idx_analysis_sample_1; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_sample_1 ON qiita.analysis_sample USING btree (sample_id);
+
+
+--
+-- Name: idx_analysis_sample_artifact_id; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_sample_artifact_id ON qiita.analysis_sample USING btree (artifact_id);
+
+
+--
+-- Name: idx_analysis_users_analysis; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_users_analysis ON qiita.analysis_users USING btree (analysis_id);
+
+
+--
+-- Name: idx_analysis_users_email; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_analysis_users_email ON qiita.analysis_users USING btree (email);
+
+
+--
+-- Name: idx_archive_feature_value_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_archive_feature_value_0 ON qiita.archive_feature_value USING btree (archive_merging_scheme_id);
+
+
+--
+-- Name: idx_artifact; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_artifact ON qiita.artifact USING btree (command_id);
+
+
+--
+-- Name: idx_artifact_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_artifact_0 ON qiita.artifact USING btree (visibility_id);
+
+
+--
+-- Name: idx_artifact_1; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_artifact_1 ON qiita.artifact USING btree (artifact_type_id);
+
+
+--
+-- Name: idx_artifact_2; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_artifact_2 ON qiita.artifact USING btree (data_type_id);
+
+
+--
+-- Name: idx_artifact_filepath_artifact; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_artifact_filepath_artifact ON qiita.artifact_filepath USING btree (artifact_id);
+
+
+--
+-- Name: idx_artifact_filepath_filepath; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_artifact_filepath_filepath ON qiita.artifact_filepath USING btree (filepath_id);
+
+
+--
+-- Name: idx_artifact_output_processing_job_artifact; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_artifact_output_processing_job_artifact ON qiita.artifact_output_processing_job USING btree (artifact_id);
+
+
+--
+-- Name: idx_artifact_output_processing_job_cmd; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_artifact_output_processing_job_cmd ON qiita.artifact_output_processing_job USING btree (command_output_id);
+
+
+--
+-- Name: idx_artifact_output_processing_job_job; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_artifact_output_processing_job_job ON qiita.artifact_output_processing_job USING btree (processing_job_id);
+
+
+--
+-- Name: idx_artifact_processing_job_artifact; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_artifact_processing_job_artifact ON qiita.artifact_processing_job USING btree (artifact_id);
+
+
+--
+-- Name: idx_artifact_processing_job_job; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_artifact_processing_job_job ON qiita.artifact_processing_job USING btree (processing_job_id);
+
+
+--
+-- Name: idx_artifact_type_filepath_type_at; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_artifact_type_filepath_type_at ON qiita.artifact_type_filepath_type USING btree (artifact_type_id);
+
+
+--
+-- Name: idx_artifact_type_filepath_type_ft; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_artifact_type_filepath_type_ft ON qiita.artifact_type_filepath_type USING btree (filepath_type_id);
+
+
+--
+-- Name: idx_column_controlled_vocabularies_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_column_controlled_vocabularies_0 ON qiita.column_controlled_vocabularies USING btree (column_name);
+
+
+--
+-- Name: idx_column_controlled_vocabularies_1; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_column_controlled_vocabularies_1 ON qiita.column_controlled_vocabularies USING btree (controlled_vocab_id);
+
+
+--
+-- Name: idx_column_ontology_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_column_ontology_0 ON qiita.column_ontology USING btree (column_name);
+
+
+--
+-- Name: idx_command_output_cmd_id; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_command_output_cmd_id ON qiita.command_output USING btree (command_id);
+
+
+--
+-- Name: idx_command_output_type_id; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_command_output_type_id ON qiita.command_output USING btree (artifact_type_id);
+
+
+--
+-- Name: idx_command_parameter; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_command_parameter ON qiita.command_parameter USING btree (command_id);
+
+
+--
+-- Name: idx_common_prep_info_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_common_prep_info_0 ON qiita.prep_template_sample USING btree (sample_id);
+
+
+--
+-- Name: idx_common_prep_info_1; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_common_prep_info_1 ON qiita.prep_template_sample USING btree (prep_template_id);
+
+
+--
+-- Name: idx_controlled_vocab_values; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_controlled_vocab_values ON qiita.controlled_vocab_values USING btree (controlled_vocab_id);
+
+
+--
+-- Name: idx_default_parameter_set; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_default_parameter_set ON qiita.default_parameter_set USING btree (command_id);
+
+
+--
+-- Name: idx_default_workflow_command_dflt_param_id; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_default_workflow_command_dflt_param_id ON qiita.default_workflow_node USING btree (default_parameter_set_id);
+
+
+--
+-- Name: idx_default_workflow_command_dflt_wf_id; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_default_workflow_command_dflt_wf_id ON qiita.default_workflow_node USING btree (default_workflow_id);
+
+
+--
+-- Name: idx_default_workflow_edge_child; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_default_workflow_edge_child ON qiita.default_workflow_edge USING btree (child_id);
+
+
+--
+-- Name: idx_default_workflow_edge_connections_child; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_default_workflow_edge_connections_child ON qiita.default_workflow_edge_connections USING btree (child_input_id);
+
+
+--
+-- Name: idx_default_workflow_edge_connections_edge; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_default_workflow_edge_connections_edge ON qiita.default_workflow_edge_connections USING btree (default_workflow_edge_id);
+
+
+--
+-- Name: idx_default_workflow_edge_connections_parent; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_default_workflow_edge_connections_parent ON qiita.default_workflow_edge_connections USING btree (parent_output_id);
+
+
+--
+-- Name: idx_default_workflow_edge_parent; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_default_workflow_edge_parent ON qiita.default_workflow_edge USING btree (parent_id);
+
+
+--
+-- Name: idx_download_link_exp; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_download_link_exp ON qiita.download_link USING btree (exp);
+
+
+--
+-- Name: idx_ebi_run_accession_artifact_id; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_ebi_run_accession_artifact_id ON qiita.ebi_run_accession USING btree (artifact_id);
+
+
+--
+-- Name: idx_ebi_run_accession_sid; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_ebi_run_accession_sid ON qiita.ebi_run_accession USING btree (sample_id);
+
+
+--
+-- Name: idx_filepath; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_filepath ON qiita.filepath USING btree (filepath_type_id);
+
+
+--
+-- Name: idx_filepath_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_filepath_0 ON qiita.filepath USING btree (data_directory_id);
+
+
+--
+-- Name: idx_investigation; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_investigation ON qiita.investigation USING btree (contact_person_id);
+
+
+--
+-- Name: idx_investigation_study_investigation; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_investigation_study_investigation ON qiita.investigation_study USING btree (investigation_id);
+
+
+--
+-- Name: idx_investigation_study_study; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_investigation_study_study ON qiita.investigation_study USING btree (study_id);
+
+
+--
+-- Name: idx_logging_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_logging_0 ON qiita.logging USING btree (severity_id);
+
+
+--
+-- Name: idx_message_user_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_message_user_0 ON qiita.message_user USING btree (message_id);
+
+
+--
+-- Name: idx_message_user_1; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_message_user_1 ON qiita.message_user USING btree (email);
+
+
+--
+-- Name: idx_oauth_software_client; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_oauth_software_client ON qiita.oauth_software USING btree (client_id);
+
+
+--
+-- Name: idx_oauth_software_software; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_oauth_software_software ON qiita.oauth_software USING btree (software_id);
+
+
+--
+-- Name: idx_parameter_artifact_type_param_id; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_parameter_artifact_type_param_id ON qiita.parameter_artifact_type USING btree (command_parameter_id);
+
+
+--
+-- Name: idx_parameter_artifact_type_type_id; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_parameter_artifact_type_type_id ON qiita.parameter_artifact_type USING btree (artifact_type_id);
+
+
+--
+-- Name: idx_parent_artifact_artifact; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_parent_artifact_artifact ON qiita.parent_artifact USING btree (artifact_id);
+
+
+--
+-- Name: idx_parent_artifact_parent; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_parent_artifact_parent ON qiita.parent_artifact USING btree (parent_id);
+
+
+--
+-- Name: idx_parent_processing_job_child; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_parent_processing_job_child ON qiita.parent_processing_job USING btree (child_id);
+
+
+--
+-- Name: idx_parent_processing_job_parent; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_parent_processing_job_parent ON qiita.parent_processing_job USING btree (parent_id);
+
+
+--
+-- Name: idx_prep_template; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_prep_template ON qiita.prep_template USING btree (data_type_id);
+
+
+--
+-- Name: idx_prep_template_artifact_id; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_prep_template_artifact_id ON qiita.prep_template USING btree (artifact_id);
+
+
+--
+-- Name: idx_prep_template_processing_job_job; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_prep_template_processing_job_job ON qiita.prep_template_processing_job USING btree (processing_job_id);
+
+
+--
+-- Name: idx_prep_template_processing_job_pt_id; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_prep_template_processing_job_pt_id ON qiita.prep_template_processing_job USING btree (prep_template_id);
+
+
+--
+-- Name: idx_preparation_artifact_prep_template_id; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_preparation_artifact_prep_template_id ON qiita.preparation_artifact USING btree (prep_template_id);
+
+
+--
+-- Name: idx_processing_job_command_id; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_processing_job_command_id ON qiita.processing_job USING btree (command_id);
+
+
+--
+-- Name: idx_processing_job_email; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_processing_job_email ON qiita.processing_job USING btree (email);
+
+
+--
+-- Name: idx_processing_job_logging; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_processing_job_logging ON qiita.processing_job USING btree (logging_id);
+
+
+--
+-- Name: idx_processing_job_status_id; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_processing_job_status_id ON qiita.processing_job USING btree (processing_job_status_id);
+
+
+--
+-- Name: idx_processing_job_validator_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_processing_job_validator_0 ON qiita.processing_job_validator USING btree (processing_job_id);
+
+
+--
+-- Name: idx_processing_job_validator_1; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_processing_job_validator_1 ON qiita.processing_job_validator USING btree (validator_id);
+
+
+--
+-- Name: idx_processing_job_workflow; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_processing_job_workflow ON qiita.processing_job_workflow USING btree (email);
+
+
+--
+-- Name: idx_processing_job_workflow_root_job; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_processing_job_workflow_root_job ON qiita.processing_job_workflow_root USING btree (processing_job_id);
+
+
+--
+-- Name: idx_processing_job_workflow_root_wf; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_processing_job_workflow_root_wf ON qiita.processing_job_workflow_root USING btree (processing_job_workflow_id);
+
+
+--
+-- Name: idx_reference; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_reference ON qiita.reference USING btree (sequence_filepath);
+
+
+--
+-- Name: idx_reference_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_reference_0 ON qiita.reference USING btree (taxonomy_filepath);
+
+
+--
+-- Name: idx_reference_1; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_reference_1 ON qiita.reference USING btree (tree_filepath);
+
+
+--
+-- Name: idx_required_prep_info_2; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_required_prep_info_2 ON qiita.prep_template_sample USING btree (sample_id);
+
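+-- Note: idx_required_prep_info_2 indexes the same column as
+-- idx_common_prep_info_0 above (prep_template_sample.sample_id).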
+
+--
+-- Name: idx_required_sample_info; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_required_sample_info ON qiita.study_sample USING btree (study_id);
+
+
+--
+-- Name: idx_software_artifact_type_artifact; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_software_artifact_type_artifact ON qiita.software_artifact_type USING btree (artifact_type_id);
+
+
+--
+-- Name: idx_software_artifact_type_software; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_software_artifact_type_software ON qiita.software_artifact_type USING btree (software_id);
+
+
+--
+-- Name: idx_software_command; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_software_command ON qiita.software_command USING btree (software_id);
+
+
+--
+-- Name: idx_software_publication_publication; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_software_publication_publication ON qiita.software_publication USING btree (publication_doi);
+
+
+--
+-- Name: idx_software_publication_software; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_software_publication_software ON qiita.software_publication USING btree (software_id);
+
+
+--
+-- Name: idx_software_type; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_software_type ON qiita.software USING btree (software_type_id);
+
+
+--
+-- Name: idx_study; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study ON qiita.study USING btree (email);
+
+
+--
+-- Name: idx_study_2; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_2 ON qiita.study USING btree (lab_person_id);
+
+
+--
+-- Name: idx_study_3; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_3 ON qiita.study USING btree (principal_investigator_id);
+
+
+--
+-- Name: idx_study_4; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_4 ON qiita.study USING btree (timeseries_type_id);
+
+
+--
+-- Name: idx_study_artifact_artifact; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_artifact_artifact ON qiita.study_artifact USING btree (artifact_id);
+
+
+--
+-- Name: idx_study_artifact_study; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_artifact_study ON qiita.study_artifact USING btree (study_id);
+
+
+--
+-- Name: idx_study_environmental_package; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_environmental_package ON qiita.study_environmental_package USING btree (study_id);
+
+
+--
+-- Name: idx_study_environmental_package_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_environmental_package_0 ON qiita.study_environmental_package USING btree (environmental_package_name);
+
+
+--
+-- Name: idx_study_portal; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_portal ON qiita.study_portal USING btree (study_id);
+
+
+--
+-- Name: idx_study_portal_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_portal_0 ON qiita.study_portal USING btree (portal_type_id);
+
+
+--
+-- Name: idx_study_prep_template_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_prep_template_0 ON qiita.study_prep_template USING btree (study_id);
+
+
+--
+-- Name: idx_study_prep_template_1; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_prep_template_1 ON qiita.study_prep_template USING btree (prep_template_id);
+
+
+--
+-- Name: idx_study_publication_doi; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_publication_doi ON qiita.study_publication USING btree (publication);
+
+
+--
+-- Name: idx_study_publication_study; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_publication_study ON qiita.study_publication USING btree (study_id);
+
+
+--
+-- Name: idx_study_users_0; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_users_0 ON qiita.study_users USING btree (study_id);
+
+
+--
+-- Name: idx_study_users_1; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_study_users_1 ON qiita.study_users USING btree (email);
+
+
+--
+-- Name: idx_term; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_term ON qiita.term USING btree (ontology_id);
+
+
+--
+-- Name: idx_user; Type: INDEX; Schema: qiita
+--
+
+CREATE INDEX idx_user ON qiita.qiita_user USING btree (user_level_id);
+
+
+--
+-- Name: analysis_artifact fk_analysis_artifact_analysis; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_artifact
+ ADD CONSTRAINT fk_analysis_artifact_analysis FOREIGN KEY (analysis_id) REFERENCES qiita.analysis(analysis_id);
+
+
+--
+-- Name: analysis_artifact fk_analysis_artifact_artifact; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_artifact
+ ADD CONSTRAINT fk_analysis_artifact_artifact FOREIGN KEY (artifact_id) REFERENCES qiita.artifact(artifact_id);
+
+
+--
+-- Name: analysis_filepath fk_analysis_filepath; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_filepath
+ ADD CONSTRAINT fk_analysis_filepath FOREIGN KEY (analysis_id) REFERENCES qiita.analysis(analysis_id);
+
+
+--
+-- Name: analysis_filepath fk_analysis_filepath_0; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_filepath
+ ADD CONSTRAINT fk_analysis_filepath_0 FOREIGN KEY (filepath_id) REFERENCES qiita.filepath(filepath_id);
+
+
+--
+-- Name: analysis_filepath fk_analysis_filepath_1; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_filepath
+ ADD CONSTRAINT fk_analysis_filepath_1 FOREIGN KEY (data_type_id) REFERENCES qiita.data_type(data_type_id);
+
+
+--
+-- Name: analysis fk_analysis_logging; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis
+ ADD CONSTRAINT fk_analysis_logging FOREIGN KEY (logging_id) REFERENCES qiita.logging(logging_id);
+
+
+--
+-- Name: analysis_portal fk_analysis_portal; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_portal
+ ADD CONSTRAINT fk_analysis_portal FOREIGN KEY (analysis_id) REFERENCES qiita.analysis(analysis_id);
+
+
+--
+-- Name: analysis_portal fk_analysis_portal_0; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_portal
+ ADD CONSTRAINT fk_analysis_portal_0 FOREIGN KEY (portal_type_id) REFERENCES qiita.portal_type(portal_type_id);
+
+
+--
+-- Name: analysis_processing_job fk_analysis_processing_job; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_processing_job
+ ADD CONSTRAINT fk_analysis_processing_job FOREIGN KEY (analysis_id) REFERENCES qiita.analysis(analysis_id);
+
+
+--
+-- Name: analysis_processing_job fk_analysis_processing_job_pj; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_processing_job
+ ADD CONSTRAINT fk_analysis_processing_job_pj FOREIGN KEY (processing_job_id) REFERENCES qiita.processing_job(processing_job_id);
+
+
+--
+-- Name: analysis_sample fk_analysis_sample; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_sample
+ ADD CONSTRAINT fk_analysis_sample FOREIGN KEY (sample_id) REFERENCES qiita.study_sample(sample_id) ON UPDATE CASCADE;
+
+
+--
+-- Name: analysis_sample fk_analysis_sample_analysis; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_sample
+ ADD CONSTRAINT fk_analysis_sample_analysis FOREIGN KEY (analysis_id) REFERENCES qiita.analysis(analysis_id);
+
+
+--
+-- Name: analysis_sample fk_analysis_sample_artifact; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_sample
+ ADD CONSTRAINT fk_analysis_sample_artifact FOREIGN KEY (artifact_id) REFERENCES qiita.artifact(artifact_id);
+
+
+--
+-- Name: analysis fk_analysis_user; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis
+ ADD CONSTRAINT fk_analysis_user FOREIGN KEY (email) REFERENCES qiita.qiita_user(email) ON UPDATE CASCADE;
+
+
+--
+-- Name: analysis_users fk_analysis_users_analysis; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_users
+ ADD CONSTRAINT fk_analysis_users_analysis FOREIGN KEY (analysis_id) REFERENCES qiita.analysis(analysis_id) ON UPDATE CASCADE ON DELETE CASCADE;
+
+
+--
+-- Name: analysis_users fk_analysis_users_user; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.analysis_users
+ ADD CONSTRAINT fk_analysis_users_user FOREIGN KEY (email) REFERENCES qiita.qiita_user(email) ON UPDATE CASCADE ON DELETE CASCADE;
+
+
+--
+-- Name: archive_feature_value fk_archive_feature_value; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.archive_feature_value
+ ADD CONSTRAINT fk_archive_feature_value FOREIGN KEY (archive_merging_scheme_id) REFERENCES qiita.archive_merging_scheme(archive_merging_scheme_id);
+
+
+--
+-- Name: artifact fk_artifact_data_type; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact
+ ADD CONSTRAINT fk_artifact_data_type FOREIGN KEY (data_type_id) REFERENCES qiita.data_type(data_type_id);
+
+
+--
+-- Name: artifact_filepath fk_artifact_filepath_artifact; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_filepath
+ ADD CONSTRAINT fk_artifact_filepath_artifact FOREIGN KEY (artifact_id) REFERENCES qiita.artifact(artifact_id);
+
+
+--
+-- Name: artifact_filepath fk_artifact_filepath_filepath; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_filepath
+ ADD CONSTRAINT fk_artifact_filepath_filepath FOREIGN KEY (filepath_id) REFERENCES qiita.filepath(filepath_id);
+
+
+--
+-- Name: preparation_artifact fk_artifact_id; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.preparation_artifact
+ ADD CONSTRAINT fk_artifact_id FOREIGN KEY (artifact_id) REFERENCES qiita.artifact(artifact_id);
+
+
+--
+-- Name: artifact_output_processing_job fk_artifact_output_processing_job_artifact; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_output_processing_job
+ ADD CONSTRAINT fk_artifact_output_processing_job_artifact FOREIGN KEY (artifact_id) REFERENCES qiita.artifact(artifact_id);
+
+
+--
+-- Name: artifact_output_processing_job fk_artifact_output_processing_job_cmd; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_output_processing_job
+ ADD CONSTRAINT fk_artifact_output_processing_job_cmd FOREIGN KEY (command_output_id) REFERENCES qiita.command_output(command_output_id);
+
+
+--
+-- Name: artifact_output_processing_job fk_artifact_output_processing_job_job; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_output_processing_job
+ ADD CONSTRAINT fk_artifact_output_processing_job_job FOREIGN KEY (processing_job_id) REFERENCES qiita.processing_job(processing_job_id);
+
+
+--
+-- Name: artifact_processing_job fk_artifact_processing_job; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_processing_job
+ ADD CONSTRAINT fk_artifact_processing_job FOREIGN KEY (artifact_id) REFERENCES qiita.artifact(artifact_id);
+
+
+--
+-- Name: artifact_processing_job fk_artifact_processing_job_0; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_processing_job
+ ADD CONSTRAINT fk_artifact_processing_job_0 FOREIGN KEY (processing_job_id) REFERENCES qiita.processing_job(processing_job_id);
+
+
+--
+-- Name: artifact fk_artifact_software_command; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact
+ ADD CONSTRAINT fk_artifact_software_command FOREIGN KEY (command_id) REFERENCES qiita.software_command(command_id);
+
+
+--
+-- Name: artifact fk_artifact_type; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact
+ ADD CONSTRAINT fk_artifact_type FOREIGN KEY (artifact_type_id) REFERENCES qiita.artifact_type(artifact_type_id);
+
+
+--
+-- Name: artifact_type_filepath_type fk_artifact_type_filepath_type_at; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_type_filepath_type
+ ADD CONSTRAINT fk_artifact_type_filepath_type_at FOREIGN KEY (artifact_type_id) REFERENCES qiita.artifact_type(artifact_type_id);
+
+
+--
+-- Name: artifact_type_filepath_type fk_artifact_type_filepath_type_ft; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact_type_filepath_type
+ ADD CONSTRAINT fk_artifact_type_filepath_type_ft FOREIGN KEY (filepath_type_id) REFERENCES qiita.filepath_type(filepath_type_id);
+
+
+--
+-- Name: default_workflow fk_artifact_type_id; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow
+ ADD CONSTRAINT fk_artifact_type_id FOREIGN KEY (artifact_type_id) REFERENCES qiita.artifact_type(artifact_type_id) ON UPDATE CASCADE;
+
+
+--
+-- Name: artifact fk_artifact_visibility; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.artifact
+ ADD CONSTRAINT fk_artifact_visibility FOREIGN KEY (visibility_id) REFERENCES qiita.visibility(visibility_id);
+
+
+--
+-- Name: column_controlled_vocabularies fk_column_controlled_vocab2; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.column_controlled_vocabularies
+ ADD CONSTRAINT fk_column_controlled_vocab2 FOREIGN KEY (controlled_vocab_id) REFERENCES qiita.controlled_vocab(controlled_vocab_id);
+
+
+--
+-- Name: column_controlled_vocabularies fk_column_controlled_vocabularies; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.column_controlled_vocabularies
+ ADD CONSTRAINT fk_column_controlled_vocabularies FOREIGN KEY (column_name) REFERENCES qiita.mixs_field_description(column_name);
+
+
+--
+-- Name: column_ontology fk_column_ontology; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.column_ontology
+ ADD CONSTRAINT fk_column_ontology FOREIGN KEY (column_name) REFERENCES qiita.mixs_field_description(column_name);
+
+
+--
+-- Name: command_output fk_command_output; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.command_output
+ ADD CONSTRAINT fk_command_output FOREIGN KEY (command_id) REFERENCES qiita.software_command(command_id);
+
+
+--
+-- Name: command_output fk_command_output_0; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.command_output
+ ADD CONSTRAINT fk_command_output_0 FOREIGN KEY (artifact_type_id) REFERENCES qiita.artifact_type(artifact_type_id);
+
+
+--
+-- Name: command_parameter fk_command_parameter; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.command_parameter
+ ADD CONSTRAINT fk_command_parameter FOREIGN KEY (command_id) REFERENCES qiita.software_command(command_id);
+
+
+--
+-- Name: prep_template_sample fk_common_prep_info; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_template_sample
+ ADD CONSTRAINT fk_common_prep_info FOREIGN KEY (sample_id) REFERENCES qiita.study_sample(sample_id) ON UPDATE CASCADE;
+
+
+--
+-- Name: controlled_vocab_values fk_controlled_vocab_values; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.controlled_vocab_values
+ ADD CONSTRAINT fk_controlled_vocab_values FOREIGN KEY (controlled_vocab_id) REFERENCES qiita.controlled_vocab(controlled_vocab_id) ON UPDATE CASCADE ON DELETE CASCADE;
+
+
+--
+-- Name: default_workflow_data_type fk_data_type_id; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_data_type
+ ADD CONSTRAINT fk_data_type_id FOREIGN KEY (data_type_id) REFERENCES qiita.data_type(data_type_id);
+
+
+--
+-- Name: default_parameter_set fk_default_parameter_set; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_parameter_set
+ ADD CONSTRAINT fk_default_parameter_set FOREIGN KEY (command_id) REFERENCES qiita.software_command(command_id);
+
+
+--
+-- Name: default_workflow_node fk_default_workflow_command_0; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_node
+ ADD CONSTRAINT fk_default_workflow_command_0 FOREIGN KEY (default_parameter_set_id) REFERENCES qiita.default_parameter_set(default_parameter_set_id);
+
+
+--
+-- Name: default_workflow_node fk_default_workflow_command_1; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_node
+ ADD CONSTRAINT fk_default_workflow_command_1 FOREIGN KEY (default_workflow_id) REFERENCES qiita.default_workflow(default_workflow_id);
+
+
+--
+-- Name: default_workflow_edge fk_default_workflow_edge; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_edge
+ ADD CONSTRAINT fk_default_workflow_edge FOREIGN KEY (parent_id) REFERENCES qiita.default_workflow_node(default_workflow_node_id);
+
+
+--
+-- Name: default_workflow_edge fk_default_workflow_edge_0; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_edge
+ ADD CONSTRAINT fk_default_workflow_edge_0 FOREIGN KEY (child_id) REFERENCES qiita.default_workflow_node(default_workflow_node_id);
+
+
+--
+-- Name: default_workflow_edge_connections fk_default_workflow_edge_connections; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_edge_connections
+ ADD CONSTRAINT fk_default_workflow_edge_connections FOREIGN KEY (parent_output_id) REFERENCES qiita.command_output(command_output_id);
+
+
+--
+-- Name: default_workflow_edge_connections fk_default_workflow_edge_connections_0; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_edge_connections
+ ADD CONSTRAINT fk_default_workflow_edge_connections_0 FOREIGN KEY (child_input_id) REFERENCES qiita.command_parameter(command_parameter_id);
+
+
+--
+-- Name: default_workflow_edge_connections fk_default_workflow_edge_connections_1; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_edge_connections
+ ADD CONSTRAINT fk_default_workflow_edge_connections_1 FOREIGN KEY (default_workflow_edge_id) REFERENCES qiita.default_workflow_edge(default_workflow_edge_id);
+
+
+--
+-- Name: default_workflow_data_type fk_default_workflow_id; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.default_workflow_data_type
+ ADD CONSTRAINT fk_default_workflow_id FOREIGN KEY (default_workflow_id) REFERENCES qiita.default_workflow(default_workflow_id);
+
+
+--
+-- Name: ebi_run_accession fk_ebi_run_accesion_artifact; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.ebi_run_accession
+ ADD CONSTRAINT fk_ebi_run_accesion_artifact FOREIGN KEY (artifact_id) REFERENCES qiita.artifact(artifact_id);
+
+
+--
+-- Name: ebi_run_accession fk_ebi_run_accession; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.ebi_run_accession
+ ADD CONSTRAINT fk_ebi_run_accession FOREIGN KEY (sample_id) REFERENCES qiita.study_sample(sample_id);
+
+
+--
+-- Name: study_tags fk_email; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_tags
+ ADD CONSTRAINT fk_email FOREIGN KEY (email) REFERENCES qiita.qiita_user(email);
+
+
+--
+-- Name: filepath fk_filepath; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.filepath
+ ADD CONSTRAINT fk_filepath FOREIGN KEY (filepath_type_id) REFERENCES qiita.filepath_type(filepath_type_id);
+
+
+--
+-- Name: filepath fk_filepath_0; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.filepath
+ ADD CONSTRAINT fk_filepath_0 FOREIGN KEY (checksum_algorithm_id) REFERENCES qiita.checksum_algorithm(checksum_algorithm_id);
+
+
+--
+-- Name: filepath fk_filepath_data_directory; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.filepath
+ ADD CONSTRAINT fk_filepath_data_directory FOREIGN KEY (data_directory_id) REFERENCES qiita.data_directory(data_directory_id) ON UPDATE RESTRICT ON DELETE RESTRICT;
+
+
+--
+-- Name: prep_template_filepath fk_filepath_id; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_template_filepath
+ ADD CONSTRAINT fk_filepath_id FOREIGN KEY (filepath_id) REFERENCES qiita.filepath(filepath_id);
+
+
+--
+-- Name: sample_template_filepath fk_filepath_id; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.sample_template_filepath
+ ADD CONSTRAINT fk_filepath_id FOREIGN KEY (filepath_id) REFERENCES qiita.filepath(filepath_id);
+
+
+--
+-- Name: investigation_study fk_investigation_study; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.investigation_study
+ ADD CONSTRAINT fk_investigation_study FOREIGN KEY (investigation_id) REFERENCES qiita.investigation(investigation_id);
+
+
+--
+-- Name: investigation fk_investigation_study_person; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.investigation
+ ADD CONSTRAINT fk_investigation_study_person FOREIGN KEY (contact_person_id) REFERENCES qiita.study_person(study_person_id);
+
+
+--
+-- Name: investigation_study fk_investigation_study_study; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.investigation_study
+ ADD CONSTRAINT fk_investigation_study_study FOREIGN KEY (study_id) REFERENCES qiita.study(study_id);
+
+
+--
+-- Name: logging fk_logging_severity; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.logging
+ ADD CONSTRAINT fk_logging_severity FOREIGN KEY (severity_id) REFERENCES qiita.severity(severity_id);
+
+
+--
+-- Name: message_user fk_message_user; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.message_user
+ ADD CONSTRAINT fk_message_user FOREIGN KEY (message_id) REFERENCES qiita.message(message_id);
+
+
+--
+-- Name: message_user fk_message_user_0; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.message_user
+ ADD CONSTRAINT fk_message_user_0 FOREIGN KEY (email) REFERENCES qiita.qiita_user(email) ON UPDATE CASCADE;
+
+
+--
+-- Name: oauth_software fk_oauth_software; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.oauth_software
+ ADD CONSTRAINT fk_oauth_software FOREIGN KEY (client_id) REFERENCES qiita.oauth_identifiers(client_id);
+
+
+--
+-- Name: oauth_software fk_oauth_software_software; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.oauth_software
+ ADD CONSTRAINT fk_oauth_software_software FOREIGN KEY (software_id) REFERENCES qiita.software(software_id);
+
+
+--
+-- Name: parameter_artifact_type fk_parameter_artifact_type; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.parameter_artifact_type
+ ADD CONSTRAINT fk_parameter_artifact_type FOREIGN KEY (command_parameter_id) REFERENCES qiita.command_parameter(command_parameter_id);
+
+
+--
+-- Name: parameter_artifact_type fk_parameter_artifact_type_0; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.parameter_artifact_type
+ ADD CONSTRAINT fk_parameter_artifact_type_0 FOREIGN KEY (artifact_type_id) REFERENCES qiita.artifact_type(artifact_type_id);
+
+
+--
+-- Name: parent_artifact fk_parent_artifact_artifact; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.parent_artifact
+ ADD CONSTRAINT fk_parent_artifact_artifact FOREIGN KEY (artifact_id) REFERENCES qiita.artifact(artifact_id);
+
+
+--
+-- Name: parent_artifact fk_parent_artifact_parent; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.parent_artifact
+ ADD CONSTRAINT fk_parent_artifact_parent FOREIGN KEY (parent_id) REFERENCES qiita.artifact(artifact_id);
+
+
+--
+-- Name: parent_processing_job fk_parent_processing_job_child; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.parent_processing_job
+ ADD CONSTRAINT fk_parent_processing_job_child FOREIGN KEY (child_id) REFERENCES qiita.processing_job(processing_job_id);
+
+
+--
+-- Name: parent_processing_job fk_parent_processing_job_parent; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.parent_processing_job
+ ADD CONSTRAINT fk_parent_processing_job_parent FOREIGN KEY (parent_id) REFERENCES qiita.processing_job(processing_job_id);
+
+
+--
+-- Name: prep_template_sample fk_prep_template; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_template_sample
+ ADD CONSTRAINT fk_prep_template FOREIGN KEY (prep_template_id) REFERENCES qiita.prep_template(prep_template_id);
+
+
+--
+-- Name: prep_template fk_prep_template_artifact; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_template
+ ADD CONSTRAINT fk_prep_template_artifact FOREIGN KEY (artifact_id) REFERENCES qiita.artifact(artifact_id);
+
+
+--
+-- Name: prep_template fk_prep_template_data_type; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_template
+ ADD CONSTRAINT fk_prep_template_data_type FOREIGN KEY (data_type_id) REFERENCES qiita.data_type(data_type_id);
+
+
+--
+-- Name: prep_template_filepath fk_prep_template_id; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_template_filepath
+ ADD CONSTRAINT fk_prep_template_id FOREIGN KEY (prep_template_id) REFERENCES qiita.prep_template(prep_template_id);
+
+
+--
+-- Name: preparation_artifact fk_prep_template_id; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.preparation_artifact
+ ADD CONSTRAINT fk_prep_template_id FOREIGN KEY (prep_template_id) REFERENCES qiita.prep_template(prep_template_id);
+
+
+--
+-- Name: prep_template_processing_job fk_prep_template_processing_job_job; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_template_processing_job
+ ADD CONSTRAINT fk_prep_template_processing_job_job FOREIGN KEY (processing_job_id) REFERENCES qiita.processing_job(processing_job_id);
+
+
+--
+-- Name: prep_template_processing_job fk_prep_template_processing_job_pt; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.prep_template_processing_job
+ ADD CONSTRAINT fk_prep_template_processing_job_pt FOREIGN KEY (prep_template_id) REFERENCES qiita.prep_template(prep_template_id);
+
+
+--
+-- Name: processing_job fk_processing_job; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job
+ ADD CONSTRAINT fk_processing_job FOREIGN KEY (command_id) REFERENCES qiita.software_command(command_id);
+
+
+--
+-- Name: processing_job fk_processing_job_logging; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job
+ ADD CONSTRAINT fk_processing_job_logging FOREIGN KEY (logging_id) REFERENCES qiita.logging(logging_id);
+
+
+--
+-- Name: processing_job fk_processing_job_qiita_user; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job
+ ADD CONSTRAINT fk_processing_job_qiita_user FOREIGN KEY (email) REFERENCES qiita.qiita_user(email) ON UPDATE CASCADE;
+
+
+--
+-- Name: processing_job fk_processing_job_status; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job
+ ADD CONSTRAINT fk_processing_job_status FOREIGN KEY (processing_job_status_id) REFERENCES qiita.processing_job_status(processing_job_status_id);
+
+
+--
+-- Name: processing_job_validator fk_processing_job_validator_c; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job_validator
+ ADD CONSTRAINT fk_processing_job_validator_c FOREIGN KEY (validator_id) REFERENCES qiita.processing_job(processing_job_id);
+
+
+--
+-- Name: processing_job_validator fk_processing_job_validator_p; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job_validator
+ ADD CONSTRAINT fk_processing_job_validator_p FOREIGN KEY (processing_job_id) REFERENCES qiita.processing_job(processing_job_id);
+
+
+--
+-- Name: processing_job_workflow fk_processing_job_workflow; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job_workflow
+ ADD CONSTRAINT fk_processing_job_workflow FOREIGN KEY (email) REFERENCES qiita.qiita_user(email) ON UPDATE CASCADE;
+
+
+--
+-- Name: processing_job_workflow_root fk_processing_job_workflow_root_job; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job_workflow_root
+ ADD CONSTRAINT fk_processing_job_workflow_root_job FOREIGN KEY (processing_job_workflow_id) REFERENCES qiita.processing_job_workflow(processing_job_workflow_id);
+
+
+--
+-- Name: processing_job_workflow_root fk_processing_job_workflow_root_wf; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.processing_job_workflow_root
+ ADD CONSTRAINT fk_processing_job_workflow_root_wf FOREIGN KEY (processing_job_id) REFERENCES qiita.processing_job(processing_job_id);
+
+
+--
+-- Name: reference fk_reference_sequence_filepath; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.reference
+ ADD CONSTRAINT fk_reference_sequence_filepath FOREIGN KEY (sequence_filepath) REFERENCES qiita.filepath(filepath_id);
+
+
+--
+-- Name: reference fk_reference_taxonomy_filepath; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.reference
+ ADD CONSTRAINT fk_reference_taxonomy_filepath FOREIGN KEY (taxonomy_filepath) REFERENCES qiita.filepath(filepath_id);
+
+
+--
+-- Name: reference fk_reference_tree_filepath; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.reference
+ ADD CONSTRAINT fk_reference_tree_filepath FOREIGN KEY (tree_filepath) REFERENCES qiita.filepath(filepath_id);
+
+
+--
+-- Name: study_sample fk_required_sample_info_study; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_sample
+ ADD CONSTRAINT fk_required_sample_info_study FOREIGN KEY (study_id) REFERENCES qiita.study(study_id);
+
+
+--
+-- Name: software_artifact_type fk_software_artifact_type_at; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.software_artifact_type
+ ADD CONSTRAINT fk_software_artifact_type_at FOREIGN KEY (artifact_type_id) REFERENCES qiita.artifact_type(artifact_type_id);
+
+
+--
+-- Name: software_artifact_type fk_software_artifact_type_sw; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.software_artifact_type
+ ADD CONSTRAINT fk_software_artifact_type_sw FOREIGN KEY (software_id) REFERENCES qiita.software(software_id);
+
+
+--
+-- Name: software_command fk_software_command_software; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.software_command
+ ADD CONSTRAINT fk_software_command_software FOREIGN KEY (software_id) REFERENCES qiita.software(software_id);
+
+
+--
+-- Name: software_publication fk_software_publication; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.software_publication
+ ADD CONSTRAINT fk_software_publication FOREIGN KEY (software_id) REFERENCES qiita.software(software_id);
+
+
+--
+-- Name: software_publication fk_software_publication_0; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.software_publication
+ ADD CONSTRAINT fk_software_publication_0 FOREIGN KEY (publication_doi) REFERENCES qiita.publication(doi);
+
+
+--
+-- Name: software fk_software_software_type; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.software
+ ADD CONSTRAINT fk_software_software_type FOREIGN KEY (software_type_id) REFERENCES qiita.software_type(software_type_id);
+
+
+--
+-- Name: study_artifact fk_study_artifact_artifact; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_artifact
+ ADD CONSTRAINT fk_study_artifact_artifact FOREIGN KEY (artifact_id) REFERENCES qiita.artifact(artifact_id);
+
+
+--
+-- Name: study_artifact fk_study_artifact_study; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_artifact
+ ADD CONSTRAINT fk_study_artifact_study FOREIGN KEY (study_id) REFERENCES qiita.study(study_id);
+
+
+--
+-- Name: study_environmental_package fk_study_environmental_package; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_environmental_package
+ ADD CONSTRAINT fk_study_environmental_package FOREIGN KEY (study_id) REFERENCES qiita.study(study_id);
+
+
+--
+-- Name: study_environmental_package fk_study_environmental_package_0; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_environmental_package
+ ADD CONSTRAINT fk_study_environmental_package_0 FOREIGN KEY (environmental_package_name) REFERENCES qiita.environmental_package(environmental_package_name);
+
+
+--
+-- Name: sample_template_filepath fk_study_id; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.sample_template_filepath
+ ADD CONSTRAINT fk_study_id FOREIGN KEY (study_id) REFERENCES qiita.study(study_id);
+
+
+--
+-- Name: per_study_tags fk_study_id; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.per_study_tags
+ ADD CONSTRAINT fk_study_id FOREIGN KEY (study_id) REFERENCES qiita.study(study_id);
+
+
+--
+-- Name: study_portal fk_study_portal; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_portal
+ ADD CONSTRAINT fk_study_portal FOREIGN KEY (study_id) REFERENCES qiita.study(study_id);
+
+
+--
+-- Name: study_portal fk_study_portal_0; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_portal
+ ADD CONSTRAINT fk_study_portal_0 FOREIGN KEY (portal_type_id) REFERENCES qiita.portal_type(portal_type_id);
+
+
+--
+-- Name: study_prep_template fk_study_prep_template_pt; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_prep_template
+ ADD CONSTRAINT fk_study_prep_template_pt FOREIGN KEY (prep_template_id) REFERENCES qiita.prep_template(prep_template_id);
+
+
+--
+-- Name: study_prep_template fk_study_prep_template_study; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_prep_template
+ ADD CONSTRAINT fk_study_prep_template_study FOREIGN KEY (study_id) REFERENCES qiita.study(study_id);
+
+
+--
+-- Name: study fk_study_study_lab_person; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study
+ ADD CONSTRAINT fk_study_study_lab_person FOREIGN KEY (lab_person_id) REFERENCES qiita.study_person(study_person_id);
+
+
+--
+-- Name: study fk_study_study_pi_person; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study
+ ADD CONSTRAINT fk_study_study_pi_person FOREIGN KEY (principal_investigator_id) REFERENCES qiita.study_person(study_person_id);
+
+
+--
+-- Name: per_study_tags fk_study_tags; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.per_study_tags
+ ADD CONSTRAINT fk_study_tags FOREIGN KEY (study_tag) REFERENCES qiita.study_tags(study_tag);
+
+
+--
+-- Name: study fk_study_timeseries_type; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study
+ ADD CONSTRAINT fk_study_timeseries_type FOREIGN KEY (timeseries_type_id) REFERENCES qiita.timeseries_type(timeseries_type_id);
+
+
+--
+-- Name: study fk_study_user; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study
+ ADD CONSTRAINT fk_study_user FOREIGN KEY (email) REFERENCES qiita.qiita_user(email) ON UPDATE CASCADE;
+
+
+--
+-- Name: study_users fk_study_users_study; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_users
+ ADD CONSTRAINT fk_study_users_study FOREIGN KEY (study_id) REFERENCES qiita.study(study_id);
+
+
+--
+-- Name: study_users fk_study_users_user; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.study_users
+ ADD CONSTRAINT fk_study_users_user FOREIGN KEY (email) REFERENCES qiita.qiita_user(email) ON UPDATE CASCADE;
-CREATE INDEX idx_analysis_sample_0 ON qiita.analysis_sample ( processed_data_id );
-CREATE INDEX idx_analysis_sample_1 ON qiita.analysis_sample ( sample_id );
-CREATE TABLE qiita.common_prep_info (
-    prep_template_id bigint NOT NULL,
-    sample_id varchar NOT NULL,
-    center_name varchar,
-    center_project_name varchar,
-    emp_status_id bigint NOT NULL,
-    CONSTRAINT idx_common_prep_info PRIMARY KEY ( prep_template_id, sample_id ),
-    CONSTRAINT fk_required_prep_info_emp_status FOREIGN KEY ( emp_status_id ) REFERENCES qiita.emp_status( emp_status_id ),
-    CONSTRAINT fk_common_prep_info FOREIGN KEY ( sample_id ) REFERENCES qiita.required_sample_info( sample_id ),
-    CONSTRAINT fk_prep_template FOREIGN KEY ( prep_template_id ) REFERENCES qiita.prep_template( prep_template_id )
- );
-CREATE INDEX idx_required_prep_info_0 ON qiita.common_prep_info ( emp_status_id );
-CREATE INDEX idx_required_prep_info_2 ON qiita.common_prep_info ( sample_id );
-CREATE INDEX idx_common_prep_info_0 ON qiita.common_prep_info ( sample_id );
-CREATE INDEX idx_common_prep_info_1 ON qiita.common_prep_info ( prep_template_id );
-COMMENT ON COLUMN qiita.common_prep_info.prep_template_id IS 'The prep template identifier';
+
+
+--
+-- Name: term fk_term_ontology; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.term
+    ADD CONSTRAINT fk_term_ontology FOREIGN KEY (ontology_id) REFERENCES qiita.ontology(ontology_id);
+
+
+--
+-- Name: qiita_user fk_user_user_level; Type: FK CONSTRAINT; Schema: qiita
+--
+
+ALTER TABLE ONLY qiita.qiita_user
+    ADD CONSTRAINT fk_user_user_level FOREIGN KEY (user_level_id) REFERENCES qiita.user_level(user_level_id) ON UPDATE RESTRICT;
+
+
+--
+-- PostgreSQL database dump complete
+--
diff --git a/qiita_db/util.py b/qiita_db/util.py
index 07b4bdf3e..ebfe0ed11 100644
--- a/qiita_db/util.py
+++ b/qiita_db/util.py
@@ -410,7 +410,11 @@ def get_db_files_base_dir():
"""
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add("SELECT base_data_dir FROM settings")
- return qdb.sql_connection.TRN.execute_fetchlast()
+ basedir = qdb.sql_connection.TRN.execute_fetchlast()
+ # making sure that it never ends in a "/" as most tests expect this
+ if basedir.endswith("/"):
+ basedir = basedir[:-1]
+ return basedir
def get_work_base_dir():
diff --git a/qiita_pet/handlers/download.py b/qiita_pet/handlers/download.py
index bf0242773..5ce61aa6a 100644
--- a/qiita_pet/handlers/download.py
+++ b/qiita_pet/handlers/download.py
@@ -427,7 +427,8 @@ def get(self):
                 infofile._filepath_table, infofile._id_column, infofile.id,
                 sort='descending')[0]

-            basedir_len = len(get_db_files_base_dir()) + 1
+            basedir = get_db_files_base_dir()
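+            # the +1 also strips the "/" that separates the base dir from
+            # the relative path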
+            basedir_len = len(basedir) + 1
             fp = x['fp'][basedir_len:]
             to_download.append((fp, fp, '-', str(x['fp_size'])))
         self._write_nginx_file_list(to_download)
diff --git a/scripts/qiita-env b/scripts/qiita-env
index 3afa8709a..4cf284393 100755
--- a/scripts/qiita-env
+++ b/scripts/qiita-env
@@ -35,8 +35,17 @@ def env():
'"password"')
def make(load_ontologies, download_reference, add_demo_user):
"""Creates the database specified in config"""
-    qdb.environment_manager.make_environment(
-        load_ontologies, download_reference, add_demo_user)
+    try:
+        qdb.environment_manager.make_environment(
+            load_ontologies, download_reference, add_demo_user)
+    except Exception as e:
+        if "Database qiita_test already present on the system." not in str(e):
+            # dropping the half-created environment lets us try again
+            # without having to open another window to remove it. This
+            # is fine because we were actually creating a new
+            # environment anyway.
+            qdb.environment_manager.drop_environment(False)
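+        # re-raise so the caller still sees the original error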
+        raise e
@env.command()