) {\\n chop;\\n if (m/^>/) {\\n s/^>//;\\n if ($. > 1) {\\n print OUT sprintf(\\\"%.3f\\\", $gc/$length) . \\\"\\\\n\\\";\\n }\\n $gc = 0;\\n $length = 0;\\n } else {\\n ++$gc while m/[gc]/ig;\\n $length += length $_;\\n }\\n}\\nprint OUT sprintf(\\\"%.3f\\\", $gc/$length) . \\\"\\\\n\\\";\\nclose( IN );\\nclose( OUT );\"}, \"makeMode\": {\"make_Tool\": \"yes\", \"__current_case__\": 0, \"tool_version\": \"0.01\", \"tool_desc\": \"perl version of gc counter from planemo example\", \"help_text\": \"**What it Does**\\ncounts gc using, ugh, perl...\\n\", \"citations\": []}, \"ppass\": {\"parampass\": \"positional\", \"__current_case__\": 1, \"history_inputs\": [{\"__index__\": 0, \"input_files\": {\"__class__\": \"ConnectedValue\"}, \"input_formats\": [\"fasta\"], \"input_label\": \"input fasta file\", \"input_help\": \"parameter_help\", \"input_CL\": \"1\"}], \"history_outputs\": [{\"__index__\": 0, \"history_name\": \"output\", \"history_format\": \"tsv\", \"history_CL\": \"2\"}], \"edit_params\": \"no\", \"additional_parameters\": []}, \"tool_name\": \"perlgc\", \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "2.00",
+ "type": "tool",
+ "uuid": "3b6aab01-4759-4df6-801f-626678639e51",
+ "workflow_outputs": [
+ {
+ "label": null,
+ "output_name": "new_tool",
+ "uuid": "f964e779-2f92-4c81-9819-3e1ebc156664"
+ },
+ {
+ "label": null,
+ "output_name": "TF_run_report",
+ "uuid": "7aea56bd-4f39-4d3b-8254-a6675161d059"
+ }
+ ]
+ },
+ "3": {
+ "annotation": "",
+ "content_id": "toolshed.g2.bx.psu.edu/repos/fubar/tool_factory_2/rgTF2/2.00",
+ "errors": null,
+ "id": 3,
+ "input_connections": {
+ "ppass|history_inputs_0|input_files": {
+ "id": 1,
+ "output_name": "output"
+ }
+ },
+ "inputs": [],
+ "label": null,
+ "name": "toolfactory",
+ "outputs": [
+ {
+ "name": "TF_run_report",
+ "type": "input"
+ },
+ {
+ "name": "new_tool",
+ "type": "tgz"
+ }
+ ],
+ "position": {
+ "bottom": 492,
+ "height": 202,
+ "left": 613,
+ "right": 813,
+ "top": 290,
+ "width": 200,
+ "x": 613,
+ "y": 290
+ },
+ "post_job_actions": {},
+ "tool_id": "toolshed.g2.bx.psu.edu/repos/fubar/tool_factory_2/rgTF2/2.00",
+ "tool_shed_repository": {
+ "changeset_revision": "51fa77152988",
+ "name": "tool_factory_2",
+ "owner": "fubar",
+ "tool_shed": "toolshed.g2.bx.psu.edu"
+ },
+ "tool_state": "{\"__input_ext\": \"input\", \"chromInfo\": \"/home/ross/galaxy/tool-data/shared/ucsc/chrom/?.len\", \"interexe\": {\"interpreter\": \"system\", \"__current_case__\": 1, \"exe_package\": \"sed\", \"exe_package_version\": \"\"}, \"makeMode\": {\"make_Tool\": \"yes\", \"__current_case__\": 0, \"tool_version\": \"0.01\", \"tool_desc\": \"sed runner\", \"help_text\": \"sed '/old/new/g input.txt\", \"citations\": []}, \"ppass\": {\"parampass\": \"positional\", \"__current_case__\": 1, \"history_inputs\": [{\"__index__\": 0, \"input_files\": {\"__class__\": \"ConnectedValue\"}, \"input_formats\": [\"txt\"], \"input_label\": \"input text\", \"input_help\": \"parameter_help\", \"input_CL\": \"3\"}], \"history_outputs\": [{\"__index__\": 0, \"history_name\": \"output\", \"history_format\": \"txt\", \"history_CL\": \"STDOUT\"}], \"edit_params\": \"yes\", \"additional_parameters\": [{\"__index__\": 0, \"param_name\": \"sedstring\", \"param_type\": \"text\", \"param_value\": \"s/def/bjork!bjorkdef/g\", \"param_label\": \"parameter_label\", \"param_help\": \"parameter_help\", \"param_CL\": \"1\", \"param_CLprefixed\": \"\"}]}, \"tool_name\": \"sedtest\", \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "2.00",
+ "type": "tool",
+ "uuid": "2138c717-5128-4c4b-bc22-4809cd001c34",
+ "workflow_outputs": [
+ {
+ "label": null,
+ "output_name": "TF_run_report",
+ "uuid": "b0be8c95-7380-42b8-a16d-8e08578d4dd7"
+ },
+ {
+ "label": null,
+ "output_name": "new_tool",
+ "uuid": "56635519-a9a0-49eb-8305-59cc1fcef99f"
+ }
+ ]
+ },
+ "4": {
+ "annotation": "",
+ "content_id": "toolshed.g2.bx.psu.edu/repos/fubar/tool_factory_2/rgTF2/2.00",
+ "errors": null,
+ "id": 4,
+ "input_connections": {
+ "ppass|history_inputs_0|input_files": {
+ "id": 1,
+ "output_name": "output"
+ }
+ },
+ "inputs": [],
+ "label": null,
+ "name": "toolfactory",
+ "outputs": [
+ {
+ "name": "TF_run_report",
+ "type": "input"
+ },
+ {
+ "name": "new_tool",
+ "type": "tgz"
+ }
+ ],
+ "position": {
+ "bottom": 652,
+ "height": 242,
+ "left": 613,
+ "right": 813,
+ "top": 410,
+ "width": 200,
+ "x": 613,
+ "y": 410
+ },
+ "post_job_actions": {},
+ "tool_id": "toolshed.g2.bx.psu.edu/repos/fubar/tool_factory_2/rgTF2/2.00",
+ "tool_shed_repository": {
+ "changeset_revision": "51fa77152988",
+ "name": "tool_factory_2",
+ "owner": "fubar",
+ "tool_shed": "toolshed.g2.bx.psu.edu"
+ },
+ "tool_state": "{\"__input_ext\": \"input\", \"chromInfo\": \"/home/ross/galaxy/tool-data/shared/ucsc/chrom/?.len\", \"interexe\": {\"interpreter\": \"python\", \"__current_case__\": 2, \"interpreter_version\": \"\", \"exe_package_version\": \"\", \"dynScript\": \"# reverse order of text by row\\nimport sys\\ninp = sys.argv[1]\\noutp = sys.argv[2]\\nappendme = sys.argv[3]\\ni = open(inp,'r').readlines()\\no = open(outp,'w')\\nfor row in i:\\n rs = row.rstrip()\\n rs = list(rs)\\n rs.reverse()\\n o.write(''.join(rs))\\n o.write(appendme)\\n o.write('\\\\n')\\no.close()\"}, \"makeMode\": {\"make_Tool\": \"yes\", \"__current_case__\": 0, \"tool_version\": \"0.01\", \"tool_desc\": \"pyrevpos\", \"help_text\": \"**What it Does**\", \"citations\": []}, \"ppass\": {\"parampass\": \"positional\", \"__current_case__\": 1, \"history_inputs\": [{\"__index__\": 0, \"input_files\": {\"__class__\": \"ConnectedValue\"}, \"input_formats\": [\"txt\"], \"input_label\": \"inputfile\", \"input_help\": \"parameter_help\", \"input_CL\": \"1\"}], \"history_outputs\": [{\"__index__\": 0, \"history_name\": \"output\", \"history_format\": \"txt\", \"history_CL\": \"2\"}], \"edit_params\": \"yes\", \"additional_parameters\": [{\"__index__\": 0, \"param_name\": \"appendme\", \"param_type\": \"text\", \"param_value\": \"added at the end\", \"param_label\": \"append string\", \"param_help\": \"parameter_help\", \"param_CL\": \"3\", \"param_CLprefixed\": \"\"}]}, \"tool_name\": \"pyrevaddpos\", \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "2.00",
+ "type": "tool",
+ "uuid": "30dbe033-30c4-4228-b0cb-854df30f5594",
+ "workflow_outputs": [
+ {
+ "label": null,
+ "output_name": "TF_run_report",
+ "uuid": "37fdd905-471d-4479-a98a-4dfbaa6314be"
+ },
+ {
+ "label": null,
+ "output_name": "new_tool",
+ "uuid": "7c8a8dba-1e8c-49d5-b51d-a0ab09931932"
+ }
+ ]
+ },
+ "5": {
+ "annotation": "",
+ "content_id": "toolshed.g2.bx.psu.edu/repos/fubar/tool_factory_2/rgTF2/2.00",
+ "errors": null,
+ "id": 5,
+ "input_connections": {
+ "ppass|history_inputs_0|input_files": {
+ "id": 1,
+ "output_name": "output"
+ }
+ },
+ "inputs": [],
+ "label": null,
+ "name": "toolfactory",
+ "outputs": [
+ {
+ "name": "TF_run_report",
+ "type": "input"
+ },
+ {
+ "name": "new_tool",
+ "type": "tgz"
+ }
+ ],
+ "position": {
+ "bottom": 772,
+ "height": 242,
+ "left": 613,
+ "right": 813,
+ "top": 530,
+ "width": 200,
+ "x": 613,
+ "y": 530
+ },
+ "post_job_actions": {},
+ "tool_id": "toolshed.g2.bx.psu.edu/repos/fubar/tool_factory_2/rgTF2/2.00",
+ "tool_shed_repository": {
+ "changeset_revision": "51fa77152988",
+ "name": "tool_factory_2",
+ "owner": "fubar",
+ "tool_shed": "toolshed.g2.bx.psu.edu"
+ },
+ "tool_state": "{\"__input_ext\": \"input\", \"chromInfo\": \"/home/ross/galaxy/tool-data/shared/ucsc/chrom/?.len\", \"interexe\": {\"interpreter\": \"python\", \"__current_case__\": 2, \"interpreter_version\": \"\", \"exe_package_version\": \"\", \"dynScript\": \"# reverse order of text by row\\nimport sys\\nimport argparse\\nparser = argparse.ArgumentParser()\\na = parser.add_argument\\na('--infile',default='')\\na('--outfile',default=None)\\nargs = parser.parse_args()\\ninp = args.infile\\noutp = args.outfile\\ni = open(inp,'r').readlines()\\no = open(outp,'w')\\nfor row in i:\\n rs = row.rstrip()\\n rs = list(rs)\\n rs.reverse()\\n o.write(''.join(rs))\\n o.write('\\\\n')\\no.close()\"}, \"makeMode\": {\"make_Tool\": \"yes\", \"__current_case__\": 0, \"tool_version\": \"0.01\", \"tool_desc\": \"reverse argparse\", \"help_text\": \"**What it Does**\", \"citations\": []}, \"ppass\": {\"parampass\": \"argparse\", \"__current_case__\": 0, \"history_inputs\": [{\"__index__\": 0, \"input_files\": {\"__class__\": \"ConnectedValue\"}, \"input_formats\": [\"txt\"], \"input_label\": \"infile\", \"input_help\": \"parameter_help\", \"input_CL\": \"infile\"}], \"history_outputs\": [{\"__index__\": 0, \"history_name\": \"outfile\", \"history_format\": \"txt\", \"history_CL\": \"outfile\"}], \"edit_params\": \"yes\", \"additional_parameters\": []}, \"tool_name\": \"pyrevargparse\", \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "2.00",
+ "type": "tool",
+ "uuid": "91a0dccf-384c-491a-ae08-f426888d26cf",
+ "workflow_outputs": [
+ {
+ "label": null,
+ "output_name": "TF_run_report",
+ "uuid": "61a5271d-3940-4855-9093-a0710dc3fe08"
+ },
+ {
+ "label": null,
+ "output_name": "new_tool",
+ "uuid": "a6602e23-dc1c-44b7-8ed7-cd9971ff9d30"
+ }
+ ]
+ },
+ "6": {
+ "annotation": "",
+ "content_id": "toolshed.g2.bx.psu.edu/repos/fubar/tool_factory_2/rgTF2/2.00",
+ "errors": null,
+ "id": 6,
+ "input_connections": {
+ "ppass|history_inputs_0|input_files": {
+ "id": 1,
+ "output_name": "output"
+ }
+ },
+ "inputs": [],
+ "label": null,
+ "name": "toolfactory",
+ "outputs": [
+ {
+ "name": "TF_run_report",
+ "type": "input"
+ },
+ {
+ "name": "new_tool",
+ "type": "tgz"
+ }
+ ],
+ "position": {
+ "bottom": 852,
+ "height": 202,
+ "left": 613,
+ "right": 813,
+ "top": 650,
+ "width": 200,
+ "x": 613,
+ "y": 650
+ },
+ "post_job_actions": {},
+ "tool_id": "toolshed.g2.bx.psu.edu/repos/fubar/tool_factory_2/rgTF2/2.00",
+ "tool_shed_repository": {
+ "changeset_revision": "51fa77152988",
+ "name": "tool_factory_2",
+ "owner": "fubar",
+ "tool_shed": "toolshed.g2.bx.psu.edu"
+ },
+ "tool_state": "{\"__input_ext\": \"input\", \"chromInfo\": \"/home/ross/galaxy/tool-data/shared/ucsc/chrom/?.len\", \"interexe\": {\"interpreter\": \"bash\", \"__current_case__\": 5, \"interpreter_version\": \"\", \"exe_package_version\": \"\", \"dynScript\": \"rev | tac\"}, \"makeMode\": {\"make_Tool\": \"yes\", \"__current_case__\": 0, \"tool_version\": \"0.01\", \"tool_desc\": \"tacrev\", \"help_text\": \"**What it Does**\", \"citations\": []}, \"ppass\": {\"parampass\": \"0\", \"__current_case__\": 2, \"history_inputs\": [{\"__index__\": 0, \"input_files\": {\"__class__\": \"ConnectedValue\"}, \"input_formats\": [\"txt\"], \"input_label\": \"input file\", \"input_help\": \"parameter_help\", \"input_CL\": \"1\"}], \"history_outputs\": [{\"__index__\": 0, \"history_name\": \"outfile\", \"history_format\": \"txt\", \"history_CL\": \"2\"}]}, \"tool_name\": \"tacrev\", \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "2.00",
+ "type": "tool",
+ "uuid": "edb5d852-908b-45bf-8892-e0e8c337c31d",
+ "workflow_outputs": [
+ {
+ "label": null,
+ "output_name": "TF_run_report",
+ "uuid": "c1394cf9-bb03-4ac3-8466-8ee0cc30c0a0"
+ },
+ {
+ "label": null,
+ "output_name": "new_tool",
+ "uuid": "e45566f4-d40e-4ad0-ad27-72ce814b13da"
+ }
+ ]
+ },
+ "7": {
+ "annotation": "",
+ "content_id": "toolshed.g2.bx.psu.edu/repos/fubar/tool_factory_2/rgTF2/2.00",
+ "errors": null,
+ "id": 7,
+ "input_connections": {
+ "ppass|history_inputs_0|input_files": {
+ "id": 1,
+ "output_name": "output"
+ }
+ },
+ "inputs": [],
+ "label": null,
+ "name": "toolfactory",
+ "outputs": [
+ {
+ "name": "TF_run_report",
+ "type": "input"
+ },
+ {
+ "name": "new_tool",
+ "type": "tgz"
+ }
+ ],
+ "position": {
+ "bottom": 992,
+ "height": 222,
+ "left": 613,
+ "right": 813,
+ "top": 770,
+ "width": 200,
+ "x": 613,
+ "y": 770
+ },
+ "post_job_actions": {},
+ "tool_id": "toolshed.g2.bx.psu.edu/repos/fubar/tool_factory_2/rgTF2/2.00",
+ "tool_shed_repository": {
+ "changeset_revision": "51fa77152988",
+ "name": "tool_factory_2",
+ "owner": "fubar",
+ "tool_shed": "toolshed.g2.bx.psu.edu"
+ },
+ "tool_state": "{\"__input_ext\": \"input\", \"chromInfo\": \"/home/ross/galaxy/tool-data/shared/ucsc/chrom/?.len\", \"interexe\": {\"interpreter\": \"python\", \"__current_case__\": 2, \"interpreter_version\": \"\", \"exe_package_version\": \"\", \"dynScript\": \"# reverse order of text by row\\nimport sys\\ninp = sys.argv[1]\\noutp = sys.argv[2]\\ni = open(inp,'r').readlines()\\no = open(outp,'w')\\nfor row in i:\\n rs = row.rstrip()\\n rs = list(rs)\\n rs.reverse()\\n o.write(''.join(rs))\\n o.write('\\\\n')\\no.close()\"}, \"makeMode\": {\"make_Tool\": \"yes\", \"__current_case__\": 0, \"tool_version\": \"0.01\", \"tool_desc\": \"pyrevpos\", \"help_text\": \"**What it Does**\", \"citations\": []}, \"ppass\": {\"parampass\": \"positional\", \"__current_case__\": 1, \"history_inputs\": [{\"__index__\": 0, \"input_files\": {\"__class__\": \"ConnectedValue\"}, \"input_formats\": [\"txt\"], \"input_label\": \"inputfile\", \"input_help\": \"parameter_help\", \"input_CL\": \"1\"}], \"history_outputs\": [{\"__index__\": 0, \"history_name\": \"output\", \"history_format\": \"txt\", \"history_CL\": \"2\"}], \"edit_params\": \"yes\", \"additional_parameters\": []}, \"tool_name\": \"pyrevpos\", \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "2.00",
+ "type": "tool",
+ "uuid": "08a48555-8700-4652-a76b-df1f54197049",
+ "workflow_outputs": [
+ {
+ "label": null,
+ "output_name": "new_tool",
+ "uuid": "e96ae086-a92a-4018-8f07-ebf4974807e6"
+ },
+ {
+ "label": null,
+ "output_name": "TF_run_report",
+ "uuid": "b5bd73bb-1ddc-4161-be2e-370bab9aebbe"
+ }
+ ]
+ },
+ "8": {
+ "annotation": "",
+ "content_id": "toolshed.g2.bx.psu.edu/repos/fubar/tool_factory_2/rgTF2/2.00",
+ "errors": null,
+ "id": 8,
+ "input_connections": {
+ "ppass|history_inputs_0|input_files": {
+ "id": 7,
+ "output_name": "new_tool"
+ }
+ },
+ "inputs": [],
+ "label": null,
+ "name": "toolfactory",
+ "outputs": [
+ {
+ "name": "TF_run_report",
+ "type": "input"
+ },
+ {
+ "name": "new_tool",
+ "type": "tgz"
+ }
+ ],
+ "position": {
+ "bottom": 412,
+ "height": 242,
+ "left": 833,
+ "right": 1033,
+ "top": 170,
+ "width": 200,
+ "x": 833,
+ "y": 170
+ },
+ "post_job_actions": {},
+ "tool_id": "toolshed.g2.bx.psu.edu/repos/fubar/tool_factory_2/rgTF2/2.00",
+ "tool_shed_repository": {
+ "changeset_revision": "51fa77152988",
+ "name": "tool_factory_2",
+ "owner": "fubar",
+ "tool_shed": "toolshed.g2.bx.psu.edu"
+ },
+ "tool_state": "{\"__input_ext\": \"input\", \"chromInfo\": \"/home/ross/galaxy/tool-data/shared/ucsc/chrom/?.len\", \"interexe\": {\"interpreter\": \"python\", \"__current_case__\": 2, \"interpreter_version\": \"\", \"exe_package_version\": \"\", \"dynScript\": \"import argparse\\nimport tarfile\\nimport os\\nimport tempfile\\nimport subprocess\\n\\n\\\"\\\"\\\"\\nplanemo test --no_cleanup --no_dependency_resolution --skip_venv --galaxy_root ~/galaxy ~/galaxy/tools/tool_makers/pyrevargparse/ &> pyrevargparse\\n\\\"\\\"\\\"\\n\\nparser = argparse.ArgumentParser()\\na = parser.add_argument\\na('--tooltgz',default='')\\na('--report',default=None)\\na('--toolout',default=None)\\na('--galaxy_root',default=None)\\nargs = parser.parse_args()\\ntoolname = args.toolout.split(os.sep)[-1]\\ntoolpath = os.path.join(args.galaxy_root,args.toolout)\\ntf = tarfile.open(args.tooltgz,\\\"r:gz\\\")\\ntf.extractall(toolpath)\\ncl = \\\"planemo test --skip_venv --galaxy_root %s %s\\\" % (args.galaxy_root,toolpath)\\ncll = cl.split(' ')\\nsto = open(args.report, 'w')\\np = subprocess.run(cll, shell=False, stdout=sto)\\nretval = p.returncode\\nsto.close()\\n\"}, \"makeMode\": {\"make_Tool\": \"yes\", \"__current_case__\": 0, \"tool_version\": \"0.01\", \"tool_desc\": \"Tool to test toolshed tool archives generated by the tool factory.\", \"help_text\": \"**What it Does**\\n\\nGiven a toolshed tgz file generated by a tool factory run, this will unpack it and run planemo test, returning the planemo stdout as a report\\nIt was generated using the tool factory.\", \"citations\": []}, \"ppass\": {\"parampass\": \"argparse\", \"__current_case__\": 0, \"history_inputs\": [{\"__index__\": 0, \"input_files\": {\"__class__\": \"ConnectedValue\"}, \"input_formats\": [\"tgz\"], \"input_label\": \"tool toolshed tgz archive from history\", \"input_help\": \"Run planemo test on a tool shed tool archive tgz format file generated by the ToolFactory or Planemo\", \"input_CL\": \"tooltgz\"}], 
\"history_outputs\": [{\"__index__\": 0, \"history_name\": \"report\", \"history_format\": \"txt\", \"history_CL\": \"report\"}], \"edit_params\": \"yes\", \"additional_parameters\": [{\"__index__\": 0, \"param_name\": \"toolout\", \"param_type\": \"text\", \"param_value\": \"tools/toolmakers/planemotest\", \"param_label\": \"output path under galaxy root\", \"param_help\": \"This is where the tgz file will be extracted and tested by planemo\", \"param_CL\": \"toolout\", \"param_CLprefixed\": \"\"}, {\"__index__\": 1, \"param_name\": \"galaxy_root\", \"param_type\": \"text\", \"param_value\": \"/home/ross/galaxy\", \"param_label\": \"Galaxy source root directory to use for running planemo\", \"param_help\": \"This will form the galaxy_root parameter for running planemo using an existing Galaxy source tree, and the tgz will be extracted at a path relative to that root\", \"param_CL\": \"galaxy_root\", \"param_CLprefixed\": \"\"}]}, \"tool_name\": \"planemotest\", \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "2.00",
+ "type": "tool",
+ "uuid": "b9bfb1a4-4c0c-4d39-9e74-223da72f8abc",
+ "workflow_outputs": [
+ {
+ "label": null,
+ "output_name": "TF_run_report",
+ "uuid": "09ba44ea-4da8-46f5-a411-ca054ccedd3b"
+ },
+ {
+ "label": null,
+ "output_name": "new_tool",
+ "uuid": "50a8ff4a-702a-4983-8202-8a79c0a3c978"
+ }
+ ]
+ }
+ },
+ "tags": [],
+ "uuid": "321a7f9f-c287-453c-807a-43afd948770e",
+ "version": 0
+}
diff -r 51fa77152988 -r f8c1694190f0 toolfactory/docker/dockerfile.seq
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolfactory/docker/dockerfile.seq Sun Aug 16 08:11:10 2020 -0400
@@ -0,0 +1,25 @@
+# Galaxy - Toolshed docker
+
+FROM quay.io/bgruening/galaxy:19.01
+
+MAINTAINER Björn A. Grüning, bjoern.gruening@gmail.com
+
+ENV GALAXY_CONFIG_BRAND ToolFactory
+ENV GALAXY_CONFIG_SANITIZE_ALL_HTML false
+
+# Install tools
+#ADD data_managers.yaml $GALAXY_ROOT/data_managers.yaml
+#RUN install-tools $GALAXY_ROOT/data_managers.yaml && \
+# /tool_deps/_conda/bin/conda clean --tarballs && \
+# rm /export/galaxy-central/ -rf
+ADD my_tool_list.yml $GALAXY_ROOT/tools1.yaml
+RUN install-tools $GALAXY_ROOT/tools1.yaml && \
+ /tool_deps/_conda/bin/conda clean --tarballs && \
+ rm /export/galaxy-central/ -rf
+
+ADD TF_example_wf.ga $GALAXY_HOME/workflows/TF_example_wf.ga
+
+ADD post-start-actions.sh /export/post-start-actions.sh
+RUN chmod a+x /export/post-start-actions.sh
+
+
diff -r 51fa77152988 -r f8c1694190f0 toolfactory/docker/my_tool_list.yml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolfactory/docker/my_tool_list.yml Sun Aug 16 08:11:10 2020 -0400
@@ -0,0 +1,9 @@
+install_resolver_dependencies: true
+install_tool_dependencies: false
+tools:
+- name: tool_factory_2
+ owner: fubar
+ tool_panel_section_label: 'Make new Tools'
+ tool_shed_url: https://toolshed.g2.bx.psu.edu
+
+
diff -r 51fa77152988 -r f8c1694190f0 toolfactory/docker/post-start-actions.sh
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolfactory/docker/post-start-actions.sh Sun Aug 16 08:11:10 2020 -0400
@@ -0,0 +1,5 @@
+#!/bin/bash
+# hook to install tf demo workflow
+echo "#### post start actions.sh hook happening"
+chown $GALAXY_USER $GALAXY_ROOT/workflows/TF_example_wf.ga
+workflow-install -w $GALAXY_ROOT/workflows/TF_example_wf.ga -g http://localhost -a fakekey --publish_workflows
diff -r 51fa77152988 -r f8c1694190f0 toolfactory/docker/startgaldock.sh
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolfactory/docker/startgaldock.sh Sun Aug 16 08:11:10 2020 -0400
@@ -0,0 +1,1 @@
+docker run -d -p 8080:80 -v /home/ubuntu/galaxy_storage/:/export/ toolfactory
diff -r 51fa77152988 -r f8c1694190f0 toolfactory/docker/startup
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolfactory/docker/startup Sun Aug 16 08:11:10 2020 -0400
@@ -0,0 +1,460 @@
+#!/usr/bin/env bash
+
+# Migration path for old images that had the tool_deps under /export/galaxy-central/tool_deps/
+
+if [ -d "/export/galaxy-central/tool_deps/" ] && [ ! -L "/export/galaxy-central/tool_deps/" ]; then
+ mkdir -p /export/tool_deps/
+ mv /export/galaxy-central/tool_deps /export/
+ ln -s /export/tool_deps/ $GALAXY_ROOT/
+fi
+
+# This is needed for Docker compose to have a unified alias for the main container.
+# Modifying /etc/hosts can only happen during runtime not during build-time
+echo "127.0.0.1 galaxy" >> /etc/hosts
+
+# Set number of Galaxy handlers via GALAXY_HANDLER_NUMPROCS or default to 2
+ansible localhost -m ini_file -a "dest=/etc/supervisor/conf.d/galaxy.conf section=program:handler option=numprocs value=${GALAXY_HANDLER_NUMPROCS:-2}" &> /dev/null
+
+# If the Galaxy config file is not in the expected place, copy from the sample
+# and hope for the best (that the admin has done all the setup through env vars.)
+if [ ! -f $GALAXY_CONFIG_FILE ]
+ then
+ # this should successfully copy either .yml or .ini sample file to the expected location
+ cp /export/config/galaxy${GALAXY_CONFIG_FILE: -4}.sample $GALAXY_CONFIG_FILE
+fi
+
+# Configure proxy prefix filtering
+if [[ ! -z $PROXY_PREFIX ]]
+ then
+ if [ ${GALAXY_CONFIG_FILE: -4} == ".ini" ]
+ then
+ ansible localhost -m ini_file -a "dest=${GALAXY_CONFIG_FILE} section=filter:proxy-prefix option=prefix value=${PROXY_PREFIX}" &> /dev/null
+ ansible localhost -m ini_file -a "dest=${GALAXY_CONFIG_FILE} section=app:main option=filter-with value=proxy-prefix" &> /dev/null
+ else
+ ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} regexp='^ module:' state=absent" &> /dev/null
+ ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} regexp='^ socket:' state=absent" &> /dev/null
+ ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} regexp='^ mount:' state=absent" &> /dev/null
+ ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} regexp='^ manage-script-name:' state=absent" &> /dev/null
+ ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} insertafter='^uwsgi:' line=' manage-script-name: true'" &> /dev/null
+ ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} insertafter='^uwsgi:' line=' mount: ${PROXY_PREFIX}=galaxy.webapps.galaxy.buildapp:uwsgi_app()'" &> /dev/null
+ ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} insertafter='^uwsgi:' line=' socket: unix:///srv/galaxy/var/uwsgi.sock'" &> /dev/null
+
+ # Also set SCRIPT_NAME. It's not always necessary due to manage-script-name: true in galaxy.yml, but it makes life easier in this container + it does no harm
+ ansible localhost -m lineinfile -a "path=/etc/nginx/conf.d/uwsgi.conf regexp='^ uwsgi_param SCRIPT_NAME' state=absent" &> /dev/null
+ ansible localhost -m lineinfile -a "path=/etc/nginx/conf.d/uwsgi.conf insertafter='^ include uwsgi_params' line=' uwsgi_param SCRIPT_NAME ${PROXY_PREFIX};'" &> /dev/null
+ fi
+
+ ansible localhost -m ini_file -a "dest=${GALAXY_CONFIG_DIR}/reports_wsgi.ini section=filter:proxy-prefix option=prefix value=${PROXY_PREFIX}/reports" &> /dev/null
+ ansible localhost -m ini_file -a "dest=${GALAXY_CONFIG_DIR}/reports_wsgi.ini section=app:main option=filter-with value=proxy-prefix" &> /dev/null
+
+ # Fix path to html assets
+ ansible localhost -m replace -a "dest=$GALAXY_CONFIG_DIR/web/welcome.html regexp='(href=\"|\')[/\\w]*(/static)' replace='\\1${PROXY_PREFIX}\\2'" &> /dev/null
+
+ # Set some other vars based on that prefix
+ if [ "x$GALAXY_CONFIG_COOKIE_PATH" == "x" ]
+ then
+ export GALAXY_CONFIG_COOKIE_PATH="$PROXY_PREFIX"
+ fi
+ if [ "x$GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX" == "x" ]
+ then
+ export GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX="$PROXY_PREFIX/gie_proxy"
+ fi
+
+ # Change the defaults nginx upload/x-accel paths
+ if [ "$GALAXY_CONFIG_NGINX_UPLOAD_PATH" == "/_upload" ]
+ then
+ export GALAXY_CONFIG_NGINX_UPLOAD_PATH="${PROXY_PREFIX}${GALAXY_CONFIG_NGINX_UPLOAD_PATH}"
+ fi
+fi
+
+# Disable authentication of Galaxy reports
+if [[ ! -z $DISABLE_REPORTS_AUTH ]]
+ then
+ # disable authentication
+ echo "Disable Galaxy reports authentification "
+ echo "" > /etc/nginx/conf.d/reports_auth.conf
+ else
+ # enable authentication
+ echo "Enable Galaxy reports authentification "
+ cp /etc/nginx/conf.d/reports_auth.conf.source /etc/nginx/conf.d/reports_auth.conf
+fi
+
+# Try to guess if we are running under --privileged mode
+if [[ ! -z $HOST_DOCKER_LEGACY ]]; then
+ if mount | grep "/proc/kcore"; then
+ PRIVILEGED=false
+ else
+ PRIVILEGED=true
+ fi
+else
+ # Taken from http://stackoverflow.com/questions/32144575/how-to-know-if-a-docker-container-is-running-in-privileged-mode
+ ip link add dummy0 type dummy 2>/dev/null
+ if [[ $? -eq 0 ]]; then
+ PRIVILEGED=true
+ # clean the dummy0 link
+ ip link delete dummy0 2>/dev/null
+ else
+ PRIVILEGED=false
+ fi
+fi
+
+cd $GALAXY_ROOT
+. $GALAXY_VIRTUAL_ENV/bin/activate
+
+if $PRIVILEGED; then
+ umount /var/lib/docker
+fi
+
+if [[ ! -z $STARTUP_EXPORT_USER_FILES ]]; then
+ # If /export/ is mounted, export_user_files moves all user data to /export/;
+ # symlinks will point from the original location to the new path under /export/.
+ # If /export/ is not mounted, nothing happens in this step.
+ echo "Checking /export..."
+ python3 /usr/local/bin/export_user_files.py $PG_DATA_DIR_DEFAULT
+fi
+
+# Delete compiled templates in case they are out of date
+if [[ ! -z $GALAXY_CONFIG_TEMPLATE_CACHE_PATH ]]; then
+ rm -rf $GALAXY_CONFIG_TEMPLATE_CACHE_PATH/*
+fi
+
+# Enable loading of dependencies on startup. Such as LDAP.
+# Adapted from galaxyproject/galaxy/scripts/common_startup.sh
+if [[ ! -z $LOAD_GALAXY_CONDITIONAL_DEPENDENCIES ]]
+ then
+ echo "Installing optional dependencies in galaxy virtual environment..."
+ : ${GALAXY_WHEELS_INDEX_URL:="https://wheels.galaxyproject.org/simple"}
+ GALAXY_CONDITIONAL_DEPENDENCIES=$(PYTHONPATH=lib python -c "import galaxy.dependencies; print('\n'.join(galaxy.dependencies.optional('$GALAXY_CONFIG_FILE')))")
+ [ -z "$GALAXY_CONDITIONAL_DEPENDENCIES" ] || echo "$GALAXY_CONDITIONAL_DEPENDENCIES" | pip install -q -r /dev/stdin --index-url "${GALAXY_WHEELS_INDEX_URL}"
+fi
+
+if [[ ! -z $LOAD_GALAXY_CONDITIONAL_DEPENDENCIES ]] && [[ ! -z $LOAD_PYTHON_DEV_DEPENDENCIES ]]
+ then
+ echo "Installing development requirements in galaxy virtual environment..."
+ : ${GALAXY_WHEELS_INDEX_URL:="https://wheels.galaxyproject.org/simple"}
+ dev_requirements='./lib/galaxy/dependencies/dev-requirements.txt'
+ [ -f $dev_requirements ] && pip install -q -r $dev_requirements --index-url "${GALAXY_WHEELS_INDEX_URL}"
+fi
+
+# Enable Test Tool Shed
+if [[ ! -z $ENABLE_TTS_INSTALL ]]
+ then
+ echo "Enable installation from the Test Tool Shed."
+ export GALAXY_CONFIG_TOOL_SHEDS_CONFIG_FILE=$GALAXY_HOME/tool_sheds_conf.xml
+fi
+
+# Remove all default tools from Galaxy by default
+if [[ ! -z $BARE ]]
+ then
+ echo "Remove all tools from the tool_conf.xml file."
+ export GALAXY_CONFIG_TOOL_CONFIG_FILE=config/shed_tool_conf.xml,$GALAXY_ROOT/test/functional/tools/upload_tool_conf.xml
+fi
+
+# If auto installing conda envs, make sure bcftools is installed for __set_metadata__ tool
+if [[ ! -z $GALAXY_CONFIG_CONDA_AUTO_INSTALL ]]
+ then
+ if [ ! -d "/tool_deps/_conda/envs/__bcftools@1.5" ]; then
+ su $GALAXY_USER -c "/tool_deps/_conda/bin/conda create -y --override-channels --channel iuc --channel conda-forge --channel bioconda --channel defaults --name __bcftools@1.5 bcftools=1.5"
+ su $GALAXY_USER -c "/tool_deps/_conda/bin/conda clean --tarballs --yes"
+ fi
+fi
+
+if [[ ! -z $GALAXY_EXTRAS_CONFIG_POSTGRES ]]; then
+ if [[ $NONUSE != *"postgres"* ]]
+ then
+ # Backward compatibility for exported postgresql directories before version 15.08.
+ # In previous versions postgres has the UID/GID of 102/106. We changed this in
+ # https://github.com/bgruening/docker-galaxy-stable/pull/71 to GALAXY_POSTGRES_UID=1550 and
+ # GALAXY_POSTGRES_GID=1550
+ if [ -e /export/postgresql/ ];
+ then
+ if [ `stat -c %g /export/postgresql/` == "106" ];
+ then
+ chown -R postgres:postgres /export/postgresql/
+ fi
+ fi
+ fi
+fi
+
+
+if [[ ! -z $GALAXY_EXTRAS_CONFIG_CONDOR ]]; then
+ if [[ ! -z $ENABLE_CONDOR ]]
+ then
+ if [[ ! -z $CONDOR_HOST ]]
+ then
+ echo "Enabling Condor with external scheduler at $CONDOR_HOST"
+ echo "# Config generated by startup.sh
+CONDOR_HOST = $CONDOR_HOST
+ALLOW_ADMINISTRATOR = *
+ALLOW_OWNER = *
+ALLOW_READ = *
+ALLOW_WRITE = *
+ALLOW_CLIENT = *
+ALLOW_NEGOTIATOR = *
+DAEMON_LIST = MASTER, SCHEDD
+UID_DOMAIN = galaxy
+DISCARD_SESSION_KEYRING_ON_STARTUP = False
+TRUST_UID_DOMAIN = true" > /etc/condor/condor_config.local
+ fi
+
+ if [[ -e /export/condor_config ]]
+ then
+ echo "Replacing Condor config by locally supplied config from /export/condor_config"
+ rm -f /etc/condor/condor_config
+ ln -s /export/condor_config /etc/condor/condor_config
+ fi
+ fi
+fi
+
+
+# Copy or link the slurm/munge config files
+if [ -e /export/slurm.conf ]
+then
+ rm -f /etc/slurm-llnl/slurm.conf
+ ln -s /export/slurm.conf /etc/slurm-llnl/slurm.conf
+else
+ # Configure SLURM with runtime hostname.
+ # Use absolute path to python so virtualenv is not used.
+ /usr/bin/python /usr/sbin/configure_slurm.py
+fi
+if [ -e /export/munge.key ]
+then
+ rm -f /etc/munge/munge.key
+ ln -s /export/munge.key /etc/munge/munge.key
+ chmod 400 /export/munge.key
+fi
+
+# link the gridengine config file
+if [ -e /export/act_qmaster ]
+then
+ rm -f /var/lib/gridengine/default/common/act_qmaster
+ ln -s /export/act_qmaster /var/lib/gridengine/default/common/act_qmaster
+fi
+
+# Waits until postgres is ready
+function wait_for_postgres {
+ echo "Checking if database is up and running"
+ until /usr/local/bin/check_database.py 2>&1 >/dev/null; do sleep 1; echo "Waiting for database"; done
+ echo "Database connected"
+}
+
+# $NONUSE can be set to include cron, proftp, reports or nodejs
+# if included we will _not_ start these services.
+function start_supervisor {
+ supervisord -c /etc/supervisor/supervisord.conf
+ sleep 5
+
+ if [[ ! -z $SUPERVISOR_MANAGE_POSTGRES && ! -z $SUPERVISOR_POSTGRES_AUTOSTART ]]; then
+ if [[ $NONUSE != *"postgres"* ]]
+ then
+ echo "Starting postgres"
+ supervisorctl start postgresql
+ fi
+ fi
+
+ wait_for_postgres
+
+ # Make sure the database is automatically updated
+ if [[ ! -z $GALAXY_AUTO_UPDATE_DB ]]
+ then
+ echo "Updating Galaxy database"
+ sh manage_db.sh -c /etc/galaxy/galaxy.yml upgrade
+ fi
+
+ if [[ ! -z $SUPERVISOR_MANAGE_CRON ]]; then
+ if [[ $NONUSE != *"cron"* ]]
+ then
+ echo "Starting cron"
+ supervisorctl start cron
+ fi
+ fi
+
+ if [[ ! -z $SUPERVISOR_MANAGE_PROFTP ]]; then
+ if [[ $NONUSE != *"proftp"* ]]
+ then
+ echo "Starting ProFTP"
+ supervisorctl start proftpd
+ fi
+ fi
+
+ if [[ ! -z $SUPERVISOR_MANAGE_REPORTS ]]; then
+ if [[ $NONUSE != *"reports"* ]]
+ then
+ echo "Starting Galaxy reports webapp"
+ supervisorctl start reports
+ fi
+ fi
+
+ if [[ ! -z $SUPERVISOR_MANAGE_IE_PROXY ]]; then
+ if [[ $NONUSE != *"nodejs"* ]]
+ then
+ echo "Starting nodejs"
+ supervisorctl start galaxy:galaxy_nodejs_proxy
+ fi
+ fi
+
+ if [[ ! -z $SUPERVISOR_MANAGE_CONDOR ]]; then
+ if [[ $NONUSE != *"condor"* ]]
+ then
+ echo "Starting condor"
+ supervisorctl start condor
+ fi
+ fi
+
+ if [[ ! -z $SUPERVISOR_MANAGE_SLURM ]]; then
+ if [[ $NONUSE != *"slurmctld"* ]]
+ then
+ echo "Starting slurmctld"
+ supervisorctl start slurmctld
+ fi
+ if [[ $NONUSE != *"slurmd"* ]]
+ then
+ echo "Starting slurmd"
+ supervisorctl start slurmd
+ fi
+ supervisorctl start munge
+ else
+ if [[ $NONUSE != *"slurmctld"* ]]
+ then
+ echo "Starting slurmctld"
+ /usr/sbin/slurmctld -L $GALAXY_LOGS_DIR/slurmctld.log
+ fi
+ if [[ $NONUSE != *"slurmd"* ]]
+ then
+ echo "Starting slurmd"
+ /usr/sbin/slurmd -L $GALAXY_LOGS_DIR/slurmd.log
+ fi
+
+ # We need to run munged regardless
+ mkdir -p /var/run/munge && /usr/sbin/munged -f
+ fi
+}
+
+if [[ ! -z $SUPERVISOR_POSTGRES_AUTOSTART ]]; then
+ if [[ $NONUSE != *"postgres"* ]]
+ then
+ # Change the data_directory of postgresql in the main config file
+ ansible localhost -m lineinfile -a "line='data_directory = \'$PG_DATA_DIR_HOST\'' dest=$PG_CONF_DIR_DEFAULT/postgresql.conf backup=yes state=present regexp='data_directory'" &> /dev/null
+ fi
+fi
+
+if $PRIVILEGED; then
+ echo "Enable Galaxy Interactive Environments."
+ export GALAXY_CONFIG_INTERACTIVE_ENVIRONMENT_PLUGINS_DIRECTORY="config/plugins/interactive_environments"
+ if [ x$DOCKER_PARENT == "x" ]; then
+ #build the docker in docker environment
+ bash /root/cgroupfs_mount.sh
+ start_supervisor
+ supervisorctl start docker
+ else
+ #inheriting /var/run/docker.sock from parent, assume that you need to
+ #run docker with sudo to validate
+ echo "$GALAXY_USER ALL = NOPASSWD : ALL" >> /etc/sudoers
+ start_supervisor
+ fi
+ if [[ ! -z $PULL_IE_IMAGES ]]; then
+ echo "About to pull IE images. Depending on the size, this may take a while!"
+
+ for ie in {JUPYTER,RSTUDIO,ETHERCALC,PHINCH,NEO}; do
+ enabled_var_name="GALAXY_EXTRAS_IE_FETCH_${ie}";
+ if [[ ${!enabled_var_name} ]]; then
+ # Store name in a var
+ image_var_name="GALAXY_EXTRAS_${ie}_IMAGE"
+ # And then read from that var
+ docker pull "${!image_var_name}"
+ fi
+ done
+ fi
+
+ # in privileged mode autofs and CVMFS is available
+ # install autofs
+ echo "Installing autofs to enable automatic CVMFS mounts"
+ apt-get install autofs --no-install-recommends -y
+ apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/*
+else
+ echo "Disable Galaxy Interactive Environments. Start with --privileged to enable IE's."
+ export GALAXY_CONFIG_INTERACTIVE_ENVIRONMENT_PLUGINS_DIRECTORY=""
+ start_supervisor
+fi
+
+if [ "$USE_HTTPS_LETSENCRYPT" != "False" ]
+then
+ echo "Setting up letsencrypt"
+ ansible-playbook -c local /ansible/provision.yml \
+ --extra-vars gather_facts=False \
+ --extra-vars galaxy_extras_config_ssl=True \
+ --extra-vars galaxy_extras_config_ssl_method=letsencrypt \
+ --extra-vars galaxy_extras_galaxy_domain="GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL" \
+ --extra-vars galaxy_extras_config_nginx_upload=False \
+ --tags https
+fi
+if [ "$USE_HTTPS" != "False" ]
+then
+ if [ -f /export/server.key -a -f /export/server.crt ]
+ then
+ echo "Copying SSL keys"
+ ansible-playbook -c local /ansible/provision.yml \
+ --extra-vars gather_facts=False \
+ --extra-vars galaxy_extras_config_ssl=True \
+ --extra-vars galaxy_extras_config_ssl_method=own \
+ --extra-vars src_nginx_ssl_certificate_key=/export/server.key \
+ --extra-vars src_nginx_ssl_certificate=/export/server.crt \
+ --extra-vars galaxy_extras_config_nginx_upload=False \
+ --tags https
+ else
+ echo "Setting up self-signed SSL keys"
+ ansible-playbook -c local /ansible/provision.yml \
+ --extra-vars gather_facts=False \
+ --extra-vars galaxy_extras_config_ssl=True \
+ --extra-vars galaxy_extras_config_ssl_method=self-signed \
+ --extra-vars galaxy_extras_config_nginx_upload=False \
+ --tags https
+ fi
+fi
+
+# In case the user wants the default admin to be created, do so.
+if [[ ! -z $GALAXY_DEFAULT_ADMIN_USER ]]
+ then
+ echo "Creating admin user $GALAXY_DEFAULT_ADMIN_USER with key $GALAXY_DEFAULT_ADMIN_KEY and password $GALAXY_DEFAULT_ADMIN_PASSWORD if not existing"
+ python /usr/local/bin/create_galaxy_user.py --user "$GALAXY_DEFAULT_ADMIN_EMAIL" --password "$GALAXY_DEFAULT_ADMIN_PASSWORD" \
+ -c "$GALAXY_CONFIG_FILE" --username "$GALAXY_DEFAULT_ADMIN_USER" --key "$GALAXY_DEFAULT_ADMIN_KEY"
+ # If there is a need to execute actions that would require a live galaxy instance, such as adding workflows, setting quotas, adding more users, etc.
+ # then place a file with that logic named post-start-actions.sh on the /export/ directory, it should have access to all environment variables
+ # visible here.
+ # The file needs to be executable (chmod a+x post-start-actions.sh)
+fi
+if [ -x /export/post-start-actions.sh ]
+ then
+ # uses ephemeris, present in docker-galaxy-stable, to wait for the local instance
+ /tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 120 > $GALAXY_LOGS_DIR/post-start-actions.log &&
+ /export/post-start-actions.sh >> $GALAXY_LOGS_DIR/post-start-actions.log &
+fi
+
+
+# Reinstall tools if the user want to
+if [[ ! -z $GALAXY_AUTO_UPDATE_TOOLS ]]
+ then
+ /tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 120 > /home/galaxy/logs/post-start-actions.log &&
+ OLDIFS=$IFS
+ IFS=','
+ for TOOL_YML in `echo "$GALAXY_AUTO_UPDATE_TOOLS"`
+ do
+ echo "Installing tools from $TOOL_YML"
+ /tool_deps/_conda/bin/shed-tools install -g "http://127.0.0.1" -a "$GALAXY_DEFAULT_ADMIN_KEY" -t "$TOOL_YML"
+ /tool_deps/_conda/bin/conda clean --tarballs --yes
+ done
+ IFS=$OLDIFS
+fi
+
+# migrate custom IEs or Visualisations (Galaxy plugins)
+# this is needed by the new client build system
+python3 ${GALAXY_ROOT}/scripts/plugin_staging.py
+
+# Enable verbose output
+if [ `echo ${GALAXY_LOGGING:-'no'} | tr [:upper:] [:lower:]` = "full" ]
+ then
+ tail -f /var/log/supervisor/* /var/log/nginx/* $GALAXY_LOGS_DIR/*.log
+ else
+ tail -f $GALAXY_LOGS_DIR/*.log
+fi
+
diff -r 51fa77152988 -r f8c1694190f0 toolfactory/docker/startup.sh
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolfactory/docker/startup.sh Sun Aug 16 08:11:10 2020 -0400
@@ -0,0 +1,462 @@
+#!/usr/bin/env bash
+
+# Migration path for old images that had the tool_deps under /export/galaxy-central/tool_deps/
+
+if [ -d "/export/galaxy-central/tool_deps/" ] && [ ! -L "/export/galaxy-central/tool_deps/" ]; then
+ mkdir -p /export/tool_deps/
+ mv /export/galaxy-central/tool_deps /export/
+ ln -s /export/tool_deps/ $GALAXY_ROOT/
+fi
+
+# This is needed for Docker compose to have a unified alias for the main container.
+# Modifying /etc/hosts can only happen during runtime not during build-time
+echo "127.0.0.1 galaxy" >> /etc/hosts
+
+# Set number of Galaxy handlers via GALAXY_HANDLER_NUMPROCS or default to 2
+ansible localhost -m ini_file -a "dest=/etc/supervisor/conf.d/galaxy.conf section=program:handler option=numprocs value=${GALAXY_HANDLER_NUMPROCS:-2}" &> /dev/null
+
+# If the Galaxy config file is not in the expected place, copy from the sample
+# and hope for the best (that the admin has done all the setup through env vars.)
+if [ ! -f $GALAXY_CONFIG_FILE ]
+ then
+ # this should successfully copy either .yml or .ini sample file to the expected location
+ cp /export/config/galaxy${GALAXY_CONFIG_FILE: -4}.sample $GALAXY_CONFIG_FILE
+fi
+
+# Configure proxy prefix filtering
+if [[ ! -z $PROXY_PREFIX ]]
+ then
+ if [ ${GALAXY_CONFIG_FILE: -4} == ".ini" ]
+ then
+ ansible localhost -m ini_file -a "dest=${GALAXY_CONFIG_FILE} section=filter:proxy-prefix option=prefix value=${PROXY_PREFIX}" &> /dev/null
+ ansible localhost -m ini_file -a "dest=${GALAXY_CONFIG_FILE} section=app:main option=filter-with value=proxy-prefix" &> /dev/null
+ else
+ ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} regexp='^ module:' state=absent" &> /dev/null
+ ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} regexp='^ socket:' state=absent" &> /dev/null
+ ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} regexp='^ mount:' state=absent" &> /dev/null
+ ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} regexp='^ manage-script-name:' state=absent" &> /dev/null
+ ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} insertafter='^uwsgi:' line=' manage-script-name: true'" &> /dev/null
+ ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} insertafter='^uwsgi:' line=' mount: ${PROXY_PREFIX}=galaxy.webapps.galaxy.buildapp:uwsgi_app()'" &> /dev/null
+ ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} insertafter='^uwsgi:' line=' socket: unix:///srv/galaxy/var/uwsgi.sock'" &> /dev/null
+
+ # Also set SCRIPT_NAME. It's not always necessary due to manage-script-name: true in galaxy.yml, but it makes life easier in this container + it does no harm
+ ansible localhost -m lineinfile -a "path=/etc/nginx/conf.d/uwsgi.conf regexp='^ uwsgi_param SCRIPT_NAME' state=absent" &> /dev/null
+ ansible localhost -m lineinfile -a "path=/etc/nginx/conf.d/uwsgi.conf insertafter='^ include uwsgi_params' line=' uwsgi_param SCRIPT_NAME ${PROXY_PREFIX};'" &> /dev/null
+ fi
+
+ ansible localhost -m ini_file -a "dest=${GALAXY_CONFIG_DIR}/reports_wsgi.ini section=filter:proxy-prefix option=prefix value=${PROXY_PREFIX}/reports" &> /dev/null
+ ansible localhost -m ini_file -a "dest=${GALAXY_CONFIG_DIR}/reports_wsgi.ini section=app:main option=filter-with value=proxy-prefix" &> /dev/null
+
+ # Fix path to html assets
+ ansible localhost -m replace -a "dest=$GALAXY_CONFIG_DIR/web/welcome.html regexp='(href=\"|\')[/\\w]*(/static)' replace='\\1${PROXY_PREFIX}\\2'" &> /dev/null
+
+ # Set some other vars based on that prefix
+ if [ "x$GALAXY_CONFIG_COOKIE_PATH" == "x" ]
+ then
+ export GALAXY_CONFIG_COOKIE_PATH="$PROXY_PREFIX"
+ fi
+ if [ "x$GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX" == "x" ]
+ then
+ export GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX="$PROXY_PREFIX/gie_proxy"
+ fi
+
+ # Change the defaults nginx upload/x-accel paths
+ if [ "$GALAXY_CONFIG_NGINX_UPLOAD_PATH" == "/_upload" ]
+ then
+ export GALAXY_CONFIG_NGINX_UPLOAD_PATH="${PROXY_PREFIX}${GALAXY_CONFIG_NGINX_UPLOAD_PATH}"
+ fi
+fi
+
+# Disable authentication of Galaxy reports
+if [[ ! -z $DISABLE_REPORTS_AUTH ]]
+ then
+ # disable authentication
+ echo "Disable Galaxy reports authentication "
+ echo "" > /etc/nginx/conf.d/reports_auth.conf
+ else
+ # enable authentication
+ echo "Enable Galaxy reports authentication "
+ cp /etc/nginx/conf.d/reports_auth.conf.source /etc/nginx/conf.d/reports_auth.conf
+fi
+
+# Try to guess if we are running under --privileged mode
+if [[ ! -z $HOST_DOCKER_LEGACY ]]; then
+ if mount | grep "/proc/kcore"; then
+ PRIVILEGED=false
+ else
+ PRIVILEGED=true
+ fi
+else
+ # Taken from http://stackoverflow.com/questions/32144575/how-to-know-if-a-docker-container-is-running-in-privileged-mode
+ ip link add dummy0 type dummy 2>/dev/null
+ if [[ $? -eq 0 ]]; then
+ PRIVILEGED=true
+ # clean the dummy0 link
+ ip link delete dummy0 2>/dev/null
+ else
+ PRIVILEGED=false
+ fi
+fi
+
+cd $GALAXY_ROOT
+. $GALAXY_VIRTUAL_ENV/bin/activate
+
+if $PRIVILEGED; then
+ umount /var/lib/docker
+fi
+
+if [[ ! -z $STARTUP_EXPORT_USER_FILES ]]; then
+ # If /export/ is mounted, export_user_files file moving all data to /export/
+ # symlinks will point from the original location to the new path under /export/
+ # If /export/ is not given, nothing will happen in that step
+ echo "Checking /export..."
+ python3 /usr/local/bin/export_user_files.py $PG_DATA_DIR_DEFAULT
+fi
+
+# Delete compiled templates in case they are out of date
+if [[ ! -z $GALAXY_CONFIG_TEMPLATE_CACHE_PATH ]]; then
+ rm -rf $GALAXY_CONFIG_TEMPLATE_CACHE_PATH/*
+fi
+
+# Enable loading of dependencies on startup. Such as LDAP.
+# Adapted from galaxyproject/galaxy/scripts/common_startup.sh
+if [[ ! -z $LOAD_GALAXY_CONDITIONAL_DEPENDENCIES ]]
+ then
+ echo "Installing optional dependencies in galaxy virtual environment..."
+ : ${GALAXY_WHEELS_INDEX_URL:="https://wheels.galaxyproject.org/simple"}
+ GALAXY_CONDITIONAL_DEPENDENCIES=$(PYTHONPATH=lib python -c "import galaxy.dependencies; print('\n'.join(galaxy.dependencies.optional('$GALAXY_CONFIG_FILE')))")
+ [ -z "$GALAXY_CONDITIONAL_DEPENDENCIES" ] || echo "$GALAXY_CONDITIONAL_DEPENDENCIES" | pip install -q -r /dev/stdin --index-url "${GALAXY_WHEELS_INDEX_URL}"
+fi
+
+if [[ ! -z $LOAD_GALAXY_CONDITIONAL_DEPENDENCIES ]] && [[ ! -z $LOAD_PYTHON_DEV_DEPENDENCIES ]]
+ then
+ echo "Installing development requirements in galaxy virtual environment..."
+ : ${GALAXY_WHEELS_INDEX_URL:="https://wheels.galaxyproject.org/simple"}
+ dev_requirements='./lib/galaxy/dependencies/dev-requirements.txt'
+ [ -f $dev_requirements ] && pip install -q -r $dev_requirements --index-url "${GALAXY_WHEELS_INDEX_URL}"
+fi
+
+# Enable Test Tool Shed
+if [[ ! -z $ENABLE_TTS_INSTALL ]]
+ then
+ echo "Enable installation from the Test Tool Shed."
+ export GALAXY_CONFIG_TOOL_SHEDS_CONFIG_FILE=$GALAXY_HOME/tool_sheds_conf.xml
+fi
+
+# Remove all default tools from Galaxy by default
+if [[ ! -z $BARE ]]
+ then
+ echo "Remove all tools from the tool_conf.xml file."
+ export GALAXY_CONFIG_TOOL_CONFIG_FILE=config/shed_tool_conf.xml,$GALAXY_ROOT/test/functional/tools/upload_tool_conf.xml
+fi
+
+# If auto installing conda envs, make sure bcftools is installed for __set_metadata__ tool
+if [[ ! -z $GALAXY_CONFIG_CONDA_AUTO_INSTALL ]]
+ then
+ if [ ! -d "/tool_deps/_conda/envs/__bcftools@1.5" ]; then
+ su $GALAXY_USER -c "/tool_deps/_conda/bin/conda create -y --override-channels --channel iuc --channel conda-forge --channel bioconda --channel defaults --name __bcftools@1.5 bcftools=1.5"
+ su $GALAXY_USER -c "/tool_deps/_conda/bin/conda clean --tarballs --yes"
+ fi
+fi
+
+if [[ ! -z $GALAXY_EXTRAS_CONFIG_POSTGRES ]]; then
+ if [[ $NONUSE != *"postgres"* ]]
+ then
+ # Backward compatibility for exported postgresql directories before version 15.08.
+ # In previous versions postgres has the UID/GID of 102/106. We changed this in
+ # https://github.com/bgruening/docker-galaxy-stable/pull/71 to GALAXY_POSTGRES_UID=1550 and
+ # GALAXY_POSTGRES_GID=1550
+ if [ -e /export/postgresql/ ];
+ then
+ if [ `stat -c %g /export/postgresql/` == "106" ];
+ then
+ chown -R postgres:postgres /export/postgresql/
+ fi
+ fi
+ fi
+fi
+
+
+if [[ ! -z $GALAXY_EXTRAS_CONFIG_CONDOR ]]; then
+ if [[ ! -z $ENABLE_CONDOR ]]
+ then
+ if [[ ! -z $CONDOR_HOST ]]
+ then
+ echo "Enabling Condor with external scheduler at $CONDOR_HOST"
+ echo "# Config generated by startup.sh
+CONDOR_HOST = $CONDOR_HOST
+ALLOW_ADMINISTRATOR = *
+ALLOW_OWNER = *
+ALLOW_READ = *
+ALLOW_WRITE = *
+ALLOW_CLIENT = *
+ALLOW_NEGOTIATOR = *
+DAEMON_LIST = MASTER, SCHEDD
+UID_DOMAIN = galaxy
+DISCARD_SESSION_KEYRING_ON_STARTUP = False
+TRUST_UID_DOMAIN = true" > /etc/condor/condor_config.local
+ fi
+
+ if [[ -e /export/condor_config ]]
+ then
+ echo "Replacing Condor config by locally supplied config from /export/condor_config"
+ rm -f /etc/condor/condor_config
+ ln -s /export/condor_config /etc/condor/condor_config
+ fi
+ fi
+fi
+
+
+# Copy or link the slurm/munge config files
+if [ -e /export/slurm.conf ]
+then
+ rm -f /etc/slurm-llnl/slurm.conf
+ ln -s /export/slurm.conf /etc/slurm-llnl/slurm.conf
+else
+ # Configure SLURM with runtime hostname.
+ # Use absolute path to python so virtualenv is not used.
+ /usr/bin/python /usr/sbin/configure_slurm.py
+fi
+if [ -e /export/munge.key ]
+then
+ rm -f /etc/munge/munge.key
+ ln -s /export/munge.key /etc/munge/munge.key
+ chmod 400 /export/munge.key
+fi
+
+# link the gridengine config file
+if [ -e /export/act_qmaster ]
+then
+ rm -f /var/lib/gridengine/default/common/act_qmaster
+ ln -s /export/act_qmaster /var/lib/gridengine/default/common/act_qmaster
+fi
+
+# Waits until postgres is ready
+function wait_for_postgres {
+ echo "Checking if database is up and running"
+ until /usr/local/bin/check_database.py 2>&1 >/dev/null; do sleep 1; echo "Waiting for database"; done
+ echo "Database connected"
+}
+
+# $NONUSE can be set to include cron, proftp, reports or nodejs
+# if included we will _not_ start these services.
+function start_supervisor {
+ supervisord -c /etc/supervisor/supervisord.conf
+ sleep 5
+
+ if [[ ! -z $SUPERVISOR_MANAGE_POSTGRES && ! -z $SUPERVISOR_POSTGRES_AUTOSTART ]]; then
+ if [[ $NONUSE != *"postgres"* ]]
+ then
+ echo "Starting postgres"
+ supervisorctl start postgresql
+ fi
+ fi
+
+ wait_for_postgres
+
+ # Make sure the database is automatically updated
+ if [[ ! -z $GALAXY_AUTO_UPDATE_DB ]]
+ then
+ echo "Updating Galaxy database"
+ sh manage_db.sh -c /etc/galaxy/galaxy.yml upgrade
+ fi
+
+ if [[ ! -z $SUPERVISOR_MANAGE_CRON ]]; then
+ if [[ $NONUSE != *"cron"* ]]
+ then
+ echo "Starting cron"
+ supervisorctl start cron
+ fi
+ fi
+
+ if [[ ! -z $SUPERVISOR_MANAGE_PROFTP ]]; then
+ if [[ $NONUSE != *"proftp"* ]]
+ then
+ echo "Starting ProFTP"
+ supervisorctl start proftpd
+ fi
+ fi
+
+ if [[ ! -z $SUPERVISOR_MANAGE_REPORTS ]]; then
+ if [[ $NONUSE != *"reports"* ]]
+ then
+ echo "Starting Galaxy reports webapp"
+ supervisorctl start reports
+ fi
+ fi
+
+ if [[ ! -z $SUPERVISOR_MANAGE_IE_PROXY ]]; then
+ if [[ $NONUSE != *"nodejs"* ]]
+ then
+ echo "Starting nodejs"
+ supervisorctl start galaxy:galaxy_nodejs_proxy
+ fi
+ fi
+
+ if [[ ! -z $SUPERVISOR_MANAGE_CONDOR ]]; then
+ if [[ $NONUSE != *"condor"* ]]
+ then
+ echo "Starting condor"
+ supervisorctl start condor
+ fi
+ fi
+
+ if [[ ! -z $SUPERVISOR_MANAGE_SLURM ]]; then
+ if [[ $NONUSE != *"slurmctld"* ]]
+ then
+ echo "Starting slurmctld"
+ supervisorctl start slurmctld
+ fi
+ if [[ $NONUSE != *"slurmd"* ]]
+ then
+ echo "Starting slurmd"
+ supervisorctl start slurmd
+ fi
+ supervisorctl start munge
+ else
+ if [[ $NONUSE != *"slurmctld"* ]]
+ then
+ echo "Starting slurmctld"
+ /usr/sbin/slurmctld -L $GALAXY_LOGS_DIR/slurmctld.log
+ fi
+ if [[ $NONUSE != *"slurmd"* ]]
+ then
+ echo "Starting slurmd"
+ /usr/sbin/slurmd -L $GALAXY_LOGS_DIR/slurmd.log
+ fi
+
+ # We need to run munged regardless
+ mkdir -p /var/run/munge && /usr/sbin/munged -f
+ fi
+}
+
+if [[ ! -z $SUPERVISOR_POSTGRES_AUTOSTART ]]; then
+ if [[ $NONUSE != *"postgres"* ]]
+ then
+ # Change the data_directory of postgresql in the main config file
+ ansible localhost -m lineinfile -a "line='data_directory = \'$PG_DATA_DIR_HOST\'' dest=$PG_CONF_DIR_DEFAULT/postgresql.conf backup=yes state=present regexp='data_directory'" &> /dev/null
+ fi
+fi
+
+if $PRIVILEGED; then
+ echo "Enable Galaxy Interactive Environments."
+ export GALAXY_CONFIG_INTERACTIVE_ENVIRONMENT_PLUGINS_DIRECTORY="config/plugins/interactive_environments"
+ if [ x$DOCKER_PARENT == "x" ]; then
+ #build the docker in docker environment
+ bash /root/cgroupfs_mount.sh
+ start_supervisor
+ supervisorctl start docker
+ else
+ #inheriting /var/run/docker.sock from parent, assume that you need to
+ #run docker with sudo to validate
+ echo "$GALAXY_USER ALL = NOPASSWD : ALL" >> /etc/sudoers
+ start_supervisor
+ fi
+ if [[ ! -z $PULL_IE_IMAGES ]]; then
+ echo "About to pull IE images. Depending on the size, this may take a while!"
+
+ for ie in {JUPYTER,RSTUDIO,ETHERCALC,PHINCH,NEO}; do
+ enabled_var_name="GALAXY_EXTRAS_IE_FETCH_${ie}";
+ if [[ ${!enabled_var_name} ]]; then
+ # Store name in a var
+ image_var_name="GALAXY_EXTRAS_${ie}_IMAGE"
+ # And then read from that var
+ docker pull "${!image_var_name}"
+ fi
+ done
+ fi
+
+ # in privileged mode autofs and CVMFS is available
+ # install autofs
+ echo "Installing autofs to enable automatic CVMFS mounts"
+ apt-get install autofs --no-install-recommends -y
+ apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/*
+else
+ echo "Disable Galaxy Interactive Environments. Start with --privileged to enable IE's."
+ export GALAXY_CONFIG_INTERACTIVE_ENVIRONMENT_PLUGINS_DIRECTORY=""
+ start_supervisor
+fi
+
+if [ "$USE_HTTPS_LETSENCRYPT" != "False" ]
+then
+ echo "Setting up letsencrypt"
+ ansible-playbook -c local /ansible/provision.yml \
+ --extra-vars gather_facts=False \
+ --extra-vars galaxy_extras_config_ssl=True \
+ --extra-vars galaxy_extras_config_ssl_method=letsencrypt \
+ --extra-vars galaxy_extras_galaxy_domain="GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL" \
+ --extra-vars galaxy_extras_config_nginx_upload=False \
+ --tags https
+fi
+if [ "$USE_HTTPS" != "False" ]
+then
+ if [ -f /export/server.key -a -f /export/server.crt ]
+ then
+ echo "Copying SSL keys"
+ ansible-playbook -c local /ansible/provision.yml \
+ --extra-vars gather_facts=False \
+ --extra-vars galaxy_extras_config_ssl=True \
+ --extra-vars galaxy_extras_config_ssl_method=own \
+ --extra-vars src_nginx_ssl_certificate_key=/export/server.key \
+ --extra-vars src_nginx_ssl_certificate=/export/server.crt \
+ --extra-vars galaxy_extras_config_nginx_upload=False \
+ --tags https
+ else
+ echo "Setting up self-signed SSL keys"
+ ansible-playbook -c local /ansible/provision.yml \
+ --extra-vars gather_facts=False \
+ --extra-vars galaxy_extras_config_ssl=True \
+ --extra-vars galaxy_extras_config_ssl_method=self-signed \
+ --extra-vars galaxy_extras_config_nginx_upload=False \
+ --tags https
+ fi
+fi
+
+# In case the user wants the default admin to be created, do so.
+if [[ ! -z $GALAXY_DEFAULT_ADMIN_USER ]]
+ then
+ echo "Creating admin user $GALAXY_DEFAULT_ADMIN_USER with key $GALAXY_DEFAULT_ADMIN_KEY and password $GALAXY_DEFAULT_ADMIN_PASSWORD if not existing"
+ python /usr/local/bin/create_galaxy_user.py --user "$GALAXY_DEFAULT_ADMIN_EMAIL" --password "$GALAXY_DEFAULT_ADMIN_PASSWORD" \
+ -c "$GALAXY_CONFIG_FILE" --username "$GALAXY_DEFAULT_ADMIN_USER" --key "$GALAXY_DEFAULT_ADMIN_KEY"
+fi
+# If there is a need to execute actions that would require a live galaxy instance, such as adding workflows, setting quotas, adding more users, etc.
+# then place a file with that logic named post-start-actions.sh on the /export/ directory, it should have access to all environment variables
+# visible here.
+# The file needs to be executable (chmod a+x post-start-actions.sh)
+# uses ephemeris, present in docker-galaxy-stable, to wait for the local instance
+
+if [[ -f /export/post-start-actions.sh ]]
+ then
+ /tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 120 > $GALAXY_LOGS_DIR/post-start-actions.log
+ /export/post-start-actions.sh >> $GALAXY_LOGS_DIR/post-start-actions.log &
+ else
+ echo "No /export/post-start-actions.sh found or not executable so not running" >> $GALAXY_LOGS_DIR/post-start-actions.log
+fi
+
+
+# Reinstall tools if the user want to
+if [[ ! -z $GALAXY_AUTO_UPDATE_TOOLS ]]
+ then
+ /tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 120 > /home/galaxy/logs/post-start-actions.log &&
+ OLDIFS=$IFS
+ IFS=','
+ for TOOL_YML in `echo "$GALAXY_AUTO_UPDATE_TOOLS"`
+ do
+ echo "Installing tools from $TOOL_YML"
+ /tool_deps/_conda/bin/shed-tools install -g "http://127.0.0.1" -a "$GALAXY_DEFAULT_ADMIN_KEY" -t "$TOOL_YML"
+ /tool_deps/_conda/bin/conda clean --tarballs --yes
+ done
+ IFS=$OLDIFS
+fi
+
+# migrate custom IEs or Visualisations (Galaxy plugins)
+# this is needed by the new client build system
+python3 ${GALAXY_ROOT}/scripts/plugin_staging.py
+
+# Enable verbose output
+if [ `echo ${GALAXY_LOGGING:-'no'} | tr [:upper:] [:lower:]` = "full" ]
+ then
+ tail -f /var/log/supervisor/* /var/log/nginx/* $GALAXY_LOGS_DIR/*.log
+ else
+ tail -f $GALAXY_LOGS_DIR/*.log
+fi
diff -r 51fa77152988 -r f8c1694190f0 toolfactory/html_dir.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/toolfactory/html_dir.py Sun Aug 16 08:11:10 2020 -0400
@@ -0,0 +1,180 @@
+
+class tooloutHTMLifyer(self):
+
+ def compressPDF(self,inpdf=None,thumbformat='png'):
+ """need absolute path to pdf
+ note that GS gets confoozled if no $TMP or $TEMP
+ so we set it
+ """
+ assert os.path.isfile(inpdf), "## Input %s supplied to %s compressPDF not found" % (inpdf,self.myName)
+ hlog = os.path.join(self.opts.output_dir,"compress_%s.txt" % os.path.basename(inpdf))
+ sto = open(hlog,'a')
+ our_env = os.environ.copy()
+ our_tmp = our_env.get('TMP',None)
+ if not our_tmp:
+ our_tmp = our_env.get('TEMP',None)
+ if not (our_tmp and os.path.exists(our_tmp)):
+ newtmp = os.path.join(self.opts.output_dir,'tmp')
+ try:
+ os.mkdir(newtmp)
+ except:
+ sto.write('## WARNING - cannot make %s - it may exist or permissions need fixing\n' % newtmp)
+ our_env['TEMP'] = newtmp
+ if not self.temp_warned:
+ sto.write('## WARNING - no $TMP or $TEMP!!! Please fix - using %s temporarily\n' % newtmp)
+ self.temp_warned = True
+ outpdf = '%s_compressed' % inpdf
+ cl = ["gs", "-sDEVICE=pdfwrite", "-dNOPAUSE", "-dUseCIEColor", "-dBATCH","-dPDFSETTINGS=/printer", "-sOutputFile=%s" % outpdf,inpdf]
+ x = subprocess.Popen(cl,stdout=sto,stderr=sto,cwd=self.opts.output_dir,env=our_env)
+ retval1 = x.wait()
+ sto.close()
+ if retval1 == 0:
+ os.unlink(inpdf)
+ shutil.move(outpdf,inpdf)
+ os.unlink(hlog)
+ hlog = os.path.join(self.opts.output_dir,"thumbnail_%s.txt" % os.path.basename(inpdf))
+ sto = open(hlog,'w')
+ outpng = '%s.%s' % (os.path.splitext(inpdf)[0],thumbformat)
+ if self.useGM:
+ cl2 = ['gm', 'convert', inpdf, outpng]
+ else: # assume imagemagick
+ cl2 = ['convert', inpdf, outpng]
+ x = subprocess.Popen(cl2,stdout=sto,stderr=sto,cwd=self.opts.output_dir,env=our_env)
+ retval2 = x.wait()
+ sto.close()
+ if retval2 == 0:
+ os.unlink(hlog)
+ retval = retval1 or retval2
+ return retval
+
+
+ def getfSize(self,fpath,outpath):
+ """
+ format a nice file size string
+ """
+ size = ''
+ fp = os.path.join(outpath,fpath)
+ if os.path.isfile(fp):
+ size = '0 B'
+ n = float(os.path.getsize(fp))
+ if n > 2**20:
+ size = '%1.1f MB' % (n/2**20)
+ elif n > 2**10:
+ size = '%1.1f KB' % (n/2**10)
+ elif n > 0:
+ size = '%d B' % (int(n))
+ return size
+
+ def makeHtml(self):
+ """ Create an HTML file content to list all the artifacts found in the output_dir
+ """
+
+ galhtmlprefix = """
+
+
+
+
+
+
+
+
+ """
+ galhtmlattr = """
"""
+ galhtmlpostfix = """
\n"""
+
+ flist = os.listdir(self.opts.output_dir)
+ flist = [x for x in flist if x != 'Rplots.pdf']
+ flist.sort()
+ html = []
+ html.append(galhtmlprefix % progname)
+ html.append('Galaxy Tool "%s" run at %s
' % (self.toolname,timenow()))
+ fhtml = []
+ if len(flist) > 0:
+ logfiles = [x for x in flist if x.lower().endswith('.log')] # log file names determine sections
+ logfiles.sort()
+ logfiles = [x for x in logfiles if os.path.abspath(x) != os.path.abspath(self.tlog)]
+ logfiles.append(os.path.abspath(self.tlog)) # make it the last one
+ pdflist = []
+ npdf = len([x for x in flist if os.path.splitext(x)[-1].lower() == '.pdf'])
+ for rownum,fname in enumerate(flist):
+ dname,e = os.path.splitext(fname)
+ sfsize = self.getfSize(fname,self.opts.output_dir)
+ if e.lower() == '.pdf' : # compress and make a thumbnail
+ thumb = '%s.%s' % (dname,self.thumbformat)
+ pdff = os.path.join(self.opts.output_dir,fname)
+ retval = self.compressPDF(inpdf=pdff,thumbformat=self.thumbformat)
+ if retval == 0:
+ pdflist.append((fname,thumb))
+ else:
+ pdflist.append((fname,fname))
+ if (rownum+1) % 2 == 0:
+ fhtml.append('%s | %s |
' % (fname,fname,sfsize))
+ else:
+ fhtml.append('%s | %s |
' % (fname,fname,sfsize))
+ for logfname in logfiles: # expect at least tlog - if more
+ if os.path.abspath(logfname) == os.path.abspath(self.tlog): # handled later
+ sectionname = 'All tool run'
+ if (len(logfiles) > 1):
+ sectionname = 'Other'
+ ourpdfs = pdflist
+ else:
+ realname = os.path.basename(logfname)
+ sectionname = os.path.splitext(realname)[0].split('_')[0] # break in case _ added to log
+ ourpdfs = [x for x in pdflist if os.path.basename(x[0]).split('_')[0] == sectionname]
+ pdflist = [x for x in pdflist if os.path.basename(x[0]).split('_')[0] != sectionname] # remove
+ nacross = 1
+ npdf = len(ourpdfs)
+
+ if npdf > 0:
+ nacross = math.sqrt(npdf) ## int(round(math.log(npdf,2)))
+ if int(nacross)**2 != npdf:
+ nacross += 1
+ nacross = int(nacross)
+ width = min(400,int(1200/nacross))
+ html.append('%s images and outputs
' % sectionname)
+ html.append('(Click on a thumbnail image to download the corresponding original PDF image)
')
+ ntogo = nacross # counter for table row padding with empty cells
+ html.append('\n')
+ for i,paths in enumerate(ourpdfs):
+ fname,thumb = paths
+ s= """ | \n""" % (fname,thumb,fname,width,fname)
+ if ((i+1) % nacross == 0):
+ s += '
\n'
+ ntogo = 0
+ if i < (npdf - 1): # more to come
+ s += ''
+ ntogo = nacross
+ else:
+ ntogo -= 1
+ html.append(s)
+ if html[-1].strip().endswith('
'):
+ html.append('
\n')
+ else:
+ if ntogo > 0: # pad
+ html.append(' | '*ntogo)
+ html.append('\n')
+ logt = open(logfname,'r').readlines()
+ logtext = [x for x in logt if x.strip() > '']
+ html.append('%s log output
' % sectionname)
+ if len(logtext) > 1:
+ html.append('\n\n')
+ html += logtext
+ html.append('\n
\n')
+ else:
+ html.append('%s is empty
' % logfname)
+ if len(fhtml) > 0:
+ fhtml.insert(0,'Output File Name (click to view) | Size |
\n')
+ fhtml.append('
')
+ html.append('All output files available for downloading
\n')
+ html += fhtml # add all non-pdf files to the end of the display
+ else:
+ html.append('### Error - %s returned no files - please confirm that parameters are sane
' % self.opts.interpreter)
+ html.append(galhtmlpostfix)
+ htmlf = file(self.opts.output_html,'w')
+ htmlf.write('\n'.join(html))
+ htmlf.write('\n')
+ htmlf.close()
+ self.html = html
+
+
diff -r 51fa77152988 -r f8c1694190f0 toolfactory/tftesthistory.tar.gz
Binary file toolfactory/tftesthistory.tar.gz has changed