# HG changeset patch
# User ufz
# Date 1729597995 0
# Node ID dfe0aae1495c0546e04f4bf8fc6e6f7199420e75
# Parent 4f4dc352d660ccaf172ef1e3b7e5e76573431e06
planemo upload for repository https://github.com/Helmholtz-UFZ/galaxy-tools/tree/main/tools/omero commit 19d84fd5a372f1428e3e5670144881a56e8af8b2
diff -r 4f4dc352d660 -r dfe0aae1495c macros.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/macros.xml Tue Oct 22 11:53:15 2024 +0000
@@ -0,0 +1,47 @@
+
+
+ 5.18.0
+ 3.0.1
+ 23.0
+
+
+
+ ezomero
+ pandas
+
+
+
+
+
+
+ omero-py
+
+ openjdk
+
+
+
+
+
+
+
+ ^[a-zA-Z0-9._-]*$
+ '..' not in value
+
+
+
+
+
+
+ --host '$omero_host'
+ --port $omero_port
+
+
+
+
+
+
\ No newline at end of file
diff -r 4f4dc352d660 -r dfe0aae1495c omero_filter.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/omero_filter.py Tue Oct 22 11:53:15 2024 +0000
@@ -0,0 +1,77 @@
+import argparse
+import csv
+import json
+import sys
+
+import ezomero as ez
+
+
+def filter_ids_ezo(user, pws, host, port, filter, id, value1, value2=None, tsv_file="filter_list.tsv"):
+
+ # Transform the id input into a list of integers
+ id = id.split(',')
+ id = list(map(int, id))
+
+ # Function to write tabular file from the ezomero output
+ def write_ids_to_tsv(data):
+ with open(tsv_file, 'w', newline='') as f:
+ writer = csv.writer(f, delimiter='\t')
+ for item in data:
+ writer.writerow([item]) # Write each ID
+
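+ # Run all filters inside one OMERO connection; the context manager closes it on exit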
+ with ez.connect(user, pws, "", host, port, secure=True) as conn:
+
+ if filter == "filename":
+ fn_ids = ez.filter_by_filename(conn, id, value1)
+ write_ids_to_tsv(fn_ids)
+ return fn_ids
+
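+ # "KP" = Key-Value Pairs: keep only images annotated with key `value1` and value `value2`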
+ elif filter == "KP":
+ kp_ims = ez.filter_by_kv(conn, id, value1, value2)
+ write_ids_to_tsv(kp_ims)
+ return kp_ims
+
+ elif filter == "tag":
+ tg_dict = ez.filter_by_tag_value(conn, id, value1)
+ write_ids_to_tsv(tg_dict)
+ return tg_dict
+
+ else:
+ sys.exit(f"Unsupported object type: {filter}")
+
+
+# Argument parsing
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Filter OMERO image IDs by filename, key-value pair, or tag and save the matches as TSV.")
+ parser.add_argument("--credential-file", dest="credential_file", type=str, required=True,
+ help="Credential file (JSON file with username and password for OMERO)")
+ parser.add_argument('--host', required=True,
+ help="Host server address.")
+ parser.add_argument('--port', required=True, type=int,
+ help='OMERO port')
+ parser.add_argument('--filter', required=True,
+ help="Filter type - Filename, Key-Value Pairs, Tag")
+ parser.add_argument('--id', required=True,
+ help="List of images IDs")
+ parser.add_argument('--value1', required=True,
+ help="First searching values - Filename, Key, Tag")
+ parser.add_argument('--value2', required=False,
+ help="Second searching values - Value (necessary just for Key-Value Pairs filter")
+ parser.add_argument('--tsv_file', default='filter_list.tsv',
+ help="Output TSV file path.")
+ args = parser.parse_args()
+
+ if args.filter == "KP" and args.value2 is None:
+ raise ValueError("'--value 2' is necessary to retrieve KP")
+
+ with open(args.credential_file, 'r') as f:
+ crds = json.load(f)
+
+ # Call the main function to get the object and save it as a TSV
+ filter_ids_ezo(user=crds['username'], pws=crds['password'], host=args.host,
+ port=args.port,
+ filter=args.filter,
+ value1=args.value1,
+ value2=args.value2,
+ id=args.id,
+ tsv_file=args.tsv_file)
diff -r 4f4dc352d660 -r dfe0aae1495c omero_get_id.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/omero_get_id.py Tue Oct 22 11:53:15 2024 +0000
@@ -0,0 +1,116 @@
+import argparse
+import csv
+import json
+import sys
+
+import ezomero as ez
+
+
+def get_ids_ezo(user, pws, host, port, final_obj_type, parent_obj_type, parent_id=None, tsv_file="id_list.tsv"):
+
+ # Function to write tabular file from the ezomero output
+ def write_ids_to_tsv(data):
+ with open(tsv_file, 'w', newline='') as f:
+ writer = csv.writer(f, delimiter='\t')
+ for item in data:
+ writer.writerow([item]) # Write each ID
+
+ with ez.connect(user, pws, "", host, port, secure=True) as conn:
+
+ if final_obj_type == "Project":
+ proj_ids = ez.get_project_ids(conn)
+ write_ids_to_tsv(proj_ids)
+ return proj_ids
+
+ elif final_obj_type == "Dataset":
+ args = {'project': None}
+ if parent_obj_type == "Project":
+ args['project'] = parent_id
+ ds_ids = ez.get_dataset_ids(conn, **args)
+ write_ids_to_tsv(ds_ids)
+ return ds_ids
+
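+ # Image IDs can be scoped to a Project, Dataset, Plate, or Well, or fetched for all images with "All"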
+ elif final_obj_type == "Image":
+ args = {
+ 'project': None,
+ 'dataset': None,
+ 'plate': None,
+ 'well': None
+ }
+ if parent_obj_type == "Project":
+ args['project'] = parent_id
+ elif parent_obj_type == "Dataset":
+ args['dataset'] = parent_id
+ elif parent_obj_type == "Plate":
+ args['plate'] = parent_id
+ elif parent_obj_type == "Well":
+ args['well'] = parent_id
+ elif parent_obj_type != "All":
+ raise ValueError("Object set as parent_obj_type is not compatible")
+
+ ds_ims = ez.get_image_ids(conn, **args)
+ write_ids_to_tsv(ds_ims)
+ return ds_ims
+
+ elif final_obj_type == "Annotation":
+ map_annot_ids = ez.get_map_annotation_ids(conn, parent_obj_type, parent_id)
+ write_ids_to_tsv(map_annot_ids)
+ return map_annot_ids
+
+ elif final_obj_type == "Tag":
+ tag_ids = ez.get_tag_ids(conn, parent_obj_type, parent_id)
+ write_ids_to_tsv(tag_ids)
+ return tag_ids
+
+ elif final_obj_type == "Roi":
+ roi_ids = ez.get_roi_ids(conn, parent_id)
+ write_ids_to_tsv(roi_ids)
+ return roi_ids
+
+ elif final_obj_type == "Table":
+ file_ann_ids = ez.get_file_annotation_ids(conn, parent_obj_type, parent_id)
+ write_ids_to_tsv(file_ann_ids)
+ return file_ann_ids
+
+ else:
+ sys.exit(f"Unsupported object type: {filter}")
+
+
+# Argument parsing
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Fetch OMERO object IDs as TSV from parent object.")
+ parser.add_argument("--credential-file", dest="credential_file", type=str,
+ required=True, help="Credential file (JSON file with username and password for OMERO)")
+ parser.add_argument('--host', required=True,
+ help="Host server address.")
+ parser.add_argument('--port', required=True, type=int,
+ help='OMERO port')
+ parser.add_argument('--final_obj_type', required=True,
+ help="Type of object to fetch ID: Project, Dataset, Image, Annotation, Tag, Roi, or Table.")
+ parser.add_argument('--parent_obj_type', required=True,
+ help="Type of object from which you fetch IDs: Project, Dataset, Plate, Well, Image (or 'All' if you want to get all objects).")
+ parser.add_argument('--parent_id', required=False, type=int,
+ help="ID of the OMERO object in `--parent_obj_type`, not required if you used `--parent_obj_type All`.")
+ parser.add_argument('--tsv_file', default='id_list.tsv',
+ help="Output TSV file path.")
+ args = parser.parse_args()
+
+ if args.parent_id is None and args.parent_obj_type != "All":
+ raise ValueError("ID is only optional is you use `--parent_obj_type All`")
+
+ if args.final_obj_type == "Roi" and args.parent_obj_type != "Image":
+ raise ValueError("Roi IDs can only be retrived from images, use `--parent_obj_type Image`")
+
+ if args.parent_obj_type == "All" and args.final_obj_type not in ["Image", "Dataset", "Project"]:
+ raise ValueError("Only Images, Datasets and Projects is compatible with `--parent_obj_type All`")
+
+ with open(args.credential_file, 'r') as f:
+ crds = json.load(f)
+
+ # Call the main function to get the object and save it as a TSV
+ get_ids_ezo(user=crds['username'], pws=crds['password'], host=args.host,
+ port=args.port,
+ final_obj_type=args.final_obj_type,
+ parent_obj_type=args.parent_obj_type,
+ parent_id=args.parent_id,
+ tsv_file=args.tsv_file)
diff -r 4f4dc352d660 -r dfe0aae1495c omero_get_value.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/omero_get_value.py Tue Oct 22 11:53:15 2024 +0000
@@ -0,0 +1,98 @@
+import argparse
+import csv
+import json
+import sys
+
+import ezomero as ez
+
+
+def get_object_ezo(user, pws, host, port, obj_type, ids, tsv_file):
+ # Function to write tabular file from the ezomero output
+ def write_values_to_tsv(data, header):
+ with open(tsv_file, 'w', newline='') as f:
+ writer = csv.writer(f, delimiter='\t')
+ writer.writerow([header]) # Write the header
+ for item in data:
+ writer.writerow([item]) # Write each value
+
+ # Function to write tabular file from a dictionary ezomero output
+ def write_dict_to_tsv(data, headers):
+ with open(tsv_file, 'w', newline='') as f:
+ writer = csv.writer(f, delimiter='\t')
+ writer.writerow(headers) # Write the headers
+ for key, value in data.items():
+ writer.writerow([key, value]) # Write each key-value pair
+
+ # Function to write tabular file from list of list ezomero output
+ def write_table_to_tsv(data):
+ with open(tsv_file, 'w') as f:
+ for row in data:
+ f.write('\t'.join([str(val) for val in row]) + '\n')
+
+ with ez.connect(user, pws, "", host, port, secure=True) as conn:
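+ # Merge the key-value pairs of all requested map annotations into a single dictionary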
+ if obj_type == "Annotation":
+ ma_dict = {}
+ for maid in ids:
+ current_ma_dict = ez.get_map_annotation(conn, maid)
+ ma_dict = {**ma_dict, **current_ma_dict}
+ write_dict_to_tsv(ma_dict, ["Annotation ID", "Annotation Value"])
+ return ma_dict
+ elif obj_type == "Tag":
+ tags = []
+ for tag_id in ids:
+ tags.append(ez.get_tag(conn, tag_id))
+ # Sort the tags for consistency:
+ tags.sort()
+ write_values_to_tsv(tags, "Tags")
+ return tags
+ elif obj_type == "Table":
+ if len(ids) > 1:
+ raise ValueError("Only one table can be exported at a time")
+ table = ez.get_table(conn, ids[0])
+ write_table_to_tsv(table)
+ return table
+
+ else:
+ sys.exit(f"Unsupported object type: {filter}")
+
+
+# Argument parsing
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Fetch and save data as TSV based on object type.")
+ parser.add_argument("--credential-file", dest="credential_file", type=str,
+ required=True, help="Credential file (JSON file with username and password for OMERO)")
+ parser.add_argument('--host', required=True,
+ help="Host server address.")
+ parser.add_argument('--port', required=True, type=int,
+ help='OMERO port')
+ parser.add_argument('--obj_type', required=True,
+ help="Type of object to fetch: Annotation, Table or Tag.")
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument('--ids', nargs='+', type=int,
+ help="IDs of the OMERO objects.")
+ group.add_argument('--ids_path',
+ help="File with IDs of the OMERO objects (one per line).")
+ parser.add_argument('--tsv_file', default='id_list.tsv',
+ help="Output TSV file path.")
+ args = parser.parse_args()
+
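+ # Read IDs from the given file, one integer per line; lines that are not valid integers are reported and skipped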
+ if args.ids_path:
+ args.ids = []
+ with open(args.ids_path, 'r') as f:
+ for line in f:
+ try:
+ args.ids.append(int(line))
+ except ValueError:
+ print(f"{line.strip()} is not a valid ID.")
+ if len(args.ids) == 0:
+ raise ValueError("Cound not find a single ID in the file.")
+
+ with open(args.credential_file, 'r') as f:
+ crds = json.load(f)
+
+ # Call the main function to get the object and save it as a TSV
+ get_object_ezo(user=crds['username'], pws=crds['password'], host=args.host,
+ port=args.port,
+ obj_type=args.obj_type,
+ ids=args.ids,
+ tsv_file=args.tsv_file)
diff -r 4f4dc352d660 -r dfe0aae1495c omero_roi_import.xml
--- a/omero_roi_import.xml Thu Sep 12 16:30:36 2024 +0000
+++ b/omero_roi_import.xml Tue Oct 22 11:53:15 2024 +0000
@@ -1,46 +1,28 @@
-
+
with ezomero
- 5.18.0
- 3
+ macros.xml
+ 4
omero
-
- ezomero
- pandas
-
- openjdk
-
+
-
+
+
-
- ^[a-zA-Z0-9._-]*$
- '..' not in value
-
-
-
-
diff -r 4f4dc352d660 -r dfe0aae1495c test-data/output_KV_import.txt
--- a/test-data/output_KV_import.txt Thu Sep 12 16:30:36 2024 +0000
+++ b/test-data/output_KV_import.txt Tue Oct 22 11:53:15 2024 +0000
@@ -1,1 +1,1 @@
-SUCCESS: Successfully uploaded metadata for dataset with ID 2. Result: {'Key1': 'Value1', 'Key2': 'Value2'}
+SUCCESS: Successfully uploaded metadata for dataset with ID 3. Result: {'Key1': 'Value1', 'Key2': 'Value2'}
diff -r 4f4dc352d660 -r dfe0aae1495c test-data/output_filter_filename.tsv
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_filter_filename.tsv Tue Oct 22 11:53:15 2024 +0000
@@ -0,0 +1,1 @@
+2
diff -r 4f4dc352d660 -r dfe0aae1495c test-data/output_filter_tag.tsv
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_filter_tag.tsv Tue Oct 22 11:53:15 2024 +0000
@@ -0,0 +1,1 @@
+1
diff -r 4f4dc352d660 -r dfe0aae1495c test-data/output_ids_dataset.tsv
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_ids_dataset.tsv Tue Oct 22 11:53:15 2024 +0000
@@ -0,0 +1,1 @@
+1
diff -r 4f4dc352d660 -r dfe0aae1495c test-data/output_ids_image.tsv
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_ids_image.tsv Tue Oct 22 11:53:15 2024 +0000
@@ -0,0 +1,2 @@
+1
+2
diff -r 4f4dc352d660 -r dfe0aae1495c test-data/output_ids_project.tsv
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_ids_project.tsv Tue Oct 22 11:53:15 2024 +0000
@@ -0,0 +1,1 @@
+1