# HG changeset patch
# User ufz
# Date 1734382576 0
# Node ID 5ad32d18fe820f107c23138697fe8419ef5e2767
planemo upload for repository https://github.com/Helmholtz-UFZ/galaxy-tools/tree/main/tools/omero commit 636cbb62d59819caca5bc9eab0a8ec31be5bdd46
diff -r 000000000000 -r 5ad32d18fe82 README.md
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/README.md Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,19 @@
+# OMERO import images
+
+## Set up user credentials on Galaxy to connect to another OMERO instance
+
+To enable users to set their credentials for this tool,
+make sure the file `config/user_preferences_extra.yml` has the following section:
+
+```yaml
+ omero_account:
+ description: Your OMERO instance connection credentials
+ inputs:
+ - name: username
+ label: Username
+ type: text
+ required: False
+ - name: password
+ label: Password
+ type: password
+ required: False
+```
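+
+The tools in this suite read these credentials at runtime from a JSON
+credential file. A minimal sketch of its expected content (the scripts below
+read the `username` and `password` keys; the values here are placeholders):
+
+```json
+{
+    "username": "your-omero-username",
+    "password": "your-omero-password"
+}
+```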
diff -r 000000000000 -r 5ad32d18fe82 macros.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/macros.xml Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,56 @@
+
+
+ 5.18.0
+ 3.0.1
+ 23.0
+
+
+
+ ezomero
+ pandas
+
+
+
+
+
+
+ omero-py
+
+ openjdk
+
+
+
+
+
+
+
+ ^[a-zA-Z0-9._-]*$
+ '..' not in value
+
+
+
+
+
+
+ --host '$omero_host'
+ --port $omero_port
+
+
+
+
+
+
+
+ **OMERO-suite Security Disclaimer:** To use the OMERO tools, the user must trust the Galaxy instance.
+ The configuration file, which contains your OMERO username and password, is stored in the job working directory.
+ This directory exists only for the duration of the job and should be accessible only to the system user that runs the job.
+ However, please be aware that your username and password may be exposed to users with administrative rights.
+ We are working on increasing the security of the OMERO suite.
+
+
+
\ No newline at end of file
diff -r 000000000000 -r 5ad32d18fe82 omero_dataset_to_plate.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/omero_dataset_to_plate.py Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,127 @@
+import argparse
+import csv
+import json
+import re
+import sys
+from collections import defaultdict
+
+
+import omero
+from omero.gateway import BlitzGateway
+from omero.rtypes import rint, rstring
+
+
+def convert_dataset_to_plate(host, user, pws, port, dataset_id, log_file, mapping_file, delete_dataset):
+ """
+ Connect to OMERO server, convert a dataset to a plate using the specified well mapping file
+ """
+ conn = BlitzGateway(user, pws, host=host, port=port, secure=True)
+ if not conn.connect():
+ sys.exit("ERROR: Failed to connect to OMERO server")
+
+    def log_message(message):
+        with open(log_file, 'w') as f:
+            f.write(message)
+
+ dataset = conn.getObject("Dataset", dataset_id)
+ if dataset is None:
+ conn.close()
+ sys.exit("ERROR: Dataset not found")
+
+ update_service = conn.getUpdateService()
+
+ # Create a Plate
+ plate = omero.model.PlateI()
+ plate.name = rstring(dataset.getName())
+ plate = update_service.saveAndReturnObject(plate)
+
+ # Parse the mapping file
+ image_to_well_mapping = {}
+ if mapping_file:
+ with open(mapping_file, 'r') as f:
+ reader = csv.DictReader(f, delimiter='\t')
+ for row in reader:
+ filename = row['Filename']
+ well = row['Well']
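+                # A well label like 'B3' maps to zero-based indices:
+                # letter -> row ('B' -> 1), number -> column (3 -> 2).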
+ match = re.match(r"([A-Z])(\d+)", well)
+ if match:
+ row_char, col = match.groups()
+ row = ord(row_char.upper()) - ord('A')
+ col = int(col) - 1
+ image_to_well_mapping[filename] = (row, col)
+ else:
+ conn.close()
+ sys.exit(f"Invalid well format '{well}' for file '{filename}'")
+
+ # List the dataset children
+ images = list(dataset.listChildren())
+ if not images:
+ conn.close()
+ sys.exit("ERROR: No images found in dataset")
+
+    # Group the dataset images by target well according to the mapping file
+ grouped_images = defaultdict(list)
+ for image in images:
+ image_name = image.getName()
+ if image_to_well_mapping:
+ if image_name in image_to_well_mapping:
+ row, col = image_to_well_mapping[image_name]
+ grouped_images[(row, col)].append(image)
+ else:
+ conn.close()
+ sys.exit(f"Image '{image_name}' not found in mapping file.")
+
+ # Assign images to the well based on the mapping file
+ for (row, col), imgs_in_group in grouped_images.items():
+ well = omero.model.WellI()
+ well.plate = omero.model.PlateI(plate.id.val, False)
+ well.column = rint(col)
+ well.row = rint(row)
+
+ for image in imgs_in_group:
+ ws = omero.model.WellSampleI()
+ ws.image = omero.model.ImageI(image.id, False)
+ ws.well = well
+ well.addWellSample(ws)
+
+        try:
+            update_service.saveObject(well)
+        except Exception as e:
+            conn.close()
+            sys.exit("ERROR: Failed to update plate for dataset '{}' due to: {}".format(dataset.getName(), str(e)))
+
+    # Optionally delete the original dataset, then close the connection
+    if delete_dataset:
+ conn.deleteObjects("Dataset", [dataset_id], wait=True)
+ log_message(f"Images from Dataset {dataset_id} successfully added to Plate {plate.id.val}")
+ conn.close()
+
+
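+# Example invocation (hypothetical host and IDs; flags match the parser below):
+#   python omero_dataset_to_plate.py --credential-file creds.json \
+#       --host omero.example.org --port 4064 --dataset_id 2 \
+#       --mapping_file mapping.tsv --delete_dataset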
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Convert an OMERO dataset to a plate.")
+ parser.add_argument("--credential-file", dest="credential_file", type=str, required=True,
+ help="Credential file (JSON file with username and password for OMERO)")
+ parser.add_argument('--host', required=True, help='OMERO host')
+ parser.add_argument('--port', required=True, type=int, help='OMERO port')
+    parser.add_argument('--dataset_id', type=int, required=True, help="ID of the dataset to convert to a plate")
+ parser.add_argument('--log_file', default='metadata_import_log.txt',
+ help='Path to the log file')
+ parser.add_argument('--mapping_file',
+ help='Tabular file mapping filenames to well positions (2 columns: filename, Well)')
+ parser.add_argument('--delete_dataset', action='store_true',
+ help='Flag to delete the original dataset')
+ args = parser.parse_args()
+
+ with open(args.credential_file, 'r') as f:
+ crds = json.load(f)
+
+ convert_dataset_to_plate(
+ user=crds['username'],
+ pws=crds['password'],
+ host=args.host,
+ port=args.port,
+ dataset_id=args.dataset_id,
+ log_file=args.log_file,
+ mapping_file=args.mapping_file,
+ delete_dataset=args.delete_dataset
+ )
diff -r 000000000000 -r 5ad32d18fe82 omero_dataset_to_plate.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/omero_dataset_to_plate.xml Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,67 @@
+
+ with omero-py
+
+ macros.xml
+ 3
+
+
+ omero
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Description
+-----------
+- Tool to convert an existing dataset to a plate in OMERO
+- The tool takes as input a tabular mapping file with Filename and Well position, e.g.:
+ +------------+---------------+
+ | Filename | Well |
+ +============+===============+
+ | image1.tiff| A2 |
+ +------------+---------------+
+ | image2.tiff| B5 |
+ +------------+---------------+
+ | image3.tiff| H12 |
+ +------------+---------------+
+@SECURITY_DISCLAIMER@
+
+
+ 10.1038/nmeth.1896
+
+
diff -r 000000000000 -r 5ad32d18fe82 omero_filter.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/omero_filter.py Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,77 @@
+import argparse
+import csv
+import json
+import sys
+
+import ezomero as ez
+
+
+def filter_ids_ezo(user, pws, host, port, filter, id, value1, value2=None, tsv_file="filter_list.tsv"):
+
+    # Convert the comma-separated id input into a list of integers
+ id = id.split(',')
+ id = list(map(int, id))
+
+ # Function to write tabular file from the ezomero output
+ def write_ids_to_tsv(data):
+ with open(tsv_file, 'w', newline='') as f:
+ writer = csv.writer(f, delimiter='\t')
+ for item in data:
+ writer.writerow([item]) # Write each ID
+
+ with ez.connect(user, pws, "", host, port, secure=True) as conn:
+
+ if filter == "filename":
+ fn_ids = ez.filter_by_filename(conn, id, value1)
+ write_ids_to_tsv(fn_ids)
+ return fn_ids
+
+ elif filter == "KP":
+ kp_ims = ez.filter_by_kv(conn, id, value1, value2)
+ write_ids_to_tsv(kp_ims)
+ return kp_ims
+
+ elif filter == "tag":
+ tg_dict = ez.filter_by_tag_value(conn, id, value1)
+ write_ids_to_tsv(tg_dict)
+ return tg_dict
+
+ else:
+ sys.exit(f"Unsupported object type: {filter}")
+
+
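+# Example invocation (hypothetical host and IDs; flags match the parser below):
+#   python omero_filter.py --credential-file creds.json --host omero.example.org \
+#       --port 4064 --filter filename --id 1,2,3 --value1 image1.tiff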
+# Argument parsing
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Fetch and save data as TSV based on object type.")
+ parser.add_argument("--credential-file", dest="credential_file", type=str, required=True,
+ help="Credential file (JSON file with username and password for OMERO)")
+ parser.add_argument('--host', required=True,
+ help="Host server address.")
+ parser.add_argument('--port', required=True, type=int,
+ help='OMERO port')
+ parser.add_argument('--filter', required=True,
+                        help="Filter type: 'filename', 'KP' (Key-Value Pair), or 'tag'")
+ parser.add_argument('--id', required=True,
+                        help="Comma-separated list of image IDs")
+ parser.add_argument('--value1', required=True,
+                        help="First search value: filename, key, or tag")
+ parser.add_argument('--value2', required=False,
+                        help="Second search value (required only for the Key-Value Pair filter)")
+ parser.add_argument('--tsv_file', default='filter_list.tsv',
+ help="Output TSV file path.")
+ args = parser.parse_args()
+
+    if args.filter == "KP" and args.value2 is None:
+        raise ValueError("'--value2' is required for the Key-Value Pair (KP) filter")
+
+ with open(args.credential_file, 'r') as f:
+ crds = json.load(f)
+
+ # Call the main function to get the object and save it as a TSV
+ filter_ids_ezo(user=crds['username'], pws=crds['password'], host=args.host,
+ port=args.port,
+ filter=args.filter,
+ value1=args.value1,
+ value2=args.value2,
+ id=args.id,
+ tsv_file=args.tsv_file)
diff -r 000000000000 -r 5ad32d18fe82 omero_get_id.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/omero_get_id.py Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,116 @@
+import argparse
+import csv
+import json
+import sys
+
+import ezomero as ez
+
+
+def get_ids_ezo(user, pws, host, port, final_obj_type, parent_obj_type, parent_id=None, tsv_file="id_list.tsv"):
+
+ # Function to write tabular file from the ezomero output
+ def write_ids_to_tsv(data):
+ with open(tsv_file, 'w', newline='') as f:
+ writer = csv.writer(f, delimiter='\t')
+ for item in data:
+ writer.writerow([item]) # Write each ID
+
+ with ez.connect(user, pws, "", host, port, secure=True) as conn:
+
+ if final_obj_type == "Project":
+ proj_ids = ez.get_project_ids(conn)
+ write_ids_to_tsv(proj_ids)
+ return proj_ids
+
+ elif final_obj_type == "Dataset":
+ args = {'project': None}
+ if parent_obj_type == "Project":
+ args['project'] = parent_id
+ ds_ids = ez.get_dataset_ids(conn, **args)
+ write_ids_to_tsv(ds_ids)
+ return ds_ids
+
+ elif final_obj_type == "Image":
+ args = {
+ 'project': None,
+ 'dataset': None,
+ 'plate': None,
+ 'well': None
+ }
+ if parent_obj_type == "Project":
+ args['project'] = parent_id
+ elif parent_obj_type == "Dataset":
+ args['dataset'] = parent_id
+ elif parent_obj_type == "Plate":
+ args['plate'] = parent_id
+ elif parent_obj_type == "Well":
+ args['well'] = parent_id
+ elif parent_obj_type != "All":
+            raise ValueError(f"Unsupported parent_obj_type: {parent_obj_type}")
+
+ ds_ims = ez.get_image_ids(conn, **args)
+ write_ids_to_tsv(ds_ims)
+ return ds_ims
+
+ elif final_obj_type == "Annotation":
+ map_annot_ids = ez.get_map_annotation_ids(conn, parent_obj_type, parent_id)
+ write_ids_to_tsv(map_annot_ids)
+ return map_annot_ids
+
+ elif final_obj_type == "Tag":
+ tag_ids = ez.get_tag_ids(conn, parent_obj_type, parent_id)
+ write_ids_to_tsv(tag_ids)
+ return tag_ids
+
+ elif final_obj_type == "Roi":
+ roi_ids = ez.get_roi_ids(conn, parent_id)
+ write_ids_to_tsv(roi_ids)
+ return roi_ids
+
+ elif final_obj_type == "Table":
+ file_ann_ids = ez.get_file_annotation_ids(conn, parent_obj_type, parent_id)
+ write_ids_to_tsv(file_ann_ids)
+ return file_ann_ids
+
+ else:
+            sys.exit(f"Unsupported object type: {final_obj_type}")
+
+
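+# Example invocation (hypothetical host and IDs; flags match the parser below):
+#   python omero_get_id.py --credential-file creds.json --host omero.example.org \
+#       --port 4064 --final_obj_type Image --parent_obj_type Dataset --parent_id 1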
+# Argument parsing
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Fetch OMERO object IDs as TSV from parent object.")
+ parser.add_argument("--credential-file", dest="credential_file", type=str,
+ required=True, help="Credential file (JSON file with username and password for OMERO)")
+ parser.add_argument('--host', required=True,
+ help="Host server address.")
+ parser.add_argument('--port', required=True, type=int,
+ help='OMERO port')
+ parser.add_argument('--final_obj_type', required=True,
+ help="Type of object to fetch ID: Project, Dataset, Image, Annotation, Tag, Roi, or Table.")
+ parser.add_argument('--parent_obj_type', required=True,
+ help="Type of object from which you fetch IDs: Project, Dataset, Plate, Well, Image (or 'All' if you want to get all objects).")
+ parser.add_argument('--parent_id', required=False, type=int,
+ help="ID of the OMERO object in `--parent_obj_type`, not required if you used `--parent_obj_type All`.")
+ parser.add_argument('--tsv_file', default='id_list.tsv',
+ help="Output TSV file path.")
+ args = parser.parse_args()
+
+ if args.parent_id is None and args.parent_obj_type != "All":
+        raise ValueError("The ID is only optional if you use `--parent_obj_type All`")
+
+ if args.final_obj_type == "Roi" and args.parent_obj_type != "Image":
+        raise ValueError("Roi IDs can only be retrieved from images; use `--parent_obj_type Image`")
+
+ if args.parent_obj_type == "All" and args.final_obj_type not in ["Image", "Dataset", "Project"]:
+        raise ValueError("Only Images, Datasets, and Projects are compatible with `--parent_obj_type All`")
+
+ with open(args.credential_file, 'r') as f:
+ crds = json.load(f)
+
+ # Call the main function to get the object and save it as a TSV
+ get_ids_ezo(user=crds['username'], pws=crds['password'], host=args.host,
+ port=args.port,
+ final_obj_type=args.final_obj_type,
+ parent_obj_type=args.parent_obj_type,
+ parent_id=args.parent_id,
+ tsv_file=args.tsv_file)
diff -r 000000000000 -r 5ad32d18fe82 omero_get_value.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/omero_get_value.py Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,98 @@
+import argparse
+import csv
+import json
+import sys
+
+import ezomero as ez
+
+
+def get_object_ezo(user, pws, host, port, obj_type, ids, tsv_file):
+ # Function to write tabular file from the ezomero output
+ def write_values_to_tsv(data, header):
+ with open(tsv_file, 'w', newline='') as f:
+ writer = csv.writer(f, delimiter='\t')
+ writer.writerow([header]) # Write the header
+ for item in data:
+ writer.writerow([item]) # Write each value
+
+ # Function to write tabular file from a dictionary ezomero output
+ def write_dict_to_tsv(data, headers):
+ with open(tsv_file, 'w', newline='') as f:
+ writer = csv.writer(f, delimiter='\t')
+ writer.writerow(headers) # Write the headers
+ for key, value in data.items():
+ writer.writerow([key, value]) # Write each key-value pair
+
+ # Function to write tabular file from list of list ezomero output
+ def write_table_to_tsv(data):
+ with open(tsv_file, 'w') as f:
+ for row in data:
+ f.write('\t'.join([str(val) for val in row]) + '\n')
+
+ with ez.connect(user, pws, "", host, port, secure=True) as conn:
+ if obj_type == "Annotation":
+ ma_dict = {}
+ for maid in ids:
+ current_ma_dict = ez.get_map_annotation(conn, maid)
+ ma_dict = {**ma_dict, **current_ma_dict}
+ write_dict_to_tsv(ma_dict, ["Annotation ID", "Annotation Value"])
+ return ma_dict
+ elif obj_type == "Tag":
+ tags = []
+ for tag_id in ids:
+ tags.append(ez.get_tag(conn, tag_id))
+            # Sort the tags for consistency:
+            tags.sort()
+ write_values_to_tsv(tags, "Tags")
+ return tags
+ elif obj_type == "Table":
+ if len(ids) > 1:
+ raise ValueError("Only one table can be exported at a time")
+ table = ez.get_table(conn, ids[0])
+ write_table_to_tsv(table)
+ return table
+
+ else:
+            sys.exit(f"Unsupported object type: {obj_type}")
+
+
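+# Example invocation (hypothetical host and IDs; flags match the parser below):
+#   python omero_get_value.py --credential-file creds.json --host omero.example.org \
+#       --port 4064 --obj_type Tag --ids 5 7 --tsv_file tags.tsv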
+# Argument parsing
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Fetch and save data as TSV based on object type.")
+ parser.add_argument("--credential-file", dest="credential_file", type=str,
+ required=True, help="Credential file (JSON file with username and password for OMERO)")
+ parser.add_argument('--host', required=True,
+ help="Host server address.")
+ parser.add_argument('--port', required=True, type=int,
+ help='OMERO port')
+ parser.add_argument('--obj_type', required=True,
+ help="Type of object to fetch: Annotation, Table or Tag.")
+    group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument('--ids', nargs='+', type=int,
+ help="IDs of the OMERO objects.")
+ group.add_argument('--ids_path',
+ help="File with IDs of the OMERO objects (one per line).")
+    parser.add_argument('--tsv_file', default='id_list.tsv',
+ help="Output TSV file path.")
+ args = parser.parse_args()
+
+ if args.ids_path:
+ args.ids = []
+ with open(args.ids_path, 'r') as f:
+ for line in f:
+ try:
+ args.ids.append(int(line))
+ except ValueError:
+ print(f"{line.strip()} is not a valid ID.")
+ if len(args.ids) == 0:
+        raise ValueError("Could not find a single ID in the file.")
+
+ with open(args.credential_file, 'r') as f:
+ crds = json.load(f)
+
+ # Call the main function to get the object and save it as a TSV
+ get_object_ezo(user=crds['username'], pws=crds['password'], host=args.host,
+ port=args.port,
+ obj_type=args.obj_type,
+ ids=args.ids,
+ tsv_file=args.tsv_file)
diff -r 000000000000 -r 5ad32d18fe82 omero_metadata_upload.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/omero_metadata_upload.py Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,101 @@
+import argparse
+import json
+from datetime import datetime
+
+import ezomero as ez
+import pandas as pd
+
+
+def metadata_import_ezo(user, pws, host, port, obj_type, did=None, ann_type="table", ann_file=None, an_name=None,
+ log_file='metadata_import_log.txt'):
+ def upload_metadata(conn, obj_type, did, data_dict, df, ann_type, an_name):
+ try:
+ if ann_type == "KV":
+ id_map_ann = ez.post_map_annotation(conn, obj_type, object_id=int(did), kv_dict=data_dict, ns=an_name)
+ ma_dict = ez.get_map_annotation(conn, id_map_ann)
+ return ma_dict
+ elif ann_type == "table":
+ id_tb_ann = ez.post_table(conn, df, object_type=obj_type, object_id=int(did), title=an_name,
+ headers=True)
+ tb_dict = ez.get_table(conn, id_tb_ann)
+ return tb_dict
+ except Exception as e:
+ log_error(f"Error uploading metadata for {obj_type} with ID {did}: {str(e)}")
+ return None
+
+    def log_error(message):
+        with open(log_file, 'a') as f:
+            f.write(f"ERROR: {message}\n")
+
+    def log_success(message):
+        with open(log_file, 'a') as f:
+            f.write(f"SUCCESS: {message}\n")
+
+ try:
+ df = pd.read_csv(ann_file, delimiter='\t')
+ except FileNotFoundError as e:
+ log_error(f"Annotation file not found: {str(e)}")
+ return
+
+ if ann_type == "table":
+ data_dict = df.to_dict(orient='records')
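+    # For "KV", each column name becomes a key and the value is taken from
+    # the first row of the annotation file.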
+ elif ann_type == "KV":
+ data_dict = {col: df[col].iloc[0] for col in df.columns}
+
+ try:
+ with ez.connect(user, pws, "", host, port, secure=True) as conn:
+ if obj_type == "project":
+ if did is None:
+ did = ez.post_project(conn, project_name=str(datetime.now()))
+ result = upload_metadata(conn, "Project", did, data_dict, df, ann_type, an_name)
+ elif obj_type == "screen":
+ if did is None:
+ did = ez.post_screen(conn, screen_name=str(datetime.now()))
+ result = upload_metadata(conn, "Screen", did, data_dict, df, ann_type, an_name)
+ elif obj_type == "dataset":
+ if did is None:
+ did = ez.post_dataset(conn, dataset_name=str(datetime.now()))
+ result = upload_metadata(conn, "Dataset", did, data_dict, df, ann_type, an_name)
+ elif obj_type == "plate":
+ result = upload_metadata(conn, "Plate", did, data_dict, df, ann_type, an_name)
+ elif obj_type == "well":
+ result = upload_metadata(conn, "Well", did, data_dict, df, ann_type, an_name)
+ elif obj_type == "image":
+ result = upload_metadata(conn, "Image", did, data_dict, df, ann_type, an_name)
+ else:
+ raise ValueError("Unsupported object type provided: {}".format(obj_type))
+
+ if result is not None:
+ log_success(f"Successfully uploaded metadata for {obj_type} with ID {did}. Result: {result}")
+ else:
+ log_error(f"Failed to upload metadata for {obj_type} with ID {did}.")
+
+ except Exception as e:
+ log_error(f"Connection error: {str(e)}")
+
+
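+# Example invocation (hypothetical host and IDs; flags match the parser below):
+#   python omero_metadata_upload.py --credential-file creds.json --host omero.example.org \
+#       --port 4064 --obj_type dataset --did 4 --ann_type KV \
+#       --ann_file metadata.tsv --an_name my.namespace --log_file import_log.txt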
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='Import metadata into OMERO.')
+ parser.add_argument("--credential-file", dest="credential_file", type=str, required=True,
+ help="Credential file (JSON file with username and password for OMERO)")
+ parser.add_argument('--host', required=True, help='OMERO host')
+ parser.add_argument('--port', required=True, type=int, help='OMERO port')
+    parser.add_argument('--obj_type', required=True, choices=['project', 'screen', 'dataset', 'plate',
+                                                              'well', 'image'],
+ help='Type of OMERO object')
+ parser.add_argument('--did', type=int, help='ID of the object (if it exists)')
+ parser.add_argument('--ann_type', required=True, choices=['table', 'KV'], help='Annotation type')
+ parser.add_argument('--ann_file', required=True, help='Path to the annotation file')
+ parser.add_argument('--an_name', required=True, help='Namespace or title for the annotation')
+ parser.add_argument('--log_file', default='metadata_import_log.txt', help='Path to the log file')
+
+ args = parser.parse_args()
+
+ with open(args.credential_file, 'r') as f:
+ crds = json.load(f)
+
+ metadata_import_ezo(user=crds['username'], pws=crds['password'], host=args.host, port=args.port,
+ obj_type=args.obj_type, did=args.did, ann_type=args.ann_type,
+ ann_file=args.ann_file, an_name=args.an_name, log_file=args.log_file)
diff -r 000000000000 -r 5ad32d18fe82 omero_roi_upload.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/omero_roi_upload.py Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,176 @@
+import argparse
+import json
+import re
+
+import numpy as np
+import pandas as pd
+from ezomero import connect, post_roi
+from ezomero.rois import Ellipse, Label, Line, Point, Polygon, Polyline, Rectangle
+
+
+def parse_color(color_str):
+ if not color_str:
+ return None
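+    # e.g. "(255,0,0,128)" -> (255, 0, 0, 128)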
+ return tuple(map(int, re.findall(r'\d+', color_str)))
+
+
+def parse_points(points_str):
+ if not points_str:
+ return None
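+    # e.g. "(300,300),(350,350),(300,400)" -> [(300.0, 300.0), (350.0, 350.0), (300.0, 400.0)]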
+ # Remove leading and trailing brackets and split into individual points
+ points_str = points_str.strip("[]")
+ points = points_str.split("),(")
+ points = [point.strip("()") for point in points] # Remove any remaining parentheses
+ return [tuple(map(float, point.split(','))) for point in points]
+
+
+def create_shape(row):
+ shape_type = row['shape']
+ shape = None
+
+ if shape_type == 'Ellipse':
+ shape = Ellipse(
+ x=row['x'],
+ y=row['y'],
+ x_rad=row['x_rad'],
+ y_rad=row['y_rad'],
+ z=row.get('z'),
+ c=row.get('c'),
+ t=row.get('t'),
+ label=row.get('label'),
+ fill_color=parse_color(row.get('fill_color')),
+ stroke_color=parse_color(row.get('stroke_color')),
+ stroke_width=row.get('stroke_width')
+ )
+ elif shape_type == 'Label':
+ shape = Label(
+ x=row['x'],
+ y=row['y'],
+ label=row['label'],
+ fontSize=row['fontSize'],
+ z=row.get('z'),
+ c=row.get('c'),
+ t=row.get('t'),
+ fill_color=parse_color(row.get('fill_color')),
+ stroke_color=parse_color(row.get('stroke_color')),
+ stroke_width=row.get('stroke_width')
+ )
+ elif shape_type == 'Line':
+ shape = Line(
+ x1=row['x1'],
+ y1=row['y1'],
+ x2=row['x2'],
+ y2=row['y2'],
+ markerStart=row.get('markerStart', None),
+ markerEnd=row.get('markerEnd', None),
+ label=row.get('label'),
+ z=row.get('z'),
+ c=row.get('c'),
+ t=row.get('t'),
+ fill_color=parse_color(row.get('fill_color')),
+ stroke_color=parse_color(row.get('stroke_color')),
+ stroke_width=row.get('stroke_width')
+ )
+ elif shape_type == 'Point':
+ shape = Point(
+ x=row['x'],
+ y=row['y'],
+ z=row.get('z'),
+ c=row.get('c'),
+ t=row.get('t'),
+ label=row.get('label'),
+ fill_color=parse_color(row.get('fill_color')),
+ stroke_color=parse_color(row.get('stroke_color')),
+ stroke_width=row.get('stroke_width')
+ )
+ elif shape_type == 'Polygon':
+ shape = Polygon(
+ points=parse_points(row['points']),
+ z=row.get('z'),
+ c=row.get('c'),
+ t=row.get('t'),
+ label=row.get('label'),
+ fill_color=parse_color(row.get('fill_color')),
+ stroke_color=parse_color(row.get('stroke_color')),
+ stroke_width=row.get('stroke_width')
+ )
+ elif shape_type == 'Polyline':
+ shape = Polyline(
+ points=parse_points(row['points']),
+ z=row.get('z'),
+ c=row.get('c'),
+ t=row.get('t'),
+ label=row.get('label'),
+ fill_color=parse_color(row.get('fill_color')),
+ stroke_color=parse_color(row.get('stroke_color')),
+ stroke_width=row.get('stroke_width')
+ )
+ elif shape_type == 'Rectangle':
+ shape = Rectangle(
+ x=row['x'],
+ y=row['y'],
+ width=row['width'],
+ height=row['height'],
+ z=row.get('z'),
+ c=row.get('c'),
+ t=row.get('t'),
+ label=row.get('label'),
+ fill_color=parse_color(row.get('fill_color')),
+ stroke_color=parse_color(row.get('stroke_color')),
+ stroke_width=row.get('stroke_width')
+ )
+ return shape
+
+
+def main(input_file, conn, image_id, log_file):
+ # Open log file
+ with open(log_file, 'w') as log:
+ df = pd.read_csv(input_file, sep='\t')
+        # Replace NaN values with None so optional columns are treated as missing
+ df = df.replace({np.nan: None})
+ for index, row in df.iterrows():
+ msg = f"Processing row {index + 1}/{len(df)}: {row.to_dict()}"
+ print(msg)
+ log.write(msg + "\n")
+ shape = create_shape(row)
+ if shape:
+ roi_name = row['roi_name'] if 'roi_name' in row else None
+ roi_description = row['roi_description'] if 'roi_description' in row else None
+ roi_id = post_roi(conn, image_id, [shape], name=roi_name, description=roi_description)
+ msg = f"ROI ID: {roi_id} for row {index + 1}"
+ print(msg)
+ log.write(msg + "\n")
+ else:
+ msg = f"Skipping row {index + 1}: Unable to create shape"
+ print(msg)
+ log.write(msg + "\n")
+
+
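+# Example invocation (hypothetical host and IDs; flags match the parser below):
+#   python omero_roi_upload.py --credential-file creds.json --host omero.example.org \
+#       --port 4064 --image_id 1 --input_file input_roi.tsv --log_file process.txt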
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Create shapes from a tabular file and optionally post them as an ROI to OMERO.")
+ parser.add_argument("--input_file", help="Path to the input tabular file.")
+ parser.add_argument("--image_id", type=int, required=True, help="ID of the image to which the ROI will be linked")
+ parser.add_argument("--host", type=str, required=True, help="OMERO server host")
+ parser.add_argument("--credential-file", dest="credential_file", type=str, required=True, help="Credential file (JSON file with username and password for OMERO)")
+ parser.add_argument("--port", type=int, default=4064, help="OMERO server port")
+ parser.add_argument("--log_file", type=str, default="process.txt", help="Log file path")
+
+ args = parser.parse_args()
+
+ with open(args.credential_file, 'r') as f:
+ crds = json.load(f)
+
+ conn = connect(
+ host=args.host,
+ user=crds['username'],
+ password=crds['password'],
+ port=args.port,
+ group="",
+ secure=True
+ )
+
+ try:
+ main(args.input_file, conn, args.image_id, args.log_file)
+ finally:
+ conn.close()
diff -r 000000000000 -r 5ad32d18fe82 test-data/dataset_conversion_log.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/dataset_conversion_log.txt Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,1 @@
+Images from Dataset 2 successfully added to Plate 1
\ No newline at end of file
diff -r 000000000000 -r 5ad32d18fe82 test-data/input1.tif
Binary file test-data/input1.tif has changed
diff -r 000000000000 -r 5ad32d18fe82 test-data/input2.tif
Binary file test-data/input2.tif has changed
diff -r 000000000000 -r 5ad32d18fe82 test-data/input_roi.tsv
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/input_roi.tsv Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,8 @@
+shape x y x_rad y_rad label fontSize x1 y1 x2 y2 points width height fill_color stroke_color stroke_width z c t roi_name roi_description
+Ellipse 50.0 50.0 20.0 10.0 (255,0,0,128) (0,0,0,255) 2 0 0 0 Example ROI This is an example ROI
+Label 100.0 100.0 Test Label 12.0 (255,255,255,0) (0,0,255,255) 1 0 0 0 Example ROI This is an example ROI
+Line 200.0 200.0 250.0 250.0 (0,255,0,128) (0,0,0,255) 2 0 1 0 Example ROI This is an example ROI
+Point 150.0 150.0 (0,0,255,128) (255,0,0,255) 3 0 2 0 Example ROI This is an example ROI
+Polygon (300,300),(350,350),(300,400) (255,255,0,128) (0,0,0,255) 2 1 0 0 Example ROI This is an example ROI
+Polyline (400,400),(450,450),(400,500) (0,255,255,128) (0,0,0,255) 3 0 0 0 Example ROI This is an example ROI
+Rectangle 500.0 500.0 100.0 50.0 (255,0,255,128) (0,0,0,255) 2 0 0 0 Example ROI This is an example ROI
diff -r 000000000000 -r 5ad32d18fe82 test-data/input_roi_minimal.tsv
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/input_roi_minimal.tsv Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,2 @@
+shape points label
+Polygon (300,300),(350,350),(300,400) Example ROI
diff -r 000000000000 -r 5ad32d18fe82 test-data/mapping.tsv
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/mapping.tsv Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,3 @@
+Filename Well
+sample_A03_image.jpg A2
+sample_H11_image.jpg H5
diff -r 000000000000 -r 5ad32d18fe82 test-data/metadata.tsv
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/metadata.tsv Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,2 @@
+Key1 Key2
+Value1 Value2
\ No newline at end of file
diff -r 000000000000 -r 5ad32d18fe82 test-data/omero_output.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/omero_output.txt Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,2 @@
+Image:5
+Image:6
diff -r 000000000000 -r 5ad32d18fe82 test-data/output_KV_import.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_KV_import.txt Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,1 @@
+SUCCESS: Successfully uploaded metadata for dataset with ID 4. Result: {'Key1': 'Value1', 'Key2': 'Value2'}
diff -r 000000000000 -r 5ad32d18fe82 test-data/output_filter_filename.tsv
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_filter_filename.tsv Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,1 @@
+1
diff -r 000000000000 -r 5ad32d18fe82 test-data/output_filter_tag.tsv
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_filter_tag.tsv Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,1 @@
+1
diff -r 000000000000 -r 5ad32d18fe82 test-data/output_ids_dataset.tsv
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_ids_dataset.tsv Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,1 @@
+1
diff -r 000000000000 -r 5ad32d18fe82 test-data/output_ids_image.tsv
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_ids_image.tsv Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,2 @@
+1
+2
diff -r 000000000000 -r 5ad32d18fe82 test-data/output_ids_project.tsv
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_ids_project.tsv Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,1 @@
+1
diff -r 000000000000 -r 5ad32d18fe82 test-data/output_table_import.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_table_import.txt Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,2 @@
+SUCCESS: Successfully uploaded metadata for project with ID 2. Result: Key1 Key2
+0 Value1 Value2
diff -r 000000000000 -r 5ad32d18fe82 test-data/output_table_roi.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_table_roi.txt Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,14 @@
+Processing row 1/7: {'shape': 'Ellipse', 'x': 50.0, 'y': 50.0, 'x_rad': 20.0, 'y_rad': 10.0, 'label': None, 'fontSize': None, 'x1': None, 'y1': None, 'x2': None, 'y2': None, 'points': None, 'width': None, 'height': None, 'fill_color': '(255,0,0,128)', 'stroke_color': '(0,0,0,255)', 'stroke_width': 2, 'z': 0, 'c': 0, 't': 0, 'roi_name': 'Example ROI', 'roi_description': 'This is an example ROI'}
+ROI ID: 1 for row 1
+Processing row 2/7: {'shape': 'Label', 'x': 100.0, 'y': 100.0, 'x_rad': None, 'y_rad': None, 'label': 'Test Label', 'fontSize': 12.0, 'x1': None, 'y1': None, 'x2': None, 'y2': None, 'points': None, 'width': None, 'height': None, 'fill_color': '(255,255,255,0)', 'stroke_color': '(0,0,255,255)', 'stroke_width': 1, 'z': 0, 'c': 0, 't': 0, 'roi_name': 'Example ROI', 'roi_description': 'This is an example ROI'}
+ROI ID: 2 for row 2
+Processing row 3/7: {'shape': 'Line', 'x': None, 'y': None, 'x_rad': None, 'y_rad': None, 'label': None, 'fontSize': None, 'x1': 200.0, 'y1': 200.0, 'x2': 250.0, 'y2': 250.0, 'points': None, 'width': None, 'height': None, 'fill_color': '(0,255,0,128)', 'stroke_color': '(0,0,0,255)', 'stroke_width': 2, 'z': 0, 'c': 1, 't': 0, 'roi_name': 'Example ROI', 'roi_description': 'This is an example ROI'}
+ROI ID: 3 for row 3
+Processing row 4/7: {'shape': 'Point', 'x': 150.0, 'y': 150.0, 'x_rad': None, 'y_rad': None, 'label': None, 'fontSize': None, 'x1': None, 'y1': None, 'x2': None, 'y2': None, 'points': None, 'width': None, 'height': None, 'fill_color': '(0,0,255,128)', 'stroke_color': '(255,0,0,255)', 'stroke_width': 3, 'z': 0, 'c': 2, 't': 0, 'roi_name': 'Example ROI', 'roi_description': 'This is an example ROI'}
+ROI ID: 4 for row 4
+Processing row 5/7: {'shape': 'Polygon', 'x': None, 'y': None, 'x_rad': None, 'y_rad': None, 'label': None, 'fontSize': None, 'x1': None, 'y1': None, 'x2': None, 'y2': None, 'points': '(300,300),(350,350),(300,400)', 'width': None, 'height': None, 'fill_color': '(255,255,0,128)', 'stroke_color': '(0,0,0,255)', 'stroke_width': 2, 'z': 1, 'c': 0, 't': 0, 'roi_name': 'Example ROI', 'roi_description': 'This is an example ROI'}
+ROI ID: 5 for row 5
+Processing row 6/7: {'shape': 'Polyline', 'x': None, 'y': None, 'x_rad': None, 'y_rad': None, 'label': None, 'fontSize': None, 'x1': None, 'y1': None, 'x2': None, 'y2': None, 'points': '(400,400),(450,450),(400,500)', 'width': None, 'height': None, 'fill_color': '(0,255,255,128)', 'stroke_color': '(0,0,0,255)', 'stroke_width': 3, 'z': 0, 'c': 0, 't': 0, 'roi_name': 'Example ROI', 'roi_description': 'This is an example ROI'}
+ROI ID: 6 for row 6
+Processing row 7/7: {'shape': 'Rectangle', 'x': 500.0, 'y': 500.0, 'x_rad': None, 'y_rad': None, 'label': None, 'fontSize': None, 'x1': None, 'y1': None, 'x2': None, 'y2': None, 'points': None, 'width': 100.0, 'height': 50.0, 'fill_color': '(255,0,255,128)', 'stroke_color': '(0,0,0,255)', 'stroke_width': 2, 'z': 0, 'c': 0, 't': 0, 'roi_name': 'Example ROI', 'roi_description': 'This is an example ROI'}
+ROI ID: 7 for row 7
diff -r 000000000000 -r 5ad32d18fe82 test-data/output_table_roi_minimal.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_table_roi_minimal.txt Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,2 @@
+Processing row 1/1: {'shape': 'Polygon', 'points': '(300,300),(350,350),(300,400)', 'label': 'Example ROI'}
+ROI ID: 8 for row 1
diff -r 000000000000 -r 5ad32d18fe82 test-data/output_target_import.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/output_target_import.txt Mon Dec 16 20:56:16 2024 +0000
@@ -0,0 +1,1 @@
+SUCCESS: Successfully uploaded metadata for dataset with ID 1. Result: {'Key1': 'Value1', 'Key2': 'Value2'}