chatgpt_openai_api: comparison of chatgpt.py @ 2:dab494dce303 (draft)
planemo upload for repository https://github.com/bgruening/galaxytools/tree/master/tools/chatgpt commit d2d08c3866c0f4a2f10372ae15c5dac5ea2d0bf0
author:   bgruening
date:     Fri, 23 Aug 2024 10:21:21 +0000
parents:  08c658e9aa9e
children: 7770a4bd42e2
--- chatgpt.py (1:08c658e9aa9e)
+++ chatgpt.py (2:dab494dce303)
@@ -38,62 +38,65 @@
 ]
 
 vision_sup_ext = ["jpg", "jpeg", "png", "webp", "gif"]
 
 file_search_file_streams = []
-image_urls = []
+image_files = []
 
 for path in context_files:
     ext = path.split(".")[-1].lower()
-    if ext in vision_sup_ext and model in ["gpt-4o", "gpt-4o-mini", "gpt-4-turbo"]:
+    if ext in vision_sup_ext:
         if os.path.getsize(path) > 20 * 1024 * 1024:
-            raise Exception(f"File {path} exceeds the 20MB limit and will not be processed.")
+            print(f"File {path} exceeds the 20MB limit and will not be processed.")
+            sys.exit(1)
         file = client.files.create(file=open(path, "rb"), purpose="vision")
         promt = {"type": "image_file", "image_file": {"file_id": file.id}}
-        image_urls.append(promt)
-
+        image_files.append(promt)
     elif ext in file_search_sup_ext:
         file_search_file_streams.append(open(path, "rb"))
-    else:
-        raise Exception("Not supported file!")
 
 assistant = client.beta.assistants.create(
-    instructions="You are going to get question about the file(s).",
+    instructions="You will receive questions about files from file searches and image files. For file search queries, identify and retrieve the relevant files based on the question. For image file queries, analyze the image content and provide relevant information or insights based on the image data.",
     model=model,
-    tools=[{"type": "file_search"}] if file_search_file_streams else None,
+    tools=[{"type": "file_search"}] if file_search_file_streams else [],
 )
 if file_search_file_streams:
     vector_store = client.beta.vector_stores.create()
     file_batch = client.beta.vector_stores.file_batches.upload_and_poll(
         vector_store_id=vector_store.id, files=file_search_file_streams
     )
     assistant = client.beta.assistants.update(
-        assistant_id=assistant.id, tool_resources={"file_search": {"vector_store_ids": [vector_store.id]}}
+        assistant_id=assistant.id,
+        tool_resources={"file_search": {"vector_store_ids": [vector_store.id]}},
     )
 
 messages = [
     {
         "role": "user",
         "content": [
             {
                 "type": "text",
                 "text": question,
             },
-            *image_urls,
+            *image_files,
         ],
     }
 ]
 thread = client.beta.threads.create(messages=messages)
-run = client.beta.threads.runs.create_and_poll(thread_id=thread.id, assistant_id=assistant.id)
-messages = list(client.beta.threads.messages.list(thread_id=thread.id, run_id=run.id))
+run = client.beta.threads.runs.create_and_poll(
+    thread_id=thread.id, assistant_id=assistant.id
+)
+assistant_messages = list(
+    client.beta.threads.messages.list(thread_id=thread.id, run_id=run.id)
+)
 
-message_content = messages[0].content[0].text.value
+message_content = assistant_messages[0].content[0].text.value
 print("Output has been saved!")
 with open("output.txt", "w") as f:
     f.write(message_content)
 
-for image in image_urls:
+for image in image_files:
     client.files.delete(image["image_file"]["file_id"])
 if file_search_file_streams:
     client.beta.vector_stores.delete(vector_store.id)
 client.beta.threads.delete(thread.id)
 client.beta.assistants.delete(assistant.id)
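The core of this revision is the run-and-read flow: the run is polled to completion, then only that run's messages are listed, so index 0 is the assistant's reply, and storing the result under a new name (assistant_messages) stops it from shadowing the request payload list. A minimal standalone sketch of that pattern, assuming OPENAI_API_KEY is set in the environment and an assistant already exists (the question text and variable names below are illustrative, not part of the tool):

    # Sketch of the run-and-read pattern used in revision 2:dab494dce303.
    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    # Hypothetical assistant stand-in for the one created in the script above.
    assistant = client.beta.assistants.create(model="gpt-4o-mini")

    thread = client.beta.threads.create(
        messages=[
            {
                "role": "user",
                "content": [{"type": "text", "text": "What is in these files?"}],
            }
        ]
    )
    # create_and_poll blocks until the run reaches a terminal state.
    run = client.beta.threads.runs.create_and_poll(
        thread_id=thread.id, assistant_id=assistant.id
    )
    # Filtering by run_id returns only this run's messages, so index 0 is the
    # assistant's reply; the distinct name avoids shadowing the request list.
    assistant_messages = list(
        client.beta.threads.messages.list(thread_id=thread.id, run_id=run.id)
    )
    print(assistant_messages[0].content[0].text.value)

Note two behavioral changes alongside the rename: an oversized image now prints a message and exits via sys.exit(1) instead of raising, and unsupported extensions are now silently skipped rather than raising "Not supported file!".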