chatgpt.py @ 0:f256dc85e1a0 (draft)
planemo upload for repository https://github.com/bgruening/galaxytools/tree/master/tools/chatgpt commit 840e66e7f23cf775ebd3083c6e07d1819b17f74c

author:   bgruening
date:     Tue, 13 Aug 2024 14:25:19 +0000
parents:  (none; new file)
children: 08c658e9aa9e
import os
import sys

from openai import OpenAI

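# Positional arguments: a comma-separated list of context files, the user's
# question, the model name, and the path to a file holding the OpenAI API key.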
context_files = sys.argv[1].split(",")
question = sys.argv[2]
model = sys.argv[3]
with open(sys.argv[4], "r") as f:
    openai_api_key = f.read().strip()
if not openai_api_key:
    raise Exception("OpenAI API key is not provided in user preferences!")

client = OpenAI(api_key=openai_api_key)

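# File types accepted by the Assistants file_search tool (documents and code)
# and, separately, the image types accepted as vision input.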
file_search_sup_ext = [
    "c", "cs", "cpp", "doc", "docx", "html", "java", "json", "md", "pdf",
    "php", "pptx", "py", "rb", "tex", "txt", "css", "js", "sh", "ts",
]

vision_sup_ext = ["jpg", "jpeg", "png", "webp", "gif"]

file_search_file_streams = []
image_urls = []

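# Route each context file: images go to vision input (only for vision-capable
# models), supported documents go to file_search, anything else is rejected.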
for path in context_files:
    ext = path.split(".")[-1].lower()
    if ext in vision_sup_ext and model in ["gpt-4o", "gpt-4o-mini", "gpt-4-turbo"]:
        # The vision endpoint rejects images larger than 20 MB.
        if os.path.getsize(path) > 20 * 1024 * 1024:
            raise Exception(f"File {path} exceeds the 20MB limit and will not be processed.")
        with open(path, "rb") as image_fh:
            file = client.files.create(file=image_fh, purpose="vision")
        prompt = {"type": "image_file", "image_file": {"file_id": file.id}}
        image_urls.append(prompt)

    elif ext in file_search_sup_ext:
        # These streams must stay open until the vector-store upload below.
        file_search_file_streams.append(open(path, "rb"))
    else:
        raise Exception(f"Unsupported file type: {path}")

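# Create the assistant; the file_search tool is only attached when there are
# documents to search.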
assistant = client.beta.assistants.create(
    instructions="You are going to get questions about the file(s).",
    model=model,
    # tools expects a list, so pass an empty list rather than None.
    tools=[{"type": "file_search"}] if file_search_file_streams else [],
)
if file_search_file_streams:
    # Upload all documents into a fresh vector store, wait until indexing
    # finishes, then point the assistant at it.
    vector_store = client.beta.vector_stores.create()
    file_batch = client.beta.vector_stores.file_batches.upload_and_poll(
        vector_store_id=vector_store.id, files=file_search_file_streams
    )
    assistant = client.beta.assistants.update(
        assistant_id=assistant.id,
        tool_resources={"file_search": {"vector_store_ids": [vector_store.id]}},
    )

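# Build the user message (the question plus any uploaded images), run it on a
# new thread, and poll until the run completes.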
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": question,
            },
            *image_urls,
        ],
    }
]
thread = client.beta.threads.create(messages=messages)
run = client.beta.threads.runs.create_and_poll(thread_id=thread.id, assistant_id=assistant.id)
messages = list(client.beta.threads.messages.list(thread_id=thread.id, run_id=run.id))

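# messages.list returns the newest message first, so messages[0] is the
# assistant's reply; save its text to output.txt.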
message_content = messages[0].content[0].text.value
with open("output.txt", "w") as f:
    f.write(message_content)
print("Output has been saved!")

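# Clean up all remote resources created for this run and release the local
# file handles.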
for image in image_urls:
    client.files.delete(image["image_file"]["file_id"])
for stream in file_search_file_streams:
    stream.close()
if file_search_file_streams:
    client.beta.vector_stores.delete(vector_store.id)
client.beta.threads.delete(thread.id)
client.beta.assistants.delete(assistant.id)
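
For reference, a minimal sketch of how this script is invoked from the command line; the file names, question, and key path below are illustrative assumptions, not values from the repository:

    # Hypothetical invocation (argument values are illustrative):
    python chatgpt.py report.pdf,chart.png "Summarize the attached report" gpt-4o /path/to/api_key.txt

The assistant's reply is written to output.txt in the working directory.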