data_manager_build_alfa_indexes/data_manager/data_manager_build_alfa_indexes.py @ 0:016200d4e379 (draft)

commit message: Uploaded
author: charles-bernard
date: Thu, 27 Oct 2016 05:15:05 -0400
children: 6f0be85be8fb
#!/usr/bin/python

import sys
import shutil
import re
import urllib2
import subprocess
import gzip
import os
import tempfile
import logging
from optparse import OptionParser
from galaxy.util.json import from_json_string, to_json_string


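# Parse the command line: -e/--ensembl expects a (kingdom, species_name)
# pair, -o/--output the JSON params file provided by Galaxy.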
def get_arg():
    parser = OptionParser()
    parser.add_option("-e", "--ensembl", dest="ensembl_info", action="store", nargs=2, metavar=("kingdom", "species_name"), type="str")
    parser.add_option("-o", "--output", dest="output_filename", action="store", nargs=1, metavar="JSON_FILE")
    (options, args) = parser.parse_args()
    return options, args

def cleanup_before_exit(tmp_dir):
    if tmp_dir and os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)

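# Return the raw content of the page (or FTP directory listing) at `url`.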
def get_page_content(url):
    req = urllib2.Request(url)
    page = urllib2.urlopen(req)
    return page.read()


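# Download `link` to the local file `local_file_name`.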
def download_file(link, local_file_name):
    req = urllib2.Request(link)
    src_file = urllib2.urlopen(req)
    with open(local_file_name, 'wb') as local_file:
        local_file.write(src_file.read())


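# Decompress a .gz archive into `uncompressed_file_name`.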
def uncompress_gz(gz_file_name, uncompressed_file_name):
    logging.info("____________________________________________________________")
    logging.info("*** Uncompressing %s" % gz_file_name)
    with gzip.open(gz_file_name, 'rb') as src_file:
        with open(uncompressed_file_name, 'wb') as uncompressed_file:
            uncompressed_file.write(src_file.read())
    logging.info("-> Uncompressed!\n")


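# Register the new entry under data_tables/alfa_indexes in the dictionary
# that will be written back to Galaxy. Note that dict.get() keeps any
# existing entry, so only the first entry ever registered is retained.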
def add_data_table_entry(data_manager_dict, data_table_entry):
    data_manager_dict['data_tables'] = data_manager_dict.get('data_tables', {})
    data_manager_dict['data_tables']['alfa_indexes'] = data_manager_dict['data_tables'].get('alfa_indexes', data_table_entry)
    return data_manager_dict


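# Normalize a user-supplied species name to Ensembl's naming scheme,
# e.g. "Homo sapiens" -> "homo_sapiens".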
def standardize_species_name(species_name):
    standard_species_name = re.sub(r'[)]$', '', species_name)
    standard_species_name = re.sub(r'[ _),-.(=]+ *', '_', standard_species_name)
    return standard_species_name.lower()


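# Vertebrate genomes live on ftp.ensembl.org; all other kingdoms are
# served from ftp.ensemblgenomes.org.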
def get_ensembl_url_root(kingdom):
    logging.info("____________________________________________________________")
    logging.info("*** Determining Ensembl ftp root url")
    if kingdom == 'vertebrates':
        root = 'ftp://ftp.ensembl.org/pub/current_gtf/'
    else:
        root = 'ftp://ftp.ensemblgenomes.org/pub/%s/current/' % kingdom
    logging.info("-> Determined!\n")
    return root


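# Check that `species_name` appears in the Ensembl species list. For
# vertebrates the list is the FTP directory listing itself; for the other
# kingdoms it is a tab-separated species_*.txt file whose second column
# holds the species name. Returns the (possibly completed) species name
# and the matching line.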
def test_ensembl_species_exists(kingdom, url, species_name):
    logging.info("____________________________________________________________")
    logging.info("*** Testing whether %s is referenced in Ensembl %s" % (species_name, kingdom))
    list_species_file_name = 'species_Ensembl%s%s.txt' % (kingdom[0].upper(), kingdom[1:])
    if kingdom == 'vertebrates':
        download_file(url, list_species_file_name)
    else:
        download_file(url + list_species_file_name, list_species_file_name)

    # stderr is merged into stdout, so the second value returned by
    # communicate() is always None; an empty output means grep found no match.
    grep_result = subprocess.Popen(['grep', species_name, list_species_file_name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    species_lines_matched, grep_error = grep_result.communicate()
    if grep_error is not None or species_lines_matched == "":
        msg = 'The species \'%s\' is not referenced on Ensembl (%s)' % (species_name, kingdom)
        logging.critical(msg)
        sys.exit(msg)

    species_lines = species_lines_matched.split('\n')
    del species_lines[-1]
    nb_lines = len(species_lines)

    if nb_lines == 1:
        columns = species_lines[0].split('\t')
        found_species_name = columns[1]
        if species_name != found_species_name:
            logging.info('-> \'%s\' has been replaced with the complete species name \'%s\'' % (species_name, found_species_name))
            return found_species_name, species_lines_matched
        logging.info("-> Referenced!\n")
        return species_name, species_lines_matched
    else:
        list_species = [''] * nb_lines
        for i in range(0, nb_lines):
            columns = species_lines[i].split('\t')
            list_species[i] = columns[1]
            exact_match = re.search('^%s$' % species_name, list_species[i])
            if exact_match:
                logging.info("-> Referenced!\n")
                return species_name, species_lines[i]
        msg = 'The string \'%s\' matches several Ensembl species names but is not itself a complete species name.\nPlease retry with one of the following species names:\n%s' % (species_name, list_species)
        logging.critical(msg)
        sys.exit(msg)


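# Bacteria, protists and fungi are grouped into "collection" subdirectories
# on the Ensembl Genomes FTP; extract the collection name from the species
# line, if any.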
def get_ensembl_collection(kingdom, species_line):
    logging.info("*** Extracting the %s_collection of the species" % kingdom)
    collection_regex = re.compile('%s_.+_collection' % kingdom.lower())
    collection_match = re.search(collection_regex, species_line)
    if not collection_match:
        logging.info("-> Skipped: this species is not classified in an Ensembl %s collection\n" % kingdom)
        return None
    logging.info("-> Extracted!\n")
    return collection_match.group(0)


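# Find the GTF archive in the FTP directory listing; archive names follow
# the pattern <Species>.<assembly>.<release>.gtf.gz.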
def get_ensembl_gtf_archive_name(url_dir, species_name):
    logging.info("____________________________________________________________")
    logging.info("*** Extracting the gtf archive name of %s" % species_name)
    gtf_archive_regex = re.compile(r'%s\..*\.[0-9]+\.gtf\.gz' % species_name, flags=re.IGNORECASE)
    dir_content = get_page_content(url_dir)
    gtf_archive_match = re.search(gtf_archive_regex, dir_content)
    if not gtf_archive_match:
        sys.exit('The species is referenced on Ensembl, but a nomenclature mismatch prevented the gtf archive from being found')
    gtf_archive_name = gtf_archive_match.group(0)
    logging.info("-> Extracted!\n")
    return gtf_archive_name


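# Build the final download URL (adding the gtf/ and collection path
# components for non-vertebrates) and fetch the GTF archive.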
def get_ensembl_gtf_archive(kingdom, url, species_name, species_line):
    if kingdom != 'vertebrates':
        url = url + 'gtf/'
        if kingdom in ('bacteria', 'protists', 'fungi'):
            collection = get_ensembl_collection(kingdom, species_line)
            if collection is not None:
                url = url + "%s/" % collection
    final_url = url + species_name + '/'
    gtf_archive_name = get_ensembl_gtf_archive_name(final_url, species_name)
    logging.info("____________________________________________________________")
    logging.info("*** Downloading the gtf archive of %s" % species_name)
    download_file(final_url + gtf_archive_name, gtf_archive_name)
    logging.info("-> Downloaded!\n")
    return gtf_archive_name


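# Run ALFA on the GTF annotation; 'ALFA.py -a' builds the stranded and
# unstranded index files that main() copies to the target directory.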
def generate_alfa_indexes(path_to_alfa, gtf_file_name):
    logging.info("____________________________________________________________")
    logging.info("*** Generating alfa indexes from %s" % gtf_file_name)
    alfa_result = subprocess.Popen(['python', path_to_alfa + 'ALFA.py', '-a', gtf_file_name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    alfa_out, alfa_err = alfa_result.communicate()
    # stderr is merged into stdout, so alfa_err is always None; success is
    # detected by ALFA's end-of-program marker in the combined output.
    if not re.search('### End of program', alfa_out):
        msg = 'Generation failed due to an alfa error: %s' % alfa_out
        logging.critical(msg)
        sys.exit(msg)
    logging.info("-> Generated!\n")


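# Derive the data table fields from the archive name, assuming the
# <Species>.<assembly>.<release>.gtf.gz pattern; e.g. a (hypothetical)
# Saccharomyces_cerevisiae.R64-1-1.84.gtf.gz yields the value/dbkey
# 'Saccharomyces_cerevisiae_R64-1-1_84'. An assembly name that itself
# contains a dot would break this simple split.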
def get_data_table_new_entry(gtf_archive_name):
    info_list = gtf_archive_name.split('.')
    species = info_list[0]
    version = info_list[1]
    release = info_list[2]
    value = '%s_%s_%s' % (species, version, release)
    dbkey = value
    name = '%s: %s (release %s)' % (species, version, release)
    prefix = '%s.%s.%s' % (species, version, release)
    entry_dict = {'species': species, 'version': version, 'release': release, 'value': value, 'dbkey': dbkey, 'name': name, 'prefix': prefix}
    return entry_dict


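# Entry point: reads the Galaxy-supplied JSON params, works in a temporary
# directory, downloads and indexes the requested annotation, then copies
# the indexes and the log to the target directory and writes the data
# table JSON back for Galaxy.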
def main():
    options, args = get_arg()
    galaxy_root_dir = args[0]

    path_to_alfa = os.path.join(galaxy_root_dir, 'tools/alfa/')
    path_to_tmp_dir = os.path.join(galaxy_root_dir, 'database/tmp/')

    if options.output_filename is None:
        msg = 'No json output file specified'
        logging.critical(msg)
        sys.exit(msg)
    output_filename = options.output_filename
    params = from_json_string(open(output_filename).read())
    target_directory = params['output_data'][0]['extra_files_path']
    os.mkdir(target_directory)

    tmp_dir = tempfile.mkdtemp(prefix='tmp', suffix='', dir=path_to_tmp_dir)
    os.chdir(tmp_dir)
    log_file_name = 'galaxy_log_report.log'
    logging.basicConfig(level=logging.INFO, filename=log_file_name, filemode="a+", format='%(message)s')
    data_manager_dict = {}

    if options.ensembl_info:
        kingdom, species_name = options.ensembl_info
        species_name = standardize_species_name(species_name)
        url = get_ensembl_url_root(kingdom)
        species_name, species_line = test_ensembl_species_exists(kingdom, url, species_name)
        gtf_archive_name = get_ensembl_gtf_archive(kingdom, url, species_name, species_line)
        data_table_entry = get_data_table_new_entry(gtf_archive_name)
        gtf_file_name = '%s.gtf' % data_table_entry['prefix']
        uncompress_gz(gtf_archive_name, gtf_file_name)
        generate_alfa_indexes(path_to_alfa, gtf_file_name)
        stranded_index_name = '%s.stranded.index' % data_table_entry['prefix']
        unstranded_index_name = '%s.unstranded.index' % data_table_entry['prefix']
        add_data_table_entry(data_manager_dict, data_table_entry)

    # The report below assumes --ensembl was provided: url and
    # data_table_entry are only defined inside that branch.
    logging.info("____________________________________________________________")
    logging.info("*** General Info")
    logging.info("TMP DIR:\t%s" % tmp_dir)
    logging.info("TARGET DIR:\t%s" % target_directory)
    logging.info("URL ROOT:\t%s" % url)
    logging.info("SPECIES:\t%s" % data_table_entry['species'])
    logging.info("VERSION:\t%s" % data_table_entry['version'])
    logging.info("RELEASE:\t%s" % data_table_entry['release'])
    logging.info("VALUE:\t%s" % data_table_entry['value'])
    logging.info("DBKEY:\t%s" % data_table_entry['dbkey'])
    logging.info("NAME:\t%s" % data_table_entry['name'])
    logging.info("PREFIX:\t%s" % data_table_entry['prefix'])
    logging.info("____________________________________________________________")
    logging.info("*** Initial dictionary")
    logging.info("%s" % params)

    shutil.copyfile(stranded_index_name, os.path.join(target_directory, stranded_index_name))
    shutil.copyfile(unstranded_index_name, os.path.join(target_directory, unstranded_index_name))
    shutil.copyfile(log_file_name, os.path.join(target_directory, log_file_name))

    cleanup_before_exit(tmp_dir)

    with open(output_filename, 'wb') as output_file:
        output_file.write(to_json_string(data_manager_dict))


if __name__ == '__main__':
    main()
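
# Example invocation (a hypothetical sketch; in practice Galaxy runs this
# script itself, passing its root directory as the positional argument and
# the path of a pre-filled JSON params file via -o):
#
#   python data_manager_build_alfa_indexes.py \
#       -e fungi saccharomyces_cerevisiae \
#       -o /path/to/params.json \
#       /path/to/galaxy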