import logging
import sys

log = logging.getLogger(__name__)


def gaussian(job):
    # Collect the job's input datasets (history and library) by name.
    inp_data = dict([(da.name, da.dataset) for da in job.input_datasets])
    inp_data.update([(da.name, da.dataset) for da in job.input_library_datasets])
    # Read the Gaussian input (com) file supplied to the tool.
    src_comfile = inp_data["infile"].file_name
    comfile = open(src_comfile, 'r')
    # Defaults used when the com file does not specify resources.
    ppn = '1'
    pmem = '900mb'
    overhead = 200        # extra memory (MB) added on top of Gaussian's %mem request
    walltime = '11:59:59'
    for line in comfile:
        line = line.strip().lower()
        # Number of processors requested via the %nproc= Link 0 command.
        if "%nproc=" in line:
            try:
                ppn = line.split('=')[1]
            except Exception as e:
                log.debug(e)
                sys.stdout.write(str(e))
        # Memory requested via the %mem= Link 0 command; normalise to MB.
        if "%mem=" in line:
            try:
                mem_str = line.split('=')[1]
                if "kb" in mem_str:
                    mem_num = mem_str.split('kb')[0]
                    mem_num = int(int(mem_num) / 1024.0) + overhead
                elif "mb" in mem_str:
                    mem_num = mem_str.split('mb')[0]
                    mem_num = int(mem_num) + overhead
                elif "gb" in mem_str:
                    mem_num = mem_str.split('gb')[0]
                    mem_num = (int(mem_num) * 1024) + overhead
                elif "kw" in mem_str:
                    mem_num = mem_str.split('kw')[0]
                    mem_num = int(int(mem_num) * 8 / 1024.0) + overhead
                elif "mw" in mem_str:
                    mem_num = mem_str.split('mw')[0]
                    mem_num = (int(mem_num) * 8) + overhead
                elif "gw" in mem_str:
                    mem_num = mem_str.split('gw')[0]
                    mem_num = (int(mem_num) * 8 * 1024) + overhead
                else:
                    # No unit suffix: Gaussian defaults to words (8 bytes each).
                    mem_num = int(int(mem_str) * 8 / 1024.0 / 1024.0) + overhead
                # Spread the total memory across the requested processors.
                pmem_num = int(mem_num / int(ppn))
                pmem = str(pmem_num) + 'mb'
            except Exception as e:
                log.debug(e)
                sys.stdout.write(str(e))
        # Optional walltime override given as a "!walltime=" comment line.
        if "!walltime=" in line:
            try:
                walltime = line.split('=')[1]
            except Exception as e:
                log.debug(e)
                sys.stdout.write(str(e))
    comfile.close()
    # Build the PBS resource request and hand it to the DRMAA runner.
    request = "-l nodes=1:ppn=%s,pmem=%s,walltime=%s" % (ppn, pmem, walltime)
    return 'drmaa://%s/' % request
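The unit conversions in the %mem= branch are the easiest part to get wrong, so it can help to test that arithmetic in isolation. The sketch below factors the same conversions into a standalone helper; the name parse_mem_mb is introduced here purely for illustration and is not part of the rule above.

def parse_mem_mb(mem_str, overhead=200):
    """Convert a Gaussian %mem value (kb/mb/gb/kw/mw/gw, or bare words) to MB."""
    mem_str = mem_str.strip().lower()
    units = [
        ('kb', lambda n: int(n / 1024.0)),        # kilobytes -> MB
        ('mb', lambda n: n),                      # already MB
        ('gb', lambda n: n * 1024),               # gigabytes -> MB
        ('kw', lambda n: int(n * 8 / 1024.0)),    # kilowords (8-byte words) -> MB
        ('mw', lambda n: n * 8),                  # megawords -> MB
        ('gw', lambda n: n * 8 * 1024),           # gigawords -> MB
    ]
    for suffix, to_mb in units:
        if suffix in mem_str:
            return to_mb(int(mem_str.split(suffix)[0])) + overhead
    # No recognised suffix: Gaussian defaults to words (8 bytes each).
    return int(int(mem_str) * 8 / 1024.0 / 1024.0) + overhead

# Example: %mem=4gw is 4 * 8 * 1024 MB of Gaussian memory plus the 200 MB overhead.
assert parse_mem_mb('4gw') == 4 * 8 * 1024 + 200
assert parse_mem_mb('900mb') == 900 + 200

As for wiring the rule into Galaxy itself: the URL-style return value ('drmaa://.../') suggests the era in which rules were referenced as dynamic tool runners (for example a dynamic:///python/gaussian runner URL), but the exact configuration depends on the deployment and is not shown in the source.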