changeset 6:03842a4f71c6 draft

Uploaded
author jjohnson
date Fri, 17 Feb 2017 15:20:24 -0500
parents 19ae309ec53c
children 72c32037fa1e
files query_tabular.py query_tabular.xml test-data/._IEDB.tsv test-data/._netMHC_summary.tsv test-data/._query_results.tsv test-data/._regex_results.tsv test-data/._sales_results.tsv
diffstat 7 files changed, 224 insertions(+), 53 deletions(-)
--- a/query_tabular.py	Wed Apr 20 15:46:04 2016 -0400
+++ b/query_tabular.py	Fri Feb 17 15:20:24 2017 -0500
@@ -13,15 +13,24 @@
 TODO:
 - could read column names from comment lines, but issues with legal names
 - could add some transformations on tabular columns,
+  filter - skip_regex
   e.g. a regex to format date/time strings
     format: {
       c2 : re.sub('pat', 'sub', c2)
       c3 : len(c3)
-   }
-   def format(colname,val, expr):
+    }
+    def format(colname,val, expr):
+  normalize input list columns
+    iterate over list values creating one row per iteration
+      option for input line_num column
+    create associated table 
+      fk, name, value  # e.g. PSM table with list of proteins containing peptide
+      fk, name, value[, value] # if multiple columns similarly indexed, e.g. vcf
 - column_defs dict of columns to create from tabular input
     column_defs : { 'name1' : 'expr', 'name2' : 'expr'}
 - allow multiple queries and outputs
+  repeat min - max with up to max conditional outputs
+
 - add a --json input for table definitions (or yaml)
 JSON config:
 { tables : [
@@ -35,9 +44,11 @@
     },
     { file_path : '/home/galaxy/dataset_102.dat',
             table_name : 'gff',
-            column_names : ['seqname',,,'start','end']
+            column_names : ['seqname',,'date','start','end']
             comment_lines : 1
             load_named_columns : True
+            filters : [{'filter': 'regex', 'pattern': '#peptide', 'action': 'exclude_match'}, 
+                       {'filter': 'replace', 'column': 3, 'replace': 'gi[|]', 'pattern': ''}]
     },
     { file_path : '/home/galaxy/dataset_103.dat',
             table_name : 'test',
@@ -47,8 +58,71 @@
 }
 """
 
-tables_query = \
-    "SELECT name, sql FROM sqlite_master WHERE type='table' ORDER BY name"
+
+class LineFilter(object):
+    def __init__(self, source, filter_dict):
+        self.source = source
+        self.filter_dict = filter_dict
+        print >> sys.stderr, 'LineFilter: %s' % (filter_dict if filter_dict else 'NONE')
+        self.func = lambda l: l.rstrip('\r\n') if l else None
+        if not filter_dict:
+            return
+        if filter_dict['filter'] == 'regex':
+            rgx = re.compile(filter_dict['pattern'])
+            if filter_dict['action'] == 'exclude_match':
+                self.func = lambda l: l if not rgx.match(l) else None
+            elif filter_dict['action'] == 'include_match':
+                self.func = lambda l: l if rgx.match(l) else None
+            elif filter_dict['action'] == 'exclude_find':
+                self.func = lambda l: l if not rgx.search(l) else None
+            elif filter_dict['action'] == 'include_find':
+                self.func = lambda l: l if rgx.search(l) else None
+        elif filter_dict['filter'] == 'replace':
+            p = filter_dict['pattern']
+            r = filter_dict['replace']
+            c = int(filter_dict['column']) - 1
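+            # the filter's 'column' is 1-based (as in the tool UI); convert to 0-based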
+            self.func = lambda l: '\t'.join([x if i != c else re.sub(p,r,x) for i,x in enumerate(l.split('\t'))])
+    def __iter__(self):
+        return self
+    def next(self):
+        for next_line in self.source:
+            line = self.func(next_line)
+            if line:
+                return line
+        raise StopIteration
+
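+# Usage sketch (illustrative only; the tool itself chains these inside
+# TabularReader): a 'replace' filter stripping a hypothetical 'gi|' prefix
+# from column 3 of a tab-separated file:
+#   src = LineFilter(open('input.tsv'), None)  # base filter strips newlines
+#   flt = LineFilter(src, {'filter': 'replace', 'column': 3,
+#                          'pattern': 'gi[|]', 'replace': ''})
+#   for line in flt:
+#       print line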
+
+class TabularReader:
+    """
+    Tabular file iterator. Returns a list 
+    """
+    def __init__(self, file_path, skip=0, comment_char=None, col_idx=None, filters=None):
+        self.skip = skip
+        self.comment_char = comment_char
+        self.col_idx = col_idx
+        self.filters = filters
+        self.tsv_file = open(file_path)
+        if skip and skip > 0:
+            for i in range(skip):
+                if not self.tsv_file.readline():
+                    break
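+        # note: comment_char is applied as a regex via LineFilter below, and
+        # 'exclude_match' anchors the pattern at the start of each line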
+        source = LineFilter(self.tsv_file, None)
+        if comment_char:
+            source = LineFilter(source, {'filter': 'regex', 'pattern': comment_char, 'action': 'exclude_match'})
+        if filters:
+            for f in filters:
+                source = LineFilter(source, f)
+        self.source = source
+    def __iter__(self):
+        return self
+    def next(self):
+        ''' Iteration '''
+        for line in self.source:
+            fields = line.rstrip('\r\n').split('\t')
+            if self.col_idx:
+                fields = [fields[i] for i in self.col_idx]
+            return fields
+        raise StopIteration
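+# Usage sketch (hypothetical file name): skip one leading line, drop '#'
+# comment lines, and keep only lines where 'chr' is found anywhere:
+#   tr = TabularReader('data.tsv', skip=1, comment_char='#',
+#                      filters=[{'filter': 'regex', 'pattern': 'chr',
+#                                'action': 'include_find'}])
+#   for fields in tr:
+#       print fields  # one list of column strings per line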
 
 
 def getValueType(val):
@@ -66,30 +140,25 @@
 
 
 def get_column_def(file_path, table_name, skip=0, comment_char='#',
-                   column_names=None, max_lines=100,load_named_columns=False):
+                   column_names=None, max_lines=100,load_named_columns=False,filters=None):
     col_pref = ['TEXT', 'REAL', 'INTEGER', None]
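+    # col_pref is ordered most-general-first: if values in a column disagree
+    # (e.g. '1', '2.5', 'x'), the type with the lowest col_pref index (TEXT)
+    # wins in the loop below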
     col_types = []
     col_idx = None
     data_lines = 0
-
     try:
-        with open(file_path, "r") as fh:
-            for linenum, line in enumerate(fh):
-                if linenum < skip:
-                    continue
-                if line.startswith(comment_char):
-                    continue
-                data_lines += 1
-                try:
-                    fields = line.split('\t')
-                    while len(col_types) < len(fields):
-                        col_types.append(None)
-                    for i, val in enumerate(fields):
-                        colType = getValueType(val)
-                        if col_pref.index(colType) < col_pref.index(col_types[i]):
-                            col_types[i] = colType
-                except Exception, e:
-                    print >> sys.stderr, 'Failed at line: %d err: %s' % (linenum, e)
+        tr = TabularReader(file_path, skip=skip, comment_char=comment_char, col_idx=None, filters=filters)
+        for linenum, fields in enumerate(tr):
+            if linenum >= max_lines:
+                break
+            try:
+                while len(col_types) < len(fields):
+                    col_types.append(None)
+                for i, val in enumerate(fields):
+                    colType = getValueType(val)
+                    if col_pref.index(colType) < col_pref.index(col_types[i]):
+                        col_types[i] = colType
+            except Exception, e:
+                print >> sys.stderr, 'Failed at line: %d err: %s' % (linenum, e)
     except Exception, e:
         print >> sys.stderr, 'Failed: %s' % (e)
     for i,col_type in enumerate(col_types):
@@ -117,11 +186,13 @@
     return col_names, col_types, col_def, col_idx
 
 
-def create_table(conn, file_path, table_name, skip=0, comment_char='#', pkey_autoincr=None, column_names=None,load_named_columns=False,unique_indexes=[],indexes=[]):
-    col_names, col_types, col_def, col_idx = get_column_def(file_path, table_name, skip=skip, comment_char=comment_char, column_names=column_names,load_named_columns=load_named_columns)
+def create_table(conn, file_path, table_name, skip=0, comment_char='#', pkey_autoincr=None, column_names=None, load_named_columns=False, filters=None, unique_indexes=[], indexes=[]):
+    col_names, col_types, col_def, col_idx = get_column_def(file_path, table_name, skip=skip, comment_char=comment_char,
+        column_names=column_names, load_named_columns=load_named_columns, filters=filters)
     col_func = [float if t == 'REAL' else int if t == 'INTEGER' else str for t in col_types]
     table_def = 'CREATE TABLE %s (\n    %s%s\n);' % (
-                table_name, 
+                table_name,
                 '%s INTEGER PRIMARY KEY AUTOINCREMENT,' % pkey_autoincr if pkey_autoincr else '',
                 ', \n    '.join(col_def))
     # print >> sys.stdout, table_def
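+    # illustrative result, assuming col_def entries like 'c1 TEXT' and
+    # pkey_autoincr='id':
+    #   CREATE TABLE t1 (
+    #       id INTEGER PRIMARY KEY AUTOINCREMENT, c1 TEXT, c2 INTEGER
+    #   );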
@@ -142,25 +213,23 @@
             index_columns = index.split(',')
             create_index(conn, table_name, index_name, index_columns)
         c = conn.cursor()
-        with open(file_path, "r") as fh:
-            for linenum, line in enumerate(fh):
-                if linenum < skip or line.startswith(comment_char):
-                    continue
-                data_lines += 1
-                try:
-                    fields = line.rstrip('\r\n').split('\t')
-                    if col_idx:
-                        fields = [fields[i] for i in col_idx]
-                    vals = [col_func[i](x) if x else None for i, x in enumerate(fields)]
-                    c.execute(insert_stmt, vals)
-                except Exception, e:
-                    print >> sys.stderr, 'Failed at line: %d err: %s' % (linenum, e)
+        tr = TabularReader(file_path, skip=skip, comment_char=comment_char, col_idx=col_idx, filters=filters)
+        for linenum, fields in enumerate(tr):
+            data_lines += 1
+            try:
+                # col_idx is already applied inside TabularReader; applying it
+                # again here would pick the wrong columns or raise IndexError
+                vals = [col_func[i](x) if x else None for i, x in enumerate(fields)]
+                c.execute(insert_stmt, vals)
+            except Exception, e:
+                print >> sys.stderr, 'Failed at line: %d err: %s' % (linenum, e)
         conn.commit()
         c.close()
     except Exception, e:
         print >> sys.stderr, 'Failed: %s' % (e)
         exit(1)
 
+
 def create_index(conn, table_name, index_name, index_columns, unique=False):
     index_def = "CREATE %s INDEX %s on %s(%s)" % ('UNIQUE' if unique else '', index_name, table_name, ','.join(index_columns))
     c = conn.cursor()
@@ -168,6 +237,7 @@
     conn.commit()
     c.close()
 
+
 def regex_match(expr, item):
     return re.match(expr, item) is not None
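+# e.g. regex_match('^chr', 'chr10') is True (re.match anchors at the start)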
 
@@ -237,6 +307,7 @@
                     path = table['file_path']
                     table_name = table['table_name'] if 'table_name' in table else 't%d' % (ti + 1)
                     comment_lines = table['comment_lines'] if 'comment_lines' in table else 0
+                    comment_char = table['comment_char'] if 'comment_char' in table else None
                     column_names = table['column_names'] if 'column_names' in table else None
                     if column_names:
                         load_named_columns = table['load_named_columns'] if 'load_named_columns' in table else False
@@ -244,10 +315,11 @@
                         load_named_columns = False
                     unique_indexes = table['unique'] if 'unique' in table else []
                     indexes = table['index'] if 'index' in table else []
+                    filters = table['filters'] if 'filters' in table else None
                     pkey_autoincr = table['pkey_autoincr'] if 'pkey_autoincr' in table else None
                     create_table(conn, path, table_name, pkey_autoincr=pkey_autoincr, column_names=column_names, 
-                                 skip=comment_lines, load_named_columns=load_named_columns, 
-                                 unique_indexes=unique_indexes, indexes=indexes)
+                                 skip=comment_lines, comment_char=comment_char, load_named_columns=load_named_columns,
+                                 filters=filters, unique_indexes=unique_indexes, indexes=indexes)
         except Exception, exc:
             print >> sys.stderr, "Error: %s" % exc
     conn.close()
@@ -262,6 +334,8 @@
         query = options.query
 
     if (query is None):
+        tables_query = \
+            "SELECT name, sql FROM sqlite_master WHERE type='table' ORDER BY name"
         try:
             conn = get_connection(options.sqlitedb)
             c = conn.cursor()
--- a/query_tabular.xml	Wed Apr 20 15:46:04 2016 -0400
+++ b/query_tabular.xml	Fri Feb 17 15:20:24 2017 -0500
@@ -1,4 +1,4 @@
-<tool id="query_tabular" name="Query Tabular" version="0.1.3">
+<tool id="query_tabular" name="Query Tabular" version="2.0.0">
     <description>using sqlite sql</description>
 
     <requirements>
@@ -6,8 +6,15 @@
     <stdio>
         <exit_code range="1:" />
     </stdio>
-    <command interpreter="python"><![CDATA[
-        query_tabular.py 
+    <command><![CDATA[
+        #if $add_to_database.withdb: 
+            #if $save_db:
+                cp "$add_to_database.withdb" "$save_db" &&
+            #else:
+                cp "$add_to_database.withdb" "$workdb" &&
+            #end if 
+        #end if
+        python $__tool_directory__/query_tabular.py 
         #if $save_db
         -s $sqlitedb
         #else
@@ -19,6 +26,8 @@
           $no_header
           -o $output
         #end if
+        && cat $query_file
+        && cat $table_json
     ]]></command>
     <configfiles>
         <configfile name="query_file">
@@ -53,11 +62,6 @@
   #set $col_names = ''
   #end if
   #set $jtbl['column_names'] = $col_names
-  #if str($tbl.tbl_opts.skip_lines) != '':
-    #set $jtbl['comment_lines'] = int($tbl.tbl_opts.skip_lines)
-  #elif $tbl.table.metadata.comment_lines and $tbl.table.metadata.comment_lines > 0:
-    #set $jtbl['comment_lines'] = int($tbl.table.metadata.comment_lines)
-  #end if
   #set $idx_unique = []
   #set $idx_non = []
   #for $idx in $tbl.tbl_opts.indexes:
@@ -73,6 +77,40 @@
   #if len($idx_non) > 0:
     #set $jtbl['index'] = $idx_non
   #end if
+  #set $input_filters = []
+  #for $fi in $tbl.input_opts.linefilters:
+    #if $fi.filter.filter_type == 'skip':
+      #if str($fi.filter.skip_lines) != '':
+        #set $jtbl['comment_lines'] = int($fi.filter.skip_lines)
+      #elif $tbl.table.metadata.comment_lines and $tbl.table.metadata.comment_lines > 0:
+        #set $jtbl['comment_lines'] = int($tbl.table.metadata.comment_lines)
+      #end if
+    #elif $fi.filter.filter_type == 'comment':
+      #set $jtbl['comment_char'] = str($fi.filter.comment_char)
+    #elif $fi.filter.filter_type == 'regex':
+      #set $filter_dict = dict()
+      #set $filter_dict['filter'] = str($fi.filter.filter_type)
+      #set $filter_dict['pattern'] = str($fi.filter.regex_pattern)
+      #set $filter_dict['action'] = str($fi.filter.regex_action)
+      #silent $input_filters.append($filter_dict)
+    #elif $fi.filter.filter_type == 'replace':
+      #set $filter_dict = dict()
+      #set $filter_dict['filter'] = str($fi.filter.filter_type)
+      #set $filter_dict['column'] = int(str($fi.filter.column))
+      #set $filter_dict['pattern'] = str($fi.filter.regex_pattern)
+      #set $filter_dict['replace'] = str($fi.filter.regex_replace)
+      #silent $input_filters.append($filter_dict)
+    ## #elif $fi.filter.filter_type == 'normalize':
+    ##   #set $filter_dict = dict()
+    ##   #set $filter_dict['filter'] = str($fi.filter.filter_type)
+    ##   #set $filter_dict['columns'] = [int(str($ci)) for $ci in str($fi.filter.columns).split(',')]
+    ##   #set $filter_dict['separator'] = str($fi.filter.separator)
+    ##   #silent $input_filters.append($filter_dict)
+    #end if
+  #end for
+  #if $input_filters:
+    #set $jtbl['filters'] = $input_filters
+  #end if
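+  ## e.g. a single regex line filter chosen in the UI ends up in the JSON
+  ## table definition as (illustrative values):
+  ##   "filters": [{"filter": "regex", "pattern": "^2015", "action": "include_match"}]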
   #set $jtbls += [$jtbl]
 #end for
 #echo $json.dumps($jtbldef)
@@ -80,8 +118,66 @@
     </configfiles>
     <inputs>
         <param name="workdb" type="hidden" value="workdb.sqlite" label=""/>
+        <section name="add_to_database" expanded="false" title="Add tables to an existing database">
+            <param name="withdb" type="data" format="sqlite" optional="true" label="Add tables to this Database" 
+               help="Make sure your added table names are not already in this database"/>
+        </section>
         <repeat name="tables" title="Database Table" min="1">
             <param name="table" type="data" format="tabular" label="Tabular Dataset for Table"/>
+            <section name="input_opts" expanded="false" title="Filter Dataset Input">
+                <repeat name="linefilters" title="Filter Tabular Input Lines">
+                    <conditional name="filter">
+                        <param name="filter_type" type="select" label="Filter By">
+                            <option value="skip">skip leading lines</option>
+                            <option value="comment">comment char</option>
+                            <option value="regex">by regex expression matching</option>
+                            <option value="replace">regex replace value in column</option>
+                            <!--
+                            <option value="normalize">normalize list columns, replicates row for each item in list</option>
+                            -->
+                        </param>
+                        <when value="skip">
+                             <param name="skip_lines" type="integer" value="" min="0" label="Skip lines" 
+                                 help="Leave blank to use the comment lines metadata for this dataset" />
+                        </when>
+                        <when value="comment">
+                            <param name="comment_char" type="text" value="#" label="Comment line starting text">
+                            </param>
+                        </when>
+                        <when value="regex">
+                            <param name="regex_pattern" type="text" value="" label="regex pattern">
+                                <sanitizer sanitize="False"/>
+                            </param>
+                            <param name="regex_action" type="select" label="action for regex match">
+                                <option value="exclude_match">exclude line on pattern match</option>
+                                <option value="include_match">include line on pattern match</option>
+                                <option value="exclude_find">exclude line if pattern found</option>
+                                <option value="include_find">include line if pattern found</option>
+                            </param>
+                        </when>
+                        <when value="replace">
+                            <param name="column" type="data_column" data_ref="table" label="Column to replace text"
+                                   help=""/>
+                            <param name="regex_pattern" type="text" value="" label="regex pattern">
+                                <sanitizer sanitize="False"/>
+                            </param>
+                            <param name="regex_replace" type="text" value="" label="replacement expression">
+                                <sanitizer sanitize="False"/>
+                            </param>
+                        </when>
+                        <!--
+                        <when value="normalize">
+                            <param name="columns" type="data_column" data_ref="table" multiple="True" label="Columns to split"
+                                   help=""/>
+                            <param name="separator" type="text" value="," label="List item delimiter in column">
+                                <sanitizer sanitize="False"/>
+                                <validator type="regex" message="Anything but TAB or Newline">^[^\t\n\r\f\v]+$</validator>
+                            </param>
+                        </when>
+                        -->
+                    </conditional>
+                </repeat>
+            </section>
             <section name="tbl_opts" expanded="false" title="Table Options">
                 <param name="table_name" type="text" value="" optional="true" label="Specify Name for Table">
                     <help>By default, tables will be named: t1,t2,...,tn (table names must be unique)</help>
@@ -97,7 +193,6 @@
                        help="Only creates this additional column when a name is entered. (This can not be the same name as any of the other columns in this table.)">
                         <validator type="regex" message="Column name">^([A-Za-z]\w*)?$</validator>
                 </param>
-                <param name="skip_lines" type="integer" value="" min="0" optional="true" label="Skip lines" help="Leave blank to use the comment lines metadata for this dataset" />
                 <repeat name="indexes" title="Table Index">
                     <param name="unique" type="boolean" truevalue="yes" falsevalue="no" checked="False" label="This is a unique index"/>
                     <param name="index_columns" type="text" value="" label="Index on Columns">
@@ -108,7 +203,7 @@
             </section>
         </repeat>
         <param name="save_db" type="boolean" truevalue="yes" falsevalue="no" checked="false" label="Save the sqlite database in your history"/>
-        <param name="sqlquery" type="text" area="true" size="10x80" value="" optional="true" label="SQL Query to generate tabular output">
+        <param name="sqlquery" type="text" area="true" size="20x80" value="" optional="true" label="SQL Query to generate tabular output">
                 <help>By default: tables are named: t1,t2,...,tn and columns in each table: c1,c2,...,cn</help>
                 <sanitizer sanitize="False"/>
                 <validator type="regex" message="">^(?ims)\s*select\s+.*\s+from\s+.*$</validator>
@@ -186,6 +281,9 @@
 
   Loads tabular datasets into a SQLite_ data base.  
 
+  An existing SQLite_ database can be used as input, and any selected tabular datasets will be added as new tables in that database.
+
+
 **Outputs**
 
   The results of a SQL query are output to the history as a tabular file.
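+
+  For example, assuming two datasets were loaded with the default table names,
+  a query can join them on a shared column::
+
+    SELECT t1.c1, t2.c3
+    FROM t1 JOIN t2 ON t1.c2 = t2.c1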
@@ -195,7 +293,6 @@
     *(The* **SQLite to tabular** *tool can run additional queries on this database.)*
 
 
-
 For help in using SQLite_ see:  http://www.sqlite.org/docs.html
 
 **NOTE:** input for SQLite dates input field must be in the format: *YYYY-MM-DD* for example: 2015-09-30
Binary file test-data/._IEDB.tsv has changed
Binary file test-data/._netMHC_summary.tsv has changed
Binary file test-data/._query_results.tsv has changed
Binary file test-data/._regex_results.tsv has changed
Binary file test-data/._sales_results.tsv has changed