changeset 0:926c62f7fa09 draft

planemo upload for repository https://github.com/jj-umn/galaxytools/tree/master/query_tabular commit 9ae87502ea7c3da33ecc453872c4eb2f41ecea4a-dirty
author jjohnson
date Thu, 21 Jan 2016 08:23:45 -0500
parents
children c7a1a686e42b
files query_tabular.py query_tabular.xml test-data/IEDB.tsv test-data/customers.tsv test-data/netMHC_summary.tsv test-data/query_results.tsv test-data/regex_results.tsv test-data/sales.tsv test-data/sales_results.tsv
diffstat 9 files changed, 613 insertions(+), 0 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/query_tabular.py	Thu Jan 21 08:23:45 2016 -0500
@@ -0,0 +1,247 @@
+#!/usr/bin/env python
+"""
+"""
+import sys
+import re
+import os.path
+import json
+import sqlite3 as sqlite
+import optparse
+
+"""
+TODO:
+- could read column names from comment lines, but issues with legal names
+- could add some transformations on tabular columns,
+  e.g. a regex to format date/time strings
+    c2 : re.sub('pat', 'sub', c2)
+    c3 :
+- column_defs dict of columns to create from tabular input
+    column_defs : { 'name1' : 'expr', 'name2' : 'expr'}
+- allow multiple queries and outputs
+- add a --json input for table definitions (or yaml)
+JSON config (note: column_names is a comma-separated string, matching what
+get_column_def expects):
+{ "tables" : [
+    { "file_path" : "/home/galaxy/dataset_101.dat",
+      "table_name" : "t1",
+      "column_names" : "c1,c2,c3",
+      "comment_lines" : 1
+    },
+    { "file_path" : "/home/galaxy/dataset_102.dat",
+      "table_name" : "t2",
+      "column_names" : "c1,c2,c3"
+    },
+    { "file_path" : "/home/galaxy/dataset_103.dat",
+      "table_name" : "test",
+      "column_names" : "c1,c2,c3"
+    }
+  ]
+}
+"""
+
+tables_query = \
+    "SELECT name, sql FROM sqlite_master WHERE type='table' ORDER BY name"
+
+
+def getValueType(val):
+    if val:
+        try:
+            int(val)
+            return 'INTEGER'
+        except ValueError:
+            try:
+                float(val)
+                return 'REAL'
+            except ValueError:
+                return 'TEXT'
+    # Empty fields carry no type information.
+    return None
+
+
+def get_column_def(file_path, table_name, skip=0, comment_char='#',
+                   column_names=None, max_lines=100):
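+    # Type preference from most to least general; a column is widened to the
+    # most general type observed among its sampled values (None = no data yet).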
+    col_pref = ['TEXT', 'REAL', 'INTEGER', None]
+    col_types = []
+    data_lines = 0
+    try:
+        with open(file_path, "r") as fh:
+            for linenum, line in enumerate(fh):
+                if linenum < skip:
+                    continue
+                if line.startswith(comment_char):
+                    continue
+                data_lines += 1
+                # Sample at most max_lines data lines when inferring types.
+                if data_lines > max_lines:
+                    break
+                try:
+                    fields = line.rstrip('\r\n').split('\t')
+                    while len(col_types) < len(fields):
+                        col_types.append(None)
+                    for i, val in enumerate(fields):
+                        colType = getValueType(val)
+                        if col_pref.index(colType) < col_pref.index(col_types[i]):
+                            col_types[i] = colType
+                except Exception, e:
+                    print >> sys.stderr, 'Failed at line: %d err: %s' % (linenum, e)
+    except Exception, e:
+        print >> sys.stderr, 'Failed: %s' % (e)
+    for i, col_type in enumerate(col_types):
+        if not col_type:
+            col_types[i] = 'TEXT'
+    col_names = ['c%d' % i for i in range(1, len(col_types) + 1)]
+    if column_names:
+        for i, cname in enumerate([cn.strip() for cn in column_names.split(',')]):
+            if cname and i < len(col_names):
+                col_names[i] = cname
+    col_def = []
+    for i, col_name in enumerate(col_names):
+        col_def.append('%s %s' % (col_name, col_types[i]))
+    return col_names, col_types, col_def
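+
+# Illustrative (not executed): for data rows like "1\tfoo\t2.5",
+# get_column_def() returns (['c1', 'c2', 'c3'], ['INTEGER', 'TEXT', 'REAL'],
+# ['c1 INTEGER', 'c2 TEXT', 'c3 REAL']).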
+
+
+def create_table(conn, file_path, table_name, skip=0, comment_char='#', column_names=None):
+    col_names, col_types, col_def = get_column_def(file_path, table_name, skip=skip, comment_char=comment_char, column_names=column_names)
+    col_func = [float if t == 'REAL' else int if t == 'INTEGER' else str for t in col_types]
+    table_def = 'CREATE TABLE %s (\n    %s\n);' % (table_name, ', \n    '.join(col_def))
+    # print >> sys.stdout, table_def
+    insert_stmt = 'INSERT INTO %s(%s) VALUES(%s)' % (table_name, ','.join(col_names), ','.join(["?" for x in col_names]))
+    # print >> sys.stdout, insert_stmt
+    data_lines = 0
+    try:
+        c = conn.cursor()
+        c.execute(table_def)
+        with open(file_path, "r") as fh:
+            for linenum, line in enumerate(fh):
+                if linenum < skip or line.startswith(comment_char):
+                    continue
+                data_lines += 1
+                try:
+                    fields = line.rstrip('\r\n').split('\t')
+                    vals = [col_func[i](x) if x else None for i, x in enumerate(fields)]
+                    c.execute(insert_stmt, vals)
+                except Exception, e:
+                    print >> sys.stderr, 'Failed at line: %d err: %s' % (linenum, e)
+        conn.commit()
+        c.close()
+    except Exception, e:
+        print >> sys.stderr, 'Failed: %s' % (e)
+        exit(1)
+
+
+def regex_match(expr, item):
+    return re.match(expr, item) is not None
+
+
+def regex_search(expr, item):
+    return re.search(expr, item) is not None
+
+
+def regex_sub(expr, replace, item):
+    return re.sub(expr, replace, item)
+
+
+def get_connection(sqlitedb_path, addfunctions=False):
+    conn = sqlite.connect(sqlitedb_path)
+    if addfunctions:
+        conn.create_function("re_match", 2, regex_match)
+        conn.create_function("re_search", 2, regex_search)
+        conn.create_function("re_sub", 3, regex_sub)
+    return conn
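+
+# Illustrative only: with addfunctions=True the functions registered above can
+# be called directly from SQL, e.g.
+#   conn = get_connection('my.sqlite', addfunctions=True)
+#   conn.execute("SELECT c1 FROM t1 WHERE re_search('[hp]er', c4)")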
+
+
+def __main__():
+    # Parse Command Line
+    parser = optparse.OptionParser()
+    parser.add_option('-s', '--sqlitedb', dest='sqlitedb', default=None, help='The SQLite Database')
+    parser.add_option('-t', '--table', dest='tables', action="append", default=[], help='Tabular file: file_path[=table_name[:column_name, ...]]')
+    parser.add_option('-j', '--jsonfile', dest='jsonfile', default=None, help='JSON file with an array of table specifications')
+    parser.add_option('-q', '--query', dest='query', default=None, help='SQL query')
+    parser.add_option('-Q', '--query_file', dest='query_file', default=None, help='SQL query file')
+    parser.add_option('-n', '--no_header', dest='no_header', action='store_true', default=False, help='Omit the column headers line from the output')
+    parser.add_option('-o', '--output', dest='output', default=None, help='Output file for query results')
+    (options, args) = parser.parse_args()
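+
+    # Example invocation (illustrative; the file, table, and column names are
+    # placeholders):
+    #   query_tabular.py -s work.sqlite \
+    #     -t customers.tsv=customers:CustomerID,FirstName,LastName \
+    #     -q 'SELECT FirstName, LastName FROM customers' -o results.tsv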
+
+    # open sqlite connection
+    conn = get_connection(options.sqlitedb)
+    # determine output destination
+    if options.output is not None:
+        try:
+            outputPath = os.path.abspath(options.output)
+            outputFile = open(outputPath, 'w')
+        except Exception, e:
+            print >> sys.stderr, "failed: %s" % e
+            exit(3)
+    else:
+        outputFile = sys.stdout
+
+    # get table defs
+    if options.tables:
+        for ti, table in enumerate(options.tables):
+            table_name = 't%d' % (ti + 1)
+            column_names = None
+            fields = table.split('=')
+            path = fields[0]
+            if len(fields) > 1:
+                names = fields[1].split(':')
+                table_name = names[0] if names[0] else table_name
+                if len(names) > 1:
+                    column_names = names[1]
+            # print >> sys.stdout, '%s %s' % (table_name, path)
+            create_table(conn, path, table_name, column_names=column_names)
+    if options.jsonfile:
+        try:
+            with open(options.jsonfile) as fh:
+                tdef = json.load(fh)
+            if 'tables' in tdef:
+                for ti, table in enumerate(tdef['tables']):
+                    path = table['file_path']
+                    table_name = table.get('table_name', 't%d' % (ti + 1))
+                    column_names = table.get('column_names', None)
+                    comment_lines = table.get('comment_lines', 0)
+                    create_table(conn, path, table_name, column_names=column_names, skip=comment_lines)
+        except Exception, exc:
+            print >> sys.stderr, "Error: %s" % exc
+    conn.close()
+
+    query = None
+    if options.query_file is not None:
+        with open(options.query_file, 'r') as fh:
+            query = fh.read()
+    elif options.query is not None:
+        query = options.query
+
+    if query is None:
+        try:
+            conn = get_connection(options.sqlitedb)
+            c = conn.cursor()
+            rslt = c.execute(tables_query).fetchall()
+            for table, sql in rslt:
+                print >> sys.stderr, "Table %s:" % table
+                try:
+                    col_query = 'SELECT * FROM %s LIMIT 0' % table
+                    cur = conn.cursor().execute(col_query)
+                    cols = [col[0] for col in cur.description]
+                    print >> sys.stderr, " Columns: %s" % cols
+                except Exception, exc:
+                    print >> sys.stderr, "Error: %s" % exc
+        except Exception, exc:
+            print >> sys.stderr, "Error: %s" % exc
+        exit(0)
+    # if not sqlite.is_read_only_query(query):
+    #    print >> sys.stderr, "Error: Must be a read only query"
+    #    exit(2)
+    try:
+        conn = get_connection(options.sqlitedb, addfunctions=True)
+        cur = conn.cursor()
+        results = cur.execute(query)
+        if not options.no_header:
+            outputFile.write("#%s\n" % '\t'.join([str(col[0]) for col in cur.description]))
+            # yield [col[0] for col in cur.description]
+        for i, row in enumerate(results):
+            # yield [val for val in row]
+            outputFile.write("%s\n" % '\t'.join([str(val) for val in row]))
+    except Exception, exc:
+        print >> sys.stderr, "Error: %s" % exc
+        exit(1)
+
+if __name__ == "__main__":
+    __main__()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/query_tabular.xml	Thu Jan 21 08:23:45 2016 -0500
@@ -0,0 +1,303 @@
+<tool id="query_tabular" name="Query Tabular" version="0.1.0">
+    <description>using SQLite SQL</description>
+
+    <requirements>
+    </requirements>
+    <stdio>
+        <exit_code range="1:" />
+    </stdio>
+    <command interpreter="python"><![CDATA[
+        query_tabular.py 
+        #if $save_db
+        -s $sqlitedb
+        #else
+        -s $workdb
+        #end if
+        -j $table_json
+        #if $sqlquery:
+          -Q "$query_file" 
+          $no_header
+          -o $output
+        #end if
+    ]]></command>
+    <configfiles>
+        <configfile name="query_file">
+$sqlquery
+        </configfile>
+        <configfile name="table_json">
+#import json
+#set $jtbldef = dict()
+#set $jtbls = []
+#set $jtbldef['tables'] = $jtbls
+#for $i,$tbl in enumerate($tables):
+  #set $jtbl = dict()
+  #set $jtbl['file_path'] = str($tbl.table)
+  #if $tbl.table_name
+  #set $tname = str($tbl.table_name)
+  #else
+  #set $tname = 't' + str($i + 1) 
+  #end if
+  #set $jtbl['table_name'] = $tname
+  #if $tbl.col_names:
+  #set $col_names = str($tbl.col_names)
+  #else 
+  #set $col_names = ''
+  #end if
+  #set $jtbl['column_names'] = $col_names
+  #if str($tbl.skip_lines) != '':
+    #set $jtbl['comment_lines'] = $tbl.skip_lines
+  #elif $tbl.table.metadata.comment_lines > 0:
+    #set $jtbl['comment_lines'] = int($tbl.table.metadata.comment_lines)
+  #end if
+  #set $jtbls += [$jtbl]
+#end for
+#echo $json.dumps($jtbldef)
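+## The #echo above emits JSON of the form (illustrative):
+##   {"tables": [{"file_path": "/path/dataset_1.dat", "table_name": "t1",
+##                "column_names": "c1,c2", "comment_lines": 1}]}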
+        </configfile>
+    </configfiles>
+    <inputs>
+        <param name="workdb" type="hidden" value="workdb.sqlite" label=""/>
+        <repeat name="tables" title="Add tables" min="1">
+            <param name="table" type="data" format="tabular" label="Dataset"/>
+            <param name="table_name" type="text" value="" optional="true" label="Table name">
+                <help>By default, tables will be named: t1,t2,...,tn</help>
+                <validator type="regex" message="Table name should start with a letter and may contain additional letters, digits, and underscores">^[A-Za-z]\w*$</validator>
+            </param>
+            <!--
+            <param name="sel_cols" label="Include columns" type="data_column" multiple="true" data_ref="table" />
+            -->
+            <param name="col_names" type="text" value="" optional="true" label="Column names">
+                <help>By default, table columns will be named: c1,c2,c3,...,cn</help>
+                <validator type="regex" message="A List of separated by commas: Column names should start with a letter and may contain additional letters, digits, and underscores">^([A-Za-z]\w*)?(,([A-Za-z]\w*)?)*$</validator>
+            </param>
+            <param name="skip_lines" type="integer" value="" min="0" optional="true" label="Skip lines" help="Leave blank to use the datatype comment lines metadata" />
+        </repeat>
+        <param name="sqlquery" type="text" area="true" size="10x80" value="" optional="true" label="SQL Query">
+                <help>Tables are named t1,t2,...,tn and columns c1,c2,...,cn unless renamed above</help>
+                <sanitizer sanitize="False"/>
+                <validator type="regex" message="">^(?i)\s*select\s+.*\s+from\s+.*$</validator>
+        </param>
+        <param name="no_header" type="boolean" truevalue="-n" falsevalue="" checked="False" label="Omit column headers"/>
+
+        <param name="save_db" type="boolean" truevalue="yes" falsevalue="no" checked="false" label="Save the sqlite database"/>
+    </inputs>
+    <outputs>
+        <data format="sqlite" name="sqlitedb" label="sqlite db of ${on_string}">
+            <filter>save_db or not (sqlquery and len(sqlquery) > 0)</filter>
+        </data>
+        <data format="tabular" name="output" label="query results on ${on_string}">
+            <filter>sqlquery and len(sqlquery) > 0</filter>
+        </data>
+    </outputs>
+    <tests>
+
+        <test>
+            <repeat name="tables">
+                <param name="table" ftype="tabular" value="customers.tsv"/>
+                <param name="table_name" value="customers"/>
+                <param name="col_names" value="CustomerID,FirstName,LastName,Email,DOB,Phone"/>
+            </repeat>
+            <repeat name="tables">
+                <param name="table" ftype="tabular" value="sales.tsv"/>
+                <param name="table_name" value="sales"/>
+                <param name="col_names" value="CustomerID,Date,SaleAmount"/>
+            </repeat>
+            <param name="sqlquery" value="SELECT FirstName,LastName,sum(SaleAmount) as &quot;TotalSales&quot; FROM customers join sales on customers.CustomerID = sales.CustomerID GROUP BY customers.CustomerID ORDER BY TotalSales DESC"/>
+            <output name="output" file="sales_results.tsv"/>
+        </test>
+
+        <test>
+            <repeat name="tables">
+                <param name="table" ftype="tabular" value="customers.tsv"/>
+                <param name="col_names" value=",FirstName,LastName,,DOB,"/>
+            </repeat>
+            <repeat name="tables">
+                <param name="table" ftype="tabular" value="sales.tsv"/>
+            </repeat>
+            <param name="sqlquery" value="SELECT FirstName,LastName,sum(t2.c3) as &quot;TotalSales&quot; FROM t1 join t2 on t1.c1 = t2.c1 GROUP BY t1.c1 ORDER BY TotalSales DESC;"/>
+            <output name="output" file="sales_results.tsv"/>
+        </test>
+
+        <test>
+            <repeat name="tables">
+                <param name="table" ftype="tabular" value="customers.tsv"/>
+                <param name="col_names" value=",FirstName,LastName,,BirthDate,"/>
+            </repeat>
+            <param name="sqlquery" value="select FirstName,LastName,re_sub('^\d{2}(\d{2})-(\d\d)-(\d\d)','\3/\2/\1',BirthDate) as &quot;DOB&quot; from t1 WHERE re_search('[hp]er',c4)"/>
+            <output name="output" file="regex_results.tsv"/>
+        </test>
+
+        <test>
+            <repeat name="tables">
+                <param name="table" ftype="tabular" value="IEDB.tsv"/>
+                <param name="table_name" value="iedb"/>
+                <param name="col_names" value="ID,allele,seq_num,start,end,length,peptide,method,percentile_rank,ann_ic50,ann_rank,smm_ic50,smm_rank,comblib_sidney2008_score,comblib_sidney2008_rank,netmhcpan_ic50,netmhcpan_rank"/>
+            </repeat>
+            <repeat name="tables">
+                <param name="table" ftype="tabular" value="netMHC_summary.tsv"/>
+                <param name="table_name" value="mhc_summary"/>
+                <param name="col_names" value="pos,peptide,logscore,affinity,Bind_Level,Protein,Allele"/>
+            </repeat>
+            <param name="sqlquery" value="select iedb.ID,iedb.peptide,iedb.start,iedb.end,iedb.percentile_rank,mhc_summary.logscore,mhc_summary.affinity,mhc_summary.Bind_Level from iedb left outer join mhc_summary on iedb.peptide = mhc_summary.peptide order by affinity,Bind_Level"/>
+            <output name="output" file="query_results.tsv"/>
+        </test>
+
+    </tests>
+    <help><![CDATA[
+=============
+Query Tabular
+=============
+
+**Inputs**
+
+  Loads tabular datasets into a SQLite_ database.
+
+**Outputs**
+
+  The results of a SQL query are output to the history as a tabular file.
+
+  The SQLite_ database can also be saved and output as a dataset in the history.
+
+
+For help in using SQLite_ see:  http://www.sqlite.org/docs.html
+
+**NOTE:** date values used with SQLite date functions must be in the format *YYYY-MM-DD*, for example: 2015-09-30
+
+See: http://www.sqlite.org/lang_datefunc.html
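+
+  For example, a sketch using SQLite date functions against the *customers*
+  table defined below (the DOB column holds ISO-8601 dates):
+
+  ::
+
+    SELECT FirstName, LastName, strftime('%Y', DOB) as "BirthYear"
+    FROM customers
+    WHERE DOB >= date('1974-01-01')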
+
+**Example** 
+
+  Given 2 tabular datasets: *customers* and *sales*
+  
+   Dataset *customers*
+  
+    Table name: "customers"
+  
+    Column names: "CustomerID,FirstName,LastName,Email,DOB,Phone"
+  
+    =========== ========== ========== ===================== ========== ============
+    #CustomerID FirstName  LastName   Email                 DOB        Phone
+    =========== ========== ========== ===================== ========== ============
+    1           John       Smith      John.Smith@yahoo.com  1968-02-04 626 222-2222
+    2           Steven     Goldfish   goldfish@fishhere.net 1974-04-04 323 455-4545
+    3           Paula      Brown      pb@herowndomain.org   1978-05-24 416 323-3232
+    4           James      Smith      jim@supergig.co.uk    1980-10-20 416 323-8888
+    =========== ========== ========== ===================== ========== ============
+  
+   Dataset *sales*
+  
+    Table name: "sales"
+  
+    Column names: "CustomerID,Date,SaleAmount"
+  
+    =============  ============  ============
+      #CustomerID    Date          SaleAmount
+    =============  ============  ============
+               2    2004-05-06         100.22
+               1    2004-05-07          99.95
+               3    2004-05-07         122.95
+               3    2004-05-13         100.00
+               4    2004-05-22         555.55
+    =============  ============  ============
+  
+  The query
+  
+  ::
+  
+    SELECT FirstName,LastName,sum(SaleAmount) as "TotalSales" 
+    FROM customers join sales on customers.CustomerID = sales.CustomerID 
+    GROUP BY customers.CustomerID ORDER BY TotalSales DESC;
+  
+  Produces this tabular output:
+  
+    ========== ======== ==========
+    #FirstName LastName TotalSales
+    ========== ======== ==========
+    James      Smith    555.55
+    Paula      Brown    222.95
+    Steven     Goldfish 100.22
+    John       Smith    99.95
+    ========== ======== ==========
+  
+  
+  If the optional Table name and Column names inputs are not used, the query would be:
+  
+  ::
+  
+    SELECT t1.c2 as "FirstName", t1.c3 as "LastName", sum(t2.c3) as "TotalSales" 
+    FROM t1 join t2 on t1.c1 = t2.c1 
+    GROUP BY t1.c1 ORDER BY TotalSales DESC;
+  
+  You can selectively name columns, e.g. on the customers input you could name just columns 2, 3, and 5:
+  
+    Column names: ,FirstName,LastName,,BirthDate
+  
+    Results in the following database table:
+  
+    =========== ========== ========== ===================== ========== ============
+    #c1         FirstName  LastName   c4                    BirthDate  c6
+    =========== ========== ========== ===================== ========== ============
+    1           John       Smith      John.Smith@yahoo.com  1968-02-04 626 222-2222
+    2           Steven     Goldfish   goldfish@fishhere.net 1974-04-04 323 455-4545
+    3           Paula      Brown      pb@herowndomain.org   1978-05-24 416 323-3232
+    4           James      Smith      jim@supergig.co.uk    1980-10-20 416 323-8888
+    =========== ========== ========== ===================== ========== ============
+
+  Regular_expression_ functions are included for: 
+
+  ::
+
+    matching:      re_match('pattern',column) 
+
+    SELECT t1.FirstName, t1.LastName
+    FROM t1
+    WHERE re_match('^.*\.(net|org)$',c4)
+
+  Results:
+
+    =========== ==========
+    #FirstName  LastName
+    =========== ==========
+    Steven      Goldfish
+    Paula       Brown
+    =========== ==========
+
+
+  ::
+
+    searching:     re_search('pattern',column)
+    substituting:  re_sub('pattern','replacement',column)
+
+    SELECT t1.FirstName, t1.LastName, re_sub('^\d{2}(\d{2})-(\d\d)-(\d\d)','\3/\2/\1',BirthDate) as "DOB"
+    FROM t1
+    WHERE re_search('[hp]er',c4)
+
+  Results:
+
+    =========== ========== ==========
+    #FirstName  LastName   DOB
+    =========== ========== ==========
+    Steven      Goldfish   04/04/74
+    Paula       Brown      24/05/78
+    James       Smith      20/10/80
+    =========== ========== ==========
+
+.. _Regular_expression: https://docs.python.org/release/2.7/library/re.html
+.. _SQLite: http://www.sqlite.org/index.html
+
+    ]]></help>
+</tool>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/IEDB.tsv	Thu Jan 21 08:23:45 2016 -0500
@@ -0,0 +1,17 @@
+#ID	allele	seq_num	start	end	length	peptide	method	percentile_rank	ann_ic50	ann_rank	smm_ic50	smm_rank	comblib_sidney2008_score	comblib_sidney2008_rank	netmhcpan_ic50	netmhcpan_rank
+PPAP2C	HLA-A*02:01	1	3	11	9	GMYCMVFLV	Consensus (ann/smm/comblib_sidney2008)	0.2	4	0.2	3.77	0.2	7.1e-06	0.5	-	-
+PPAP2C	HLA-A*23:01	1	1	9	9	SFGMYCMVF	Consensus (ann/smm)	0.5	67	0.5	137.54	0.5	-	-	-	-
+PPAP2C	HLA-A*23:01	1	4	12	9	MYCMVFLVK	Consensus (ann/smm)	0.65	146	0.7	160.11	0.6	-	-	-	-
+PPAP2C	HLA-A*02:01	1	2	10	9	FGMYCMVFL	Consensus (ann/smm/comblib_sidney2008)	2.3	222	3.1	150.01	2.3	2.14e-05	1.3	-	-
+PPAP2C	HLA-A*23:01	1	3	11	9	GMYCMVFLV	Consensus (ann/smm)	4.95	3256	4	2706.64	5.9	-	-	-	-
+PPAP2C	HLA-A*23:01	1	2	10	9	FGMYCMVFL	Consensus (ann/smm)	6.55	4423	4.9	4144.10	8.2	-	-	-	-
+PPAP2C	HLA-A*02:01	1	1	9	9	SFGMYCMVF	Consensus (ann/smm/comblib_sidney2008)	45	24390	45	44989.38	39	0.01	91	-	-
+PPAP2C	HLA-A*02:01	1	4	12	9	MYCMVFLVK	Consensus (ann/smm/comblib_sidney2008)	54	23399	41	157801.09	54	0.01	86	-	-
+ADAMTSL1	HLA-A*02:01	1	1	9	9	SLDMCISGL	Consensus (ann/smm/comblib_sidney2008)	1	26	1	51.65	0.9	3.02e-05	1.7	-	-
+ADAMTSL1	HLA-A*23:01	1	4	12	9	MCISGLCQL	Consensus (ann/smm)	6.65	5781	5.9	3626.02	7.4	-	-	-	-
+ADAMTSL1	HLA-A*02:01	1	4	12	9	MCISGLCQL	Consensus (ann/smm/comblib_sidney2008)	14	1823	6.5	2612.82	14	0.00056	24	-	-
+ADAMTSL1	HLA-A*23:01	1	1	9	9	SLDMCISGL	Consensus (ann/smm)	30.5	27179	34	24684.82	27	-	-	-	-
+ADAMTSL1	HLA-A*02:01	1	2	10	9	LDMCISGLC	Consensus (ann/smm/comblib_sidney2008)	42	23677	42	53716.78	41	0.01	71	-	-
+ADAMTSL1	HLA-A*23:01	1	3	11	9	DMCISGLCQ	Consensus (ann/smm)	64.5	34451	73	118148.99	56	-	-	-	-
+ADAMTSL1	HLA-A*23:01	1	2	10	9	LDMCISGLC	Consensus (ann/smm)	76.0	33222	62	665932.18	90	-	-	-	-
+ADAMTSL1	HLA-A*02:01	1	3	11	9	DMCISGLCQ	Consensus (ann/smm/comblib_sidney2008)	97	31630	98	639896.89	71	0.03	97	-	-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/customers.tsv	Thu Jan 21 08:23:45 2016 -0500
@@ -0,0 +1,5 @@
+#CustomerID	FirstName	LastName	Email	DOB	Phone
+1	John	Smith	John.Smith@yahoo.com	1968-02-04	626 222-2222
+2	Steven	Goldfish	goldfish@fishhere.net	1974-04-04	323 455-4545
+3	Paula	Brown	pb@herowndomain.org	1978-05-24	416 323-3232
+4	James	Smith	jim@supergig.co.uk	1980-10-20	416 323-8888
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/netMHC_summary.tsv	Thu Jan 21 08:23:45 2016 -0500
@@ -0,0 +1,9 @@
+#pos	peptide	logscore	affinity(nM)	Bind Level	Protein Name	Allele
+2	GMYCMVFLV	0.858	4	SB	PPAP2C	HLA-A02:01
+1	FGMYCMVFL	0.501	222	WB	PPAP2C	HLA-A02:01
+3	MYCMVFLVK	0.070	23399		PPAP2C	HLA-A02:01
+0	SFGMYCMVF	0.066	24390		PPAP2C	HLA-A02:01
+0	SLDMCISGL	0.698	26	SB	ADAMTSL1	HLA-A02:01
+3	MCISGLCQL	0.306	1823		ADAMTSL1	HLA-A02:01
+1	LDMCISGLC	0.069	23677		ADAMTSL1	HLA-A02:01
+2	DMCISGLCQ	0.042	31630		ADAMTSL1	HLA-A02:01
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/query_results.tsv	Thu Jan 21 08:23:45 2016 -0500
@@ -0,0 +1,17 @@
+#ID	peptide	start	end	percentile_rank	logscore	affinity	Bind_Level
+PPAP2C	GMYCMVFLV	3	11	0.2	0.858	4	SB
+PPAP2C	GMYCMVFLV	3	11	4.95	0.858	4	SB
+ADAMTSL1	SLDMCISGL	1	9	1.0	0.698	26	SB
+ADAMTSL1	SLDMCISGL	1	9	30.5	0.698	26	SB
+PPAP2C	FGMYCMVFL	2	10	2.3	0.501	222	WB
+PPAP2C	FGMYCMVFL	2	10	6.55	0.501	222	WB
+ADAMTSL1	MCISGLCQL	4	12	6.65	0.306	1823	None
+ADAMTSL1	MCISGLCQL	4	12	14.0	0.306	1823	None
+PPAP2C	MYCMVFLVK	4	12	0.65	0.07	23399	None
+PPAP2C	MYCMVFLVK	4	12	54.0	0.07	23399	None
+ADAMTSL1	LDMCISGLC	2	10	42.0	0.069	23677	None
+ADAMTSL1	LDMCISGLC	2	10	76.0	0.069	23677	None
+PPAP2C	SFGMYCMVF	1	9	0.5	0.066	24390	None
+PPAP2C	SFGMYCMVF	1	9	45.0	0.066	24390	None
+ADAMTSL1	DMCISGLCQ	3	11	64.5	0.042	31630	None
+ADAMTSL1	DMCISGLCQ	3	11	97.0	0.042	31630	None
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/regex_results.tsv	Thu Jan 21 08:23:45 2016 -0500
@@ -0,0 +1,4 @@
+#FirstName	LastName	DOB
+Steven	Goldfish	04/04/74
+Paula	Brown	24/05/78
+James	Smith	20/10/80
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/sales.tsv	Thu Jan 21 08:23:45 2016 -0500
@@ -0,0 +1,6 @@
+#CustomerID	Date	SaleAmount
+2	2004-05-06	100.22
+1	2004-05-07	99.95
+3	2004-05-07	122.95
+3	2004-05-13	100.00
+4	2004-05-22	555.55
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/sales_results.tsv	Thu Jan 21 08:23:45 2016 -0500
@@ -0,0 +1,5 @@
+#FirstName	LastName	TotalSales
+James	Smith	555.55
+Paula	Brown	222.95
+Steven	Goldfish	100.22
+John	Smith	99.95