changeset 33:aa0420172fc6

Deleted selected files
author m-zytnicki
date Tue, 30 Apr 2013 14:34:53 -0400
parents 3441fe98a2ba
children 529e3e6a0954
files commons/core/launcher/JobScriptTemplate.py commons/core/launcher/JobScriptTemplateLight.py commons/core/launcher/JobScriptWithFilesCopyTemplate.py commons/core/launcher/Launcher.py commons/core/launcher/Launcher2.py commons/core/launcher/LauncherUtils.py commons/core/launcher/WriteScript.py commons/core/launcher/__init__.py commons/core/launcher/test/Test_Launcher.py commons/core/launcher/test/Test_Launcher2.py commons/core/launcher/test/Test_LauncherUtils.py commons/core/launcher/test/Test_WriteScript.py commons/core/launcher/test/__init__.py commons/core/launcher/test/expFiles/expJobScriptSQLiteWithFilesCopyTemplate.py commons/core/launcher/test/expFiles/expJobScriptTemplate.py commons/core/launcher/test/expFiles/expJobScriptTemplateLight.py commons/core/launcher/test/expFiles/expJobScriptTemplate_cmdWith2Lines.py commons/core/launcher/test/expFiles/expJobScriptWithFilesCopyTemplate.py commons/core/sql/DbFactory.py commons/core/sql/DbMySql.py commons/core/sql/DbSQLite.py commons/core/sql/ITableMapAdaptator.py commons/core/sql/ITableMatchAdaptator.py commons/core/sql/ITablePathAdaptator.py commons/core/sql/ITableSeqAdaptator.py commons/core/sql/ITableSetAdaptator.py commons/core/sql/Job.py commons/core/sql/JobAdaptator.py commons/core/sql/OldRepetDB.py commons/core/sql/RepetJob.py commons/core/sql/TableAdaptator.py commons/core/sql/TableBinPathAdaptator.py commons/core/sql/TableBinSetAdaptator.py commons/core/sql/TableJobAdaptator.py commons/core/sql/TableJobAdaptatorFactory.py commons/core/sql/TableMapAdaptator.py commons/core/sql/TableMatchAdaptator.py commons/core/sql/TablePathAdaptator.py commons/core/sql/TableSeqAdaptator.py commons/core/sql/TableSetAdaptator.py commons/core/sql/__init__.py commons/core/sql/test/TestSuite_sql.py commons/core/sql/test/Test_DbFactory.py commons/core/sql/test/Test_DbMySql.py commons/core/sql/test/Test_DbSQLite.py commons/core/sql/test/Test_F_JobAdaptator.py commons/core/sql/test/Test_F_TableJobAdaptator.py commons/core/sql/test/Test_Job.py commons/core/sql/test/Test_TableBinPathAdaptator.py commons/core/sql/test/Test_TableBinSetAdaptator.py commons/core/sql/test/Test_TableJobAdaptator.py commons/core/sql/test/Test_TableJobAdaptatorFactory.py commons/core/sql/test/Test_TableMapAdaptator.py commons/core/sql/test/Test_TableMatchAdaptator.py commons/core/sql/test/Test_TablePathAdaptator.py commons/core/sql/test/Test_TableSeqAdaptator.py commons/core/sql/test/Test_TableSetAdaptator.py commons/core/sql/test/Tst_F_RepetJob.py commons/core/sql/test/Tst_RepetJob.py commons/core/sql/test/__init__.py commons/core/stat/Stat.py commons/core/stat/__init__.py commons/core/stat/test/Test_F_Stat.py commons/core/stat/test/Test_Stat.py commons/core/stat/test/__init__.py commons/core/test/Test_LoggerFactory.py commons/core/test/__init__.py commons/core/tree/Tree.py commons/core/tree/__init__.py commons/core/tree/test/Test_Tree.py commons/core/tree/test/__init__.py commons/core/tree/test/treeTestSuite.py
diffstat 63 files changed, 0 insertions(+), 15918 deletions(-)
--- a/commons/core/launcher/JobScriptTemplate.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,95 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-import time
-import shutil
-from commons.core.checker.RepetException import RepetException
-from commons.core.sql.TableJobAdaptator import TableJobAdaptator
-from commons.core.sql.DbFactory import DbFactory
-from commons.core.sql.Job import Job
-
-try:
-	newDir = None
-	print os.uname()
-	beginTime = time.time()
-	print 'beginTime=%f' % beginTime
-	print "work in dir '@@tmpDir@@'"
-	sys.stdout.flush()
-	if not os.path.exists( "@@tmpDir@@" ):
-		raise IOError("ERROR: temporary directory '@@tmpDir@@' doesn't exist")
-	
-	minFreeGigaInTmpDir = 1
-	freeSpace = os.statvfs("@@tmpDir@@")
-	if ((freeSpace.f_bavail * freeSpace.f_frsize) / 1073741824.0 < minFreeGigaInTmpDir):
-		raise RepetException("ERROR: less than %iG of free space in '@@tmpDir@@'" % minFreeGigaInTmpDir)
-	
-	os.chdir("@@tmpDir@@")
-	newDir = "@@groupId@@_@@jobName@@_@@time@@"
-	if os.path.exists(newDir):
-		shutil.rmtree(newDir)
-	os.mkdir(newDir)
-	os.chdir(newDir)
-	
-	iJob = Job(jobname = "@@jobName@@", groupid = "@@groupId@@", launcherFile = "@@launcher@@", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "@@jobTableName@@")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "running")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	
-	@@cmdStart@@
-	if log != 0:
-		raise RepetException("ERROR: job returned %i" % log)
-	else:
-		print "job finished successfully"
-	sys.stdout.flush()
-	@@cmdFinish@@
-	
-	os.chdir("..")
-	shutil.rmtree(newDir)
-	
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "@@jobTableName@@")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "finished")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	
-	endTime = time.time()
-	print 'endTime=%f' % endTime
-	print 'executionTime=%f' % (endTime - beginTime)
-	print os.uname()
-	sys.stdout.flush()
-
-except IOError, e :
-	print e
-	iJob = Job(jobname = "@@jobName@@", groupid = "@@groupId@@", launcherFile = "@@launcher@@", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "@@jobTableName@@")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "error")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	sys.exit(1)
-
-except Exception, e :
-	print "tmpDir is : @@tmpDir@@"
-	print "cDir is : @@cDir@@"
-	print e
-	if newDir != None and os.path.exists("../%s" % newDir) and not os.path.exists("@@cDir@@/%s" % newDir):
-		os.chdir("..")
-		shutil.move(newDir, "@@cDir@@/%s" % newDir)
-	iJob = Job(jobname = "@@jobName@@", groupid = "@@groupId@@", launcherFile = "@@launcher@@", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "@@jobTableName@@")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "error")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	sys.exit(1)
--- a/commons/core/launcher/JobScriptTemplateLight.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-import time
-import shutil
-from commons.core.checker.RepetException import RepetException
-try:
-	newDir = None
-	print os.uname()
-	beginTime = time.time()
-	print 'beginTime=%f' % beginTime
-	print "work in dir '@@tmpDir@@'"
-	sys.stdout.flush()
-	if not os.path.exists( "@@tmpDir@@" ):
-		raise IOError("ERROR: temporary directory '@@tmpDir@@' doesn't exist")
-	
-	minFreeGigaInTmpDir = 1
-	freeSpace = os.statvfs("@@tmpDir@@")
-	if ((freeSpace.f_bavail * freeSpace.f_frsize) / 1073741824.0 < minFreeGigaInTmpDir):
-		raise RepetException("ERROR: less than %iG of free space in '@@tmpDir@@'" % minFreeGigaInTmpDir)
-	
-	os.chdir("@@tmpDir@@")
-	newDir = "@@groupId@@_@@jobName@@_@@time@@"
-	if os.path.exists(newDir):
-		shutil.rmtree(newDir)
-	os.mkdir(newDir)
-	os.chdir(newDir)
-	
-	@@cmdStart@@
-	if log != 0:
-		raise RepetException("ERROR: job returned %i" % log)
-	else:
-		print "job finished successfully"
-	sys.stdout.flush()
-	@@cmdFinish@@
-	
-	os.chdir("..")
-	shutil.rmtree(newDir)	
-	endTime = time.time()
-	print 'endTime=%f' % endTime
-	print 'executionTime=%f' % (endTime - beginTime)
-	print os.uname()
-	sys.stdout.flush()
-
-except IOError, e :
-	print e
-	sys.stdout.flush()
-	sys.exit(1)
\ No newline at end of file
--- a/commons/core/launcher/JobScriptWithFilesCopyTemplate.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,109 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-import time
-import shutil
-from commons.core.checker.RepetException import RepetException
-from commons.core.sql.TableJobAdaptator import TableJobAdaptator
-from commons.core.sql.DbFactory import DbFactory
-from commons.core.sql.Job import Job
-
-try:
-	newDir = None
-	print os.uname()
-	beginTime = time.time()
-	print 'beginTime=%f' % beginTime
-	print "work in dir '@@tmpDir@@'"
-	sys.stdout.flush()
-	if not os.path.exists("@@tmpDir@@"):
-		raise IOError("ERROR: temporary directory '@@tmpDir@@' doesn't exist")
-	
-	fileSize = 0
-	if not os.path.exists("@@groupId@@"):
-		@@cmdSize@@
-	freeGigaNeededInTmpDir = float(1 + fileSize)
-	freeSpace = os.statvfs("@@tmpDir@@")
-	if ((freeSpace.f_bavail * freeSpace.f_frsize) / 1073741824.0 < freeGigaNeededInTmpDir):
-		raise RepetException("ERROR: less than %.2fG of free space in '@@tmpDir@@'" % freeGigaNeededInTmpDir)
-	
-	os.chdir("@@tmpDir@@")
-	if not os.path.exists("@@groupId@@"):
-		try:
-			os.mkdir("@@groupId@@")
-		except OSError, e :
-			if e.args[0] != 17:
-				raise RepetException("ERROR: can't create '@@groupId@@'")
-		os.chdir("@@groupId@@")
-		@@cmdCopy@@
-	else:
-		os.chdir("@@groupId@@")
-	
-	newDir = "@@groupId@@_@@jobName@@_@@time@@"
-	if os.path.exists(newDir):
-		shutil.rmtree(newDir)
-	os.mkdir(newDir)
-	os.chdir(newDir)
-	
-	iJob = Job(jobname = "@@jobName@@", groupid = "@@groupId@@", launcherFile = "@@launcher@@", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "@@jobTableName@@")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "running")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	
-	@@cmdStart@@
-	if log != 0:
-		raise RepetException("ERROR: job returned %i" % log)
-	else:
-		print "job finished successfully"
-	sys.stdout.flush()
-	@@cmdFinish@@
-	
-	os.chdir("..")
-	shutil.rmtree(newDir)
-	
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "@@jobTableName@@")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "finished")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	
-	endTime = time.time()
-	print 'endTime=%f' % endTime
-	print 'executionTime=%f' % (endTime - beginTime)
-	print os.uname()
-	sys.stdout.flush()
-
-except IOError, e :
-	print e
-	iJob = Job(jobname = "@@jobName@@", groupid = "@@groupId@@", launcherFile = "@@launcher@@", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "@@jobTableName@@")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "error")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	sys.exit(1)
-
-except Exception, e :
-	print "tmpDir is : @@tmpDir@@"
-	print "cDir is : @@cDir@@"
-	print e
-	if newDir != None and os.path.exists("../%s" % newDir) and not os.path.exists("@@cDir@@/%s" % newDir):
-		os.chdir("..")
-		shutil.move(newDir, "@@cDir@@/%s" % newDir)
-	iJob = Job(jobname = "@@jobName@@", groupid = "@@groupId@@", launcherFile = "@@launcher@@", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "@@jobTableName@@")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "error")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	sys.exit(1)
--- a/commons/core/launcher/Launcher.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,229 +0,0 @@
-from commons.tools.CleanClusterNodesAfterRepet import CleanClusterNodesAfterRepet
-from commons.core.stat.Stat import Stat
-from commons.core.launcher.WriteScript import WriteScript
-from commons.core.sql.TableJobAdaptatorFactory import TableJobAdaptatorFactory
-from commons.core.sql.Job import Job
-import stat
-import os
-import re
-import sys
-import time
-import glob
-
-class Launcher(object):
-
-    #TODO: remove unused parameters : query="", subject="", param="", job_table=""
-    def __init__( self, jobdb, query="", subject="", param="", cdir="",
-                  tmpdir="", job_table="", queue="", groupid="", acro="X",
-                  chooseTemplateWithCopy = False, chooseTemplateLight = False):
-        if jobdb.__class__.__name__ == "RepetJob":
-            self.jobdb = TableJobAdaptatorFactory.createInstance(jobdb, "jobs")
-        else:
-            self.jobdb = jobdb
-        self.jobdb.checkJobTable()
-        if cdir == "":
-            cdir = os.getcwd()
-        self.cdir = cdir
-        self.tmpdir = tmpdir
-        self.groupid = groupid
-        self.acronyme = acro
-        self._chooseTemplateWithCopy = chooseTemplateWithCopy
-        self._chooseTemplateLight = chooseTemplateLight
-        self.queue, self.lResources = self.getQueueNameAndResources(queue)
-        self._createJobInstance()
-        self._nbJobs = 0
-        
-    def getQueueNameAndResources(self, configQueue):
-        tokens = configQueue.replace("'","").split(" ")
-        queueName = ""
-        lResources = []
-        if tokens[0] != "":
-            if re.match(".*\.q", tokens[0]):
-                queueName = tokens[0]
-                lResources = tokens[1:]
-            else:
-                lResources = tokens
-        return queueName, lResources
-
-    def createGroupidIfItNotExist(self):
-        if self.groupid == "":
-            self.job.groupid = str(os.getpid())
-        else:
-            self.job.groupid = self.groupid
-
-    def beginRun( self ):
-        self.createGroupidIfItNotExist()
-        if self.jobdb.hasUnfinishedJob(self.job.groupid):
-            self.jobdb.waitJobGroup(self.job.groupid)
-        else:
-            self.jobdb.cleanJobGroup(self.job.groupid)
-
-    ## Launch one job in parallel
-    #
-    # @param cmdStart string command-line for the job to be launched
-    # @param cmdFinish string command to retrieve result files
-    # @warning the jobname has to be defined outside from this method
-    #
-    def runSingleJob(self, cmdStart, cmdFinish = "", cmdSize = "", cmdCopy = ""):
-        if self._nbJobs == 0:
-            self._nbJobs = 1
-        pid = str(os.getpid())
-        now = time.localtime()
-        #TODO: rename ClusterLauncher_ ...
-        pyFileName = self.cdir + "/ClusterLauncher_" + self.job.groupid + "_" +\
-                     self.job.jobname + "_" + str(now[0]) + "-" + str(now[1]) +\
-                     "-" + str(now[2]) + "_" + pid + ".py"
-        self.job.launcher = pyFileName
-        
-        #TODO: to remove when refactoring is done
-        cmdStart = self._indentCmd(cmdStart)
-        cmdFinish = self._indentCmd(cmdFinish)
-        
-        iWriteScript = WriteScript(self.job, self.jobdb, self.cdir, self.tmpdir, self._chooseTemplateWithCopy, self._chooseTemplateLight)
-        iWriteScript.run(cmdStart, cmdFinish, pyFileName, cmdSize, cmdCopy)
-        os.chmod(pyFileName, stat.S_IRWXU+stat.S_IRGRP+stat.S_IXGRP+stat.S_IROTH+stat.S_IXOTH)
-        sys.stdout.flush()
-        log = self.jobdb.submitJob(self.job)
-        if log != 0:
-            print "ERROR while submitting job to the cluster"
-            sys.exit(1)
-        
-    def endRun(self, cleanNodes = False):
-        string = "waiting for %i job(s) with groupid '%s' (%s)" % (self._nbJobs, self.job.groupid, time.strftime("%Y-%m-%d %H:%M:%S"))
-        print string; sys.stdout.flush()
-        self.jobdb.waitJobGroup(self.job.groupid)
-        if self._nbJobs > 1:
-            string = "all jobs with groupid '%s' are finished (%s)" % (self.job.groupid, time.strftime("%Y-%m-%d %H:%M:%S"))
-            print string; sys.stdout.flush()
-
-        if cleanNodes:
-            string = "start cleaning cluster nodes (%s)" % time.strftime("%Y-%m-%d %H:%M:%S")
-            print string; sys.stdout.flush()
-            self.cleanNodes()
-            string = "end cleaning cluster nodes (%s)" % time.strftime("%Y-%m-%d %H:%M:%S")
-            print string; sys.stdout.flush()
-            
-        statsExecutionTime = self.getStatsOfExecutionTime()
-        if self._nbJobs > 1:
-            print "execution time of all jobs (seconds): %f" % statsExecutionTime.getSum()
-        print "execution time per job: %s" % statsExecutionTime.string()
-        sys.stdout.flush()
-        self.jobdb.cleanJobGroup(self.job.groupid)
-        
-    def getStatsOfExecutionTime(self, acronyme = ""):
-        stat = Stat()
-        if acronyme == "":
-            pattern = "%s*.o*" % self.acronyme
-        else:
-            pattern = "%s*.o*" % acronyme
-        lJobFiles = glob.glob(pattern)
-        for f in lJobFiles:
-            fH = open(f, "r")
-            while True:
-                line = fH.readline()
-                if line == "":
-                    break
-                if "executionTime" in line:
-                    stat.add( float(line[:-1].split("=")[1] ) )
-                    break
-            fH.close()
-        return stat     
-
-    def clean( self, acronyme = "", stdout = True, stderr = True ):
-        lFileToRemove = []
-        if acronyme == "":
-            acronyme = self.acronyme  
-        pattern = "ClusterLauncher*%s*.py" % ( acronyme )
-        lFileToRemove.extend(glob.glob( pattern ))
-        if stdout:
-            pattern = "%s*.o*" % ( acronyme )
-            lFileToRemove.extend(glob.glob( pattern ))        
-        if stderr:
-            pattern = "%s*.e*" % ( acronyme )
-            lFileToRemove.extend(glob.glob( pattern ))                   
-        for file in lFileToRemove:
-            os.remove(file)
-    
-    #TODO: handle of nodesMustBeCleaned => class attribute ?
-    def runLauncherForMultipleJobs(self, acronymPrefix, lCmdsTuples, cleanMustBeDone = True, nodesMustBeCleaned = False):
-        self.beginRun()
-        print "submitting job(s) with groupid '%s' (%s)" % (self.job.groupid,  time.strftime("%Y-%m-%d %H:%M:%S"))
-        for cmdsTuple in lCmdsTuples:
-            self._nbJobs += 1
-            self.acronyme = "%s_%s" % (acronymPrefix, self._nbJobs)
-            self.job.jobname = self.acronyme
-            if len(cmdsTuple) == 2:
-                self.runSingleJob(cmdsTuple[0], cmdsTuple[1])
-            else:
-                self.runSingleJob(cmdsTuple[0], cmdsTuple[1], cmdsTuple[2], cmdsTuple[3])
-                self._createJobInstance()
-                self.createGroupidIfItNotExist()
-        self.acronyme = acronymPrefix
-        self.endRun(nodesMustBeCleaned)
-        if cleanMustBeDone:
-            self.clean("%s_" % acronymPrefix)
-        self.jobdb.close()
-
-    def prepareCommands(self, lCmds, lCmdStart = [], lCmdFinish = [], lCmdSize = [], lCmdCopy = []):
-        cmdStart = ""
-        for cmd in lCmdStart:
-            cmdStart += "%s\n\t" % cmd
-        for cmd in lCmds:
-            cmdStart += "%s\n\t" % cmd
-        cmdFinish = ""
-        for cmd in lCmdFinish:
-            cmdFinish += "%s\n\t" % cmd
-        cmdSize = ""
-        for cmd in lCmdSize:
-            cmdSize += "%s\n\t\t" % cmd
-        cmdCopy = ""
-        for cmd in lCmdCopy:
-            cmdCopy += "%s\n\t\t" % cmd
-        return (cmdStart, cmdFinish, cmdSize, cmdCopy)
-
-    #TODO: to remove when refactoring is done
-    def prepareCommands_withoutIndentation(self, lCmds, lCmdStart = [], lCmdFinish = [], lCmdSize = [], lCmdCopy = []):
-        cmdStart = ""
-        for cmd in lCmdStart:
-            cmdStart += "%s\n" % cmd
-        for cmd in lCmds:
-            cmdStart += "%s\n" % cmd
-        cmdFinish = ""
-        for cmd in lCmdFinish:
-            cmdFinish += "%s\n" % cmd
-        cmdSize = ""
-        for cmd in lCmdSize:
-            cmdSize += "%s\n\t\t" % cmd
-        cmdCopy = ""
-        for cmd in lCmdCopy:
-            cmdCopy += "%s\n\t\t" % cmd
-        return (cmdStart, cmdFinish, cmdSize, cmdCopy)
-    
-    def getSystemCommand(self, prg, lArgs):
-        systemCmd = "log = os.system(\"" + prg 
-        for arg in lArgs:
-            systemCmd += " " + arg
-        systemCmd += "\")"
-        return systemCmd
-
-    def cleanNodes(self):
-        iCleanClusterNodeAfterRepet = CleanClusterNodesAfterRepet()
-        iCleanClusterNodeAfterRepet.setLNodes(self.jobdb.getNodesListByGroupId(self.groupid))
-        iCleanClusterNodeAfterRepet.setTempDirectory(self.tmpdir)
-        iCleanClusterNodeAfterRepet.setPattern("%s*" % self.groupid)
-        iCleanClusterNodeAfterRepet.run()
-
-    #TODO: to remove when refactoring is done
-    def _indentCmd(self, cmd):
-        lCmd = cmd.split("\n")
-        cmd_Tab = "%s\n" % lCmd[0]
-        for line in lCmd[1:-1]:
-            cmd_Tab += "\t%s\n" % line
-        return cmd_Tab
-    
-    def _createJobInstance(self):
-        if self.lResources == []:
-            #To have mem_free=1G:
-            self.job = Job(queue=self.queue)
-        else:
-            self.job = Job(queue=self.queue, lResources=self.lResources)
--- a/commons/core/launcher/Launcher2.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,294 +0,0 @@
-from commons.tools.CleanClusterNodesAfterRepet import CleanClusterNodesAfterRepet
-from commons.core.stat.Stat import Stat
-from commons.core.launcher.WriteScript import WriteScript
-from commons.core.sql.TableJobAdaptatorFactory import TableJobAdaptatorFactory
-from commons.core.sql.Job import Job
-import stat
-import os
-import re
-import sys
-import time
-import glob
-
-class LauncherParameter(object):
-
-    def __init__(self, jobDB):
-        self._jobDB = jobDB
-    
-    def getJobDB(self):
-        return self._jobDB
-
-    def setQuery(self, query):
-        self._query = query
-
-    def setSubject(self, subject):
-        self._subject = subject
-        
-    def setParam(self, param):
-        self._param = param
-    
-    def setCurrentDir(self, currentDir):
-        self._currentDir = currentDir
-    
-    def getCurrentDir(self):
-        return self._currentDir    
-
-    def setTempDir(self, tempDir):
-        self._tempDir = tempDir
-    
-    def getTempDir(self):
-        return self._tempDir
-        
-    def setJobTable(self, jobTable):
-        self._jobTable = jobTable
-        
-    def setQueue(self, queue):
-        self._queue = queue
-    
-    def getQueue(self):
-        return self._queue
-        
-    def setGroupId(self, groupId):
-        self._groupId = groupId
-    
-    def getGroupId(self):
-        return self._groupId
-    
-    def setAcronym(self, acronym):
-        self._acronym = acronym
-    
-    def getAcronym(self):
-        return self._acronym
-   
-    @staticmethod
-    def createParameter(jobdb, groupid, acronym):
-	launcherParameter = LauncherParameter(jobdb)
-        launcherParameter.setQuery(os.getcwd())
-        launcherParameter.setSubject("")
-        launcherParameter.setParam("")
-        launcherParameter.setCurrentDir(os.getcwd())
-        launcherParameter.setTempDir(os.getcwd())
-        launcherParameter.setJobTable("")
-        launcherParameter.setQueue("")
-        launcherParameter.setGroupId(groupid)
-        launcherParameter.setAcronym(acronym)
-	return launcherParameter       
-
-        
-class Launcher2(object):
-
-    #TODO: remove unused parameters : query="", subject="", param="", job_table=""
-    def __init__(self, iLauncherParameter):
-        jobdb = iLauncherParameter.getJobDB()
-        cdir = iLauncherParameter.getCurrentDir()
-        if jobdb.__class__.__name__ == "RepetJob":
-            self.jobdb = TableJobAdaptatorFactory.createInstance(jobdb, "jobs")
-        else:
-            self.jobdb = jobdb
-        self.jobdb.checkJobTable()
-        if cdir == "":
-            cdir = os.getcwd()
-        self.cdir = cdir
-        self.tmpdir = iLauncherParameter.getTempDir()
-        self.groupid = iLauncherParameter.getGroupId()
-        self.acronyme = iLauncherParameter.getAcronym()
-        self._chooseTemplateWithCopy = False
-        self._chooseTemplateLight = False
-        self.queue, self.lResources = self.getQueueNameAndResources(iLauncherParameter.getQueue())
-        self._createJobInstance()
-        self._nbJobs = 0
-        
-    def getQueueNameAndResources(self, configQueue):
-        tokens = configQueue.replace("'","").split(" ")
-        queueName = ""
-        lResources = []
-        if tokens[0] != "":
-            if re.match(".*\.q", tokens[0]):
-                queueName = tokens[0]
-                lResources = tokens[1:]
-            else:
-                lResources = tokens
-        return queueName, lResources
-
-    def createGroupidIfItNotExist(self):
-        if self.groupid == "":
-            self.job.groupid = str(os.getpid())
-        else:
-            self.job.groupid = self.groupid
-
-    def beginRun( self ):
-        self.createGroupidIfItNotExist()
-        if self.jobdb.hasUnfinishedJob(self.job.groupid):
-            self.jobdb.waitJobGroup(self.job.groupid)
-        else:
-            self.jobdb.cleanJobGroup(self.job.groupid)
-
-    ## Launch one job in parallel
-    #
-    # @param cmdStart string command-line for the job to be launched
-    # @param cmdFinish string command to retrieve result files
-    # @warning the jobname has to be defined outside from this method
-    #
-    def runSingleJob(self, cmdStart, cmdFinish = "", cmdSize = "", cmdCopy = ""):
-        if self._nbJobs == 0:
-            self._nbJobs = 1
-        pid = str(os.getpid())
-        now = time.localtime()
-        #TODO: rename ClusterLauncher_ ...
-        pyFileName = self.cdir + "/ClusterLauncher_" + self.job.groupid + "_" +\
-                     self.job.jobname + "_" + str(now[0]) + "-" + str(now[1]) +\
-                     "-" + str(now[2]) + "_" + pid + ".py"
-        self.job.launcher = pyFileName
-        
-        #TODO: to remove when refactoring is done
-        cmdStart = self._indentCmd(cmdStart)
-        cmdFinish = self._indentCmd(cmdFinish)
-        
-        iWriteScript = WriteScript(self.job, self.jobdb, self.cdir, self.tmpdir, self._chooseTemplateWithCopy, self._chooseTemplateLight)
-        iWriteScript.run(cmdStart, cmdFinish, pyFileName, cmdSize, cmdCopy)
-        os.chmod(pyFileName, stat.S_IRWXU+stat.S_IRGRP+stat.S_IXGRP+stat.S_IROTH+stat.S_IXOTH)
-        sys.stdout.flush()
-        log = self.jobdb.submitJob(self.job)
-        if log != 0:
-            print "ERROR while submitting job to the cluster"
-            sys.exit(1)
-        
-    def endRun(self, cleanNodes = False):
-        string = "waiting for %i job(s) with groupid '%s' (%s)" % (self._nbJobs, self.job.groupid, time.strftime("%Y-%m-%d %H:%M:%S"))
-        print string; sys.stdout.flush()
-        self.jobdb.waitJobGroup(self.job.groupid)
-        if self._nbJobs > 1:
-            string = "all jobs with groupid '%s' are finished (%s)" % (self.job.groupid, time.strftime("%Y-%m-%d %H:%M:%S"))
-            print string; sys.stdout.flush()
-
-        if cleanNodes:
-            string = "start cleaning cluster nodes (%s)" % time.strftime("%Y-%m-%d %H:%M:%S")
-            print string; sys.stdout.flush()
-            self.cleanNodes()
-            string = "end cleaning cluster nodes (%s)" % time.strftime("%Y-%m-%d %H:%M:%S")
-            print string; sys.stdout.flush()
-            
-        statsExecutionTime = self.getStatsOfExecutionTime()
-        if self._nbJobs > 1:
-            print "execution time of all jobs (seconds): %f" % statsExecutionTime.getSum()
-        print "execution time per job: %s" % statsExecutionTime.string()
-        sys.stdout.flush()
-        self.jobdb.cleanJobGroup(self.job.groupid)
-        
-    def getStatsOfExecutionTime(self, acronyme = ""):
-        stat = Stat()
-        if acronyme == "":
-            pattern = "%s*.o*" % self.acronyme
-        else:
-            pattern = "%s*.o*" % acronyme
-        lJobFiles = glob.glob(pattern)
-        for f in lJobFiles:
-            fH = open(f, "r")
-            while True:
-                line = fH.readline()
-                if line == "":
-                    break
-                if "executionTime" in line:
-                    stat.add( float(line[:-1].split("=")[1] ) )
-                    break
-            fH.close()
-        return stat     
-
-    def clean( self, acronyme = "", stdout = True, stderr = True ):
-        lFileToRemove = []
-        if acronyme == "":
-            acronyme = self.acronyme  
-        pattern = "ClusterLauncher*%s*.py" % ( acronyme )
-        lFileToRemove.extend(glob.glob( pattern ))
-        if stdout:
-            pattern = "%s*.o*" % ( acronyme )
-            lFileToRemove.extend(glob.glob( pattern ))        
-        if stderr:
-            pattern = "%s*.e*" % ( acronyme )
-            lFileToRemove.extend(glob.glob( pattern ))                   
-        for file in lFileToRemove:
-            os.remove(file)
-    
-    #TODO: handle of nodesMustBeCleaned => class attribute ?
-    def runLauncherForMultipleJobs(self, acronymPrefix, lCmdsTuples, cleanMustBeDone = True, nodesMustBeCleaned = False):
-        self.beginRun()
-        print "submitting job(s) with groupid '%s' (%s)" % (self.job.groupid,  time.strftime("%Y-%m-%d %H:%M:%S"))
-        for cmdsTuple in lCmdsTuples:
-            self._nbJobs += 1
-            self.acronyme = "%s_%s" % (acronymPrefix, self._nbJobs)
-            self.job.jobname = self.acronyme
-            if len(cmdsTuple) == 2:
-                self.runSingleJob(cmdsTuple[0], cmdsTuple[1])
-            else:
-                self.runSingleJob(cmdsTuple[0], cmdsTuple[1], cmdsTuple[2], cmdsTuple[3])
-                self._createJobInstance()
-                self.createGroupidIfItNotExist()
-        self.acronyme = acronymPrefix
-        self.endRun(nodesMustBeCleaned)
-        if cleanMustBeDone:
-            self.clean("%s_" % acronymPrefix)
-        self.jobdb.close()
-
-    def prepareCommands(self, lCmds, lCmdStart = [], lCmdFinish = [], lCmdSize = [], lCmdCopy = []):
-        cmdStart = ""
-        for cmd in lCmdStart:
-            cmdStart += "%s\n\t" % cmd
-        for cmd in lCmds:
-            cmdStart += "%s\n\t" % cmd
-        cmdFinish = ""
-        for cmd in lCmdFinish:
-            cmdFinish += "%s\n\t" % cmd
-        cmdSize = ""
-        for cmd in lCmdSize:
-            cmdSize += "%s\n\t\t" % cmd
-        cmdCopy = ""
-        for cmd in lCmdCopy:
-            cmdCopy += "%s\n\t\t" % cmd
-        return (cmdStart, cmdFinish, cmdSize, cmdCopy)
-
-    #TODO: to remove when refactoring is done
-    def prepareCommands_withoutIndentation(self, lCmds, lCmdStart = [], lCmdFinish = [], lCmdSize = [], lCmdCopy = []):
-        cmdStart = ""
-        for cmd in lCmdStart:
-            cmdStart += "%s\n" % cmd
-        for cmd in lCmds:
-            cmdStart += "%s\n" % cmd
-        cmdFinish = ""
-        for cmd in lCmdFinish:
-            cmdFinish += "%s\n" % cmd
-        cmdSize = ""
-        for cmd in lCmdSize:
-            cmdSize += "%s\n\t\t" % cmd
-        cmdCopy = ""
-        for cmd in lCmdCopy:
-            cmdCopy += "%s\n\t\t" % cmd
-        return (cmdStart, cmdFinish, cmdSize, cmdCopy)
-    
-    def getSystemCommand(self, prg, lArgs):
-        systemCmd = "log = os.system(\"" + prg 
-        for arg in lArgs:
-            systemCmd += " " + arg
-        systemCmd += "\")"
-        return systemCmd
-
-    def cleanNodes(self):
-        iCleanClusterNodeAfterRepet = CleanClusterNodesAfterRepet()
-        iCleanClusterNodeAfterRepet.setLNodes(self.jobdb.getNodesListByGroupId(self.groupid))
-        iCleanClusterNodeAfterRepet.setTempDirectory(self.tmpdir)
-        iCleanClusterNodeAfterRepet.setPattern("%s*" % self.groupid)
-        iCleanClusterNodeAfterRepet.run()
-
-    #TODO: to remove when refactoring is done
-    def _indentCmd(self, cmd):
-        lCmd = cmd.split("\n")
-        cmd_Tab = "%s\n" % lCmd[0]
-        for line in lCmd[1:-1]:
-            cmd_Tab += "\t%s\n" % line
-        return cmd_Tab
-    
-    def _createJobInstance(self):
-        if self.lResources == []:
-            #To have mem_free=1G:
-            self.job = Job(queue=self.queue)
-        else:
-            self.job = Job(queue=self.queue, lResources=self.lResources)
--- a/commons/core/launcher/LauncherUtils.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-class LauncherUtils(object):
-
-    @staticmethod
-    def createHomogeneousSizeList(lStringSizeTuples, maxSize):
-        lStringSizeTuplesSorted = sorted(lStringSizeTuples, key=lambda stringSizeTuple:(stringSizeTuple[1], stringSizeTuple[0]), reverse = True)
-        lStringSizeList = []
-        lStringSize = []
-        sumTupleSize = 0
-        iteratorFromBegin = 0
-        iteratorFromEnd = len(lStringSizeTuplesSorted) - 1
-        for tuple in lStringSizeTuplesSorted:
-            if sumTupleSize + tuple[1] < maxSize:
-                lStringSize.append(tuple[0])
-                sumTupleSize += tuple[1]
-            elif tuple[1] >= maxSize:
-                lStringSizeList.append([tuple[0]])
-            else:
-                tupleFromEnd = lStringSizeTuplesSorted[iteratorFromEnd]
-                while sumTupleSize + tupleFromEnd[1] < maxSize and iteratorFromBegin < iteratorFromEnd:
-                    lStringSize.append(tupleFromEnd[0])
-                    sumTupleSize += tupleFromEnd[1]
-                    del lStringSizeTuplesSorted[iteratorFromEnd]
-                    iteratorFromEnd -= 1
-                    tupleFromEnd = lStringSizeTuplesSorted[iteratorFromEnd]
-                lStringSizeList.append(lStringSize)
-                lStringSize = [tuple[0]]
-                sumTupleSize = tuple[1]
-            iteratorFromBegin += 1
-        if lStringSize:
-            lStringSizeList.append(lStringSize)
-        return lStringSizeList      
\ No newline at end of file
--- a/commons/core/launcher/WriteScript.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,76 +0,0 @@
-import os
-import time
-
-class WriteScript(object):
-
-    def __init__(self, job = None, jobdb = None, cdir = "", tmpdir = "", chooseTemplateWithCopy = False, chooseTemplateLight = False):
-        self._iJob = job
-        self._iJobdb = jobdb
-        self._cDir = cdir
-        self._tmpDir = tmpdir
-        self._chooseTemplateWithCopy = chooseTemplateWithCopy
-        self._chooseTemplateLight = chooseTemplateLight
-
-    def run(self, cmdStart, cmdFinish, pyFileName, cmdSize = "", cmdCopy = ""):
-        if self._chooseTemplateLight:
-            d = self.createJobScriptLightDict(cmdStart, cmdFinish, cmdSize, cmdCopy)
-        else:
-            d = self.createJobScriptDict(cmdStart, cmdFinish, cmdSize, cmdCopy)
-        self.fillTemplate(pyFileName, d)
-    
-    def fillTemplate(self, outputFileName, dict):
-        if self._chooseTemplateWithCopy:
-            inputFileName = "%s/commons/core/launcher/JobScriptWithFilesCopyTemplate.py" % os.environ["REPET_PATH"]
-        else:
-            inputFileName = "%s/commons/core/launcher/JobScriptTemplate.py" % os.environ["REPET_PATH"]
-
-        if self._chooseTemplateLight:
-            inputFileName = "%s/commons/core/launcher/JobScriptTemplateLight.py" % os.environ["REPET_PATH"]
-            
-        input = open(inputFileName, "r")
-        data = input.read()
-        input.close()
-        for key, value in dict.items():
-            data = data.replace("@@%s@@" % key, value)
-        output = open(outputFileName, "w")
-        output.write(data)
-        output.close()
-    
-    def createJobScriptDict(self, cmdStart, cmdFinish, cmdSize, cmdCopy):
-        dict = {
-         "tmpDir" : self._tmpDir,
-         "jobTableName" : self._iJobdb._table,
-         "groupId" : self._iJob.groupid,
-         "jobName" : self._iJob.jobname,
-         "launcher" : self._iJob.launcher,
-         "time" : time.strftime("%Y%m%d-%H%M%S"),
-         "repet_path" : os.environ["REPET_PATH"],
-         "repet_host" : os.environ["REPET_HOST"],
-         "repet_user" : os.environ["REPET_USER"],
-         "repet_pw" : os.environ["REPET_PW"],
-         "repet_db" : os.environ["REPET_DB"],
-         "repet_port" : os.environ["REPET_PORT"],
-         "cmdStart" : cmdStart,
-         "cmdFinish" : cmdFinish,
-         "cDir" : self._cDir,
-         "cmdSize" : cmdSize,
-         "cmdCopy" : cmdCopy
-            }      
-        return dict
-    
-    def createJobScriptLightDict(self, cmdStart, cmdFinish, cmdSize, cmdCopy):
-        dict = {
-         "tmpDir" : self._tmpDir,
-         "jobTableName" : self._iJobdb._table,
-         "groupId" : self._iJob.groupid,
-         "jobName" : self._iJob.jobname,
-         "launcher" : self._iJob.launcher,
-         "time" : time.strftime("%Y%m%d-%H%M%S"),
-         "repet_path" : os.environ["REPET_PATH"],
-         "cmdStart" : cmdStart,
-         "cmdFinish" : cmdFinish,
-         "cDir" : self._cDir,
-         "cmdSize" : cmdSize,
-         "cmdCopy" : cmdCopy
-            }      
-        return dict
--- a/commons/core/launcher/test/Test_Launcher.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,330 +0,0 @@
-from commons.core.utils.FileUtils import FileUtils
-from commons.core.launcher.Launcher import Launcher
-from commons.core.launcher.WriteScript import WriteScript
-from commons.core.stat.Stat import Stat
-from commons.core.sql.TableJobAdaptatorFactory import TableJobAdaptatorFactory
-from commons.core.sql.DbFactory import DbFactory
-from commons.core.sql.Job import Job
-import unittest
-import os
-import shutil
-import time
-import stat
-
-#TODO: Test_F_Launcher.py : to execute prepareCommands() and runSingleJob()
-#                            to test runLauncherForMultipleJobs()
-#TODO: check clean of "Test_runSingleJob"
-#TODO: refactoring => choose between "self._queue" or "lResources" to set resources
-class Test_Launcher(unittest.TestCase):
-
-    SARUMAN_NAME = "compute-2-46.local"
-    
-    def setUp(self):
-        self._cDir = os.getcwd()
-        self._tmpDir = self._cDir
-        self._groupid = "test"
-        self._jobTable = "dummyJobTable"
-        self._iDb = DbFactory.createInstance()
-        self._iDb.createTable(self._jobTable, "jobs", overwrite = True)
-        self._jobdb = TableJobAdaptatorFactory.createInstance(self._iDb, self._jobTable)
-        self._queue = ""
-        self._configFileName = "dummyConfigFile"
-    
-    def tearDown(self):
-        self._iDb.dropTable(self._jobTable)
-        self._iDb.close()
-        FileUtils.removeFilesByPattern('*.e*')
-        FileUtils.removeFilesByPattern('*.o*')
-        FileUtils.removeFilesByPattern('launcherFileTest_BeginRun.py')
-        FileUtils.removeFilesByPattern(self._configFileName)
-        FileUtils.removeFilesByPattern('ClusterLauncher_*')
-        
-    def test__init__wrong_fields_for_job_table(self):
-        self._iDb.dropTable(self._jobTable)
-        sqlCmd = "CREATE TABLE " + self._jobTable 
-        sqlCmd += " ( jobid INT UNSIGNED"
-        sqlCmd += ", jobname VARCHAR(255)"
-        sqlCmd += ", groupid VARCHAR(255)"
-        sqlCmd += ", command TEXT"
-        sqlCmd += ", launcher VARCHAR(1024)"
-        sqlCmd += ", queue VARCHAR(255)"
-        sqlCmd += ", status VARCHAR(255)"
-        sqlCmd += ", time DATETIME"
-        sqlCmd += ", node VARCHAR(255) )"
-        self._iDb.execute(sqlCmd)
-        acronym = "Test__init__"
-        iLauncher = Launcher(self._jobdb, os.getcwd(), "", "", self._cDir, self._tmpDir, "", self._queue, self._groupid, acronym)
-        lExpFields = sorted(["jobid", "jobname", "groupid", "launcher", "queue", "resources", "status", "time", "node"])
-        lObsFields = sorted(self._iDb.getFieldList(self._jobTable))
-        self.assertEquals(lExpFields, lObsFields)
-        expJob = Job(queue = self._queue)
-        obsJob = iLauncher.job
-        self.assertEquals(expJob, obsJob)
-        
-    def test__init__withResources(self):
-        queue = "main.q mem_free=3G"
-        acronym = "Test__init__"
-        expQueue = "main.q"
-        explResources = ['mem_free=3G']
-        expJob = Job(queue = expQueue, lResources = explResources)
-        iLauncher = Launcher(self._jobdb, os.getcwd(), "", "", self._cDir, self._tmpDir, "", queue, self._groupid, acronym)
-        obsJob = iLauncher.job
-        self.assertEquals(expJob, obsJob)
-
-    def test_createGroupidIfItNotExist(self):
-        acronym = "checkGroupID"
-        iLauncher = Launcher(self._jobdb, os.getcwd(), "", "", self._cDir, self._tmpDir, "", self._queue, self._groupid, acronym)
-        iLauncher.createGroupidIfItNotExist()
-        obsGroupid = iLauncher.job.groupid
-        self.assertEquals(self._groupid, obsGroupid)
-
-    def test_createGroupidIfItNotExist_without_groupid(self):
-        groupid = ""
-        acronym = "checkGroupID"
-        iLauncher = Launcher(self._jobdb, os.getcwd(), "", "", self._cDir, self._tmpDir, "", self._queue, groupid, acronym)
-        iLauncher.createGroupidIfItNotExist()
-        obsGroupid = iLauncher.job.groupid
-        self.assertTrue(obsGroupid != "")
-        
-    def test_beginRun_with_Job_finished_in_Table(self):
-        acronym = "BeginRun"
-        iJob = Job(queue = self._queue)
-        self._jobdb.recordJob(iJob)
-        self._jobdb.changeJobStatus(iJob, "finished")
-        iLauncher = Launcher(self._jobdb, os.getcwd(), "", "", self._cDir, self._tmpDir, "", self._queue, self._groupid, acronym)
-        iLauncher.beginRun()
-        self.assertTrue(self._jobdb.getCountStatus(self._groupid, "finished") == 0)
-        
-    def test_beginRun_with_Job_unfinished_in_Table(self):
-        acronym = "testU_BeginRun"
-        cmd_start = "log = os.system( \"date;sleep 10;date\" )\n"
-        pyFileName = "%s/launcherFileTest_BeginRun.py" % os.getcwd()
-        if Test_Launcher.SARUMAN_NAME == os.getenv("HOSTNAME"):
-            iJob = Job(1, acronym, self._groupid, "", cmd_start, pyFileName, lResources=["test=TRUE"])
-        else: 
-            iJob = Job(1, acronym, self._groupid, "", cmd_start, pyFileName)
-        iWriteScript = WriteScript(iJob, self._jobdb, self._cDir, self._tmpDir)
-        iWriteScript.run(cmd_start, "", pyFileName)
-        os.chmod(pyFileName, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
-        self._jobdb.submitJob(iJob)
-        iLauncher = Launcher(self._jobdb, os.getcwd(), "", "", self._cDir, self._tmpDir, "", self._queue, self._groupid, acronym)
-        
-        iLauncher.beginRun()
-        
-        self.assertTrue(self._jobdb.getCountStatus(self._groupid, "finished") == 1)
-    
-    def test_getStatsOfExecutionTime(self):
-        acronym = "test_statTime"
-        
-        expLValues = [1000.00000, 1000.00000]
-        expStat = Stat(expLValues) 
-        
-        f = open(acronym +".o1", "w")
-        f.write("executionTime=1000.000000")
-        f.close()
-        f = open(acronym +".o2", "w")
-        f.write("executionTime=1000.000000")
-        f.close()
-        
-        iLauncher = Launcher(self._jobdb, os.getcwd(), "", "", self._cDir, self._tmpDir, "", self._queue, self._groupid, acronym)
-        obsStat = iLauncher.getStatsOfExecutionTime(acronym)
-        
-        self.assertEqual(expStat, obsStat)           
-           
-    def test_endRun(self):
-        acronym = "testU_EndRun"
-        cmd_start = "log = os.system( \"date;sleep 10;date\" )\n"
-        pyFileName = "%s/launcherFileTest_EndRun.py" % os.getcwd()
-        if Test_Launcher.SARUMAN_NAME == os.getenv("HOSTNAME"):
-            iJob = Job(1, acronym, self._groupid, "", cmd_start, pyFileName, lResources=["test=TRUE"])
-        else: 
-            iJob = Job(1, acronym, self._groupid, "", cmd_start, pyFileName)
- 
-        iWriteScript = WriteScript(iJob, self._jobdb, self._cDir, self._tmpDir)
-        iWriteScript.run(cmd_start, "", pyFileName)
-        os.chmod(pyFileName, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
-        self._jobdb.submitJob(iJob)
-        iLauncher = Launcher(self._jobdb, os.getcwd(), "", "", self._cDir, self._tmpDir, "", self._queue, self._groupid, acronym)
-        iLauncher.job.groupid = self._groupid
-        iLauncher.endRun()
-        
-        self.assertTrue(self._jobdb.getCountStatus(self._groupid, "finished") == 0)
-        self.assertTrue(self._jobdb.getCountStatus(self._groupid, "error") == 0)
-        self.assertTrue(self._jobdb.getCountStatus(self._groupid, "waiting") == 0)    
-           
-        os.remove(iJob.launcher)
-
-    def test_clean(self):
-        acronym = "test_clean"
-        f = open("ClusterLauncher" + acronym + ".py", "w")
-        f.close()
-        f = open(acronym + ".o1", "w")
-        f.close()
-        f = open(acronym + ".e1", "w")
-        f.close()
-        iLauncher = Launcher(self._jobdb, os.getcwd(), "", "", self._cDir, self._tmpDir, "", self._queue, self._groupid, acronym)
-        iLauncher.clean(acronym)
-        self.assertFalse(FileUtils.isRessourceExists("ClusterLauncher" + acronym + ".py"))
-
-    def test_clean_without_acronym(self):
-        acronym = ""
-        acronym2 = "toto"
-        f = open("ClusterLauncher" + acronym2 + ".py", "w")
-        f.close()
-        f = open(acronym2 + ".o1", "w")
-        f.close()
-        f = open(acronym2 + ".e1", "w")
-        f.close()
-        iLauncher = Launcher(self._jobdb, os.getcwd(), "", "", self._cDir, self._tmpDir, "", self._queue, self._groupid, acronym2)
-        iLauncher.clean(acronym)
-        self.assertFalse(FileUtils.isRessourceExists("ClusterLauncher" + acronym2 + ".py"))
-        
-    def test_getQueueNameAndResources_queue_no_resource(self):
-        configQueue = "all.q"
-        expQueueName = "all.q"
-        expResources = []
-        iLauncher = Launcher(self._jobdb)
-        obsQueueName, obsResources = iLauncher.getQueueNameAndResources(configQueue)
-        self.assertEquals(expQueueName, obsQueueName)
-        self.assertEquals(expResources, obsResources)
-        
-    def test_getQueueNameAndResources_queue_one_resource(self):
-        configQueue = "test.q 'test=TRUE'"
-        expQueueName = "test.q"
-        expResources = ["test=TRUE"]
-        iLauncher = Launcher(self._jobdb)
-        obsQueueName, obsResources = iLauncher.getQueueNameAndResources(configQueue)
-        self.assertEquals(expQueueName, obsQueueName)
-        self.assertEquals(expResources, obsResources)
-        
-    def test_getQueueNameAndResources_queue_two_resources(self):
-        configQueue = "big.q 's_data=8G s_cpu=96:00:00'"
-        expQueueName = "big.q"
-        expResources = ["s_data=8G", "s_cpu=96:00:00"]
-        iLauncher = Launcher(self._jobdb)
-        obsQueueName, obsResources = iLauncher.getQueueNameAndResources(configQueue)
-        self.assertEquals(expQueueName, obsQueueName)
-        self.assertEquals(expResources, obsResources)
-        
-    def test_getQueueNameAndResources_no_queue_no_resource(self):
-        configQueue = ""
-        expQueueName = ""
-        expResources = []
-        iLauncher = Launcher(self._jobdb)
-        obsQueueName, obsResources = iLauncher.getQueueNameAndResources(configQueue)
-        self.assertEquals(expQueueName, obsQueueName)
-        self.assertEquals(expResources, obsResources)
-        
-    def test_getQueueNameAndResources_no_queue_one_resource(self):
-        configQueue = "s_data=8G"
-        expQueueName = ""
-        expResources = ["s_data=8G"]
-        iLauncher = Launcher(self._jobdb)
-        obsQueueName, obsResources = iLauncher.getQueueNameAndResources(configQueue)
-        self.assertEquals(expQueueName, obsQueueName)
-        self.assertEquals(expResources, obsResources)
-        
-    def test_getQueueNameAndResources_no_queue_two_resource(self):
-        configQueue = "s_data=8G s_cpu=96:00:00"
-        expQueueName = ""
-        expResources = ["s_data=8G", "s_cpu=96:00:00"]
-        iLauncher = Launcher(self._jobdb)
-        obsQueueName, obsResources = iLauncher.getQueueNameAndResources(configQueue)
-        self.assertEquals(expQueueName, obsQueueName)
-        self.assertEquals(expResources, obsResources)      
-
-#   #TODO: test with at least 2 lines in cmd
-    def test_runSingleJob(self):
-        acronym = "Test_runSingleJob"
-        os.mkdir(acronym)
-        os.chdir(acronym)
-        iLauncher = Launcher(self._jobdb, os.getcwd(), "", "", os.getcwd(), self._tmpDir, "", self._queue, self._groupid, acronym)
-        iLauncher.job.groupid = self._groupid
-        iLauncher.job.jobname = acronym
-        iLauncher.job.queue = self._queue
-        if Test_Launcher.SARUMAN_NAME == os.getenv("HOSTNAME"):
-            iLauncher.job.lResources = ["test=TRUE"]
-        cmd = "log = os.system(\"touch 'YuFei'\")\n"
-        iLauncher.runSingleJob(cmd)
-        time.sleep(20)
-        jobStatus = self._jobdb.getJobStatus(iLauncher.job)
-        os.chdir(self._cDir)
-        shutil.rmtree(acronym)
-        self.assertEqual(jobStatus, "finished")
-        
-    def test_runSingleJob_catch_error_wrong_tmpDir(self):
-        acronym = "Test_runSingleJob_catch_error"
-        os.mkdir(acronym)
-        os.chdir(acronym)
-        iLauncher = Launcher(self._jobdb, os.getcwd(), "", "", os.getcwd(), "%s/toto" % self._tmpDir, "", self._queue, self._groupid, acronym)
-        iLauncher.job.groupid = self._groupid
-        iLauncher.job.jobname = acronym
-        iLauncher.job.queue = self._queue
-        if Test_Launcher.SARUMAN_NAME == os.getenv("HOSTNAME"):
-            iLauncher.job.lResources = ["test=TRUE"]
-        cmd = "log = os.system(\"touch 'YuFei'\")\n"
-        iLauncher.runSingleJob(cmd)
-        time.sleep(20)
-        jobStatus = self._jobdb.getJobStatus(iLauncher.job) 
-        os.chdir(self._cDir)
-        shutil.rmtree(acronym)
-        self.assertEqual(jobStatus, "error")
-        
-    def test_runSingleJob_catch_error_wrong_cmd(self):
-        acronym = "Test_runSingleJob_catch_error"
-        os.mkdir(acronym)
-        os.chdir(acronym)
-        iLauncher = Launcher(self._jobdb, os.getcwd(), "", "", os.getcwd(), self._tmpDir, "", self._queue, self._groupid, acronym)
-        iLauncher.job.groupid = self._groupid
-        iLauncher.job.jobname = acronym
-        iLauncher.job.queue = self._queue
-        if Test_Launcher.SARUMAN_NAME == os.getenv("HOSTNAME"):
-            iLauncher.job.lResources = ["test=TRUE"]
-        cmd = "log = os.system(\"truc -i toto\")\n"
-        iLauncher.runSingleJob(cmd)
-        time.sleep(20)
-        jobStatus = self._jobdb.getJobStatus(iLauncher.job) 
-        self._jobdb.cleanJobGroup(self._groupid)
-        os.chdir(self._cDir)
-        shutil.rmtree(acronym)
-        self.assertEqual(jobStatus, "error")
-
-    def test_prepareCommands(self):
-        expCmdStart = "os.symlink(\"../Yufei_chunks.fa\", \"Yufei_chunks.fa\")\n\tos.symlink(\"../Yufei_chunks.fa_cut\", \"Yufei_chunks.fa_cut\")\n\tlog = os.system(\"touch file\")\n\t" 
-        expCmdFinish = "if os.path.exists(\"yufei.align\"):\n\t\tshutil.move(\"yufei.align\", \"yufeiLuo/.\" )\n\t"
-        expCmdSize = "fileSize = 3.2\n\t\t"
-        expCmdCopy = "shutil.copy(\"PY/Yufei_db/Yufei_chunks.fa\", \".\")\n\t\tshutil.copy(\"PY/Yufei_db/Yufei_chunks.fa_cut\", \".\")\n\t\t"
-        
-        lCmdStart = []
-        lCmdStart.append("os.symlink(\"../Yufei_chunks.fa\", \"Yufei_chunks.fa\")")
-        lCmdStart.append("os.symlink(\"../Yufei_chunks.fa_cut\", \"Yufei_chunks.fa_cut\")")
-        lCmds = []
-        lCmds.append("log = os.system(\"touch file\")")
-        lCmdFinish = []
-        lCmdFinish.append("if os.path.exists(\"yufei.align\"):")
-        lCmdFinish.append("\tshutil.move(\"yufei.align\", \"yufeiLuo/.\" )") 
-        lCmdSize = []
-        lCmdSize.append("fileSize = 3.2")    
-        lCmdCopy = []
-        lCmdCopy.append("shutil.copy(\"PY/Yufei_db/Yufei_chunks.fa\", \".\")")
-        lCmdCopy.append("shutil.copy(\"PY/Yufei_db/Yufei_chunks.fa_cut\", \".\")")
-
-        iLauncher = Launcher(self._jobdb)
-        obsCmdStart, obsCmdFinish, obsCmdSize, obsCmdCopy = iLauncher.prepareCommands(lCmds, lCmdStart, lCmdFinish, lCmdSize, lCmdCopy)         
-        
-        self.assertEquals(expCmdStart, obsCmdStart)
-        self.assertEquals(expCmdFinish, obsCmdFinish)      
-        self.assertEquals(expCmdSize, obsCmdSize)
-        self.assertEquals(expCmdCopy, obsCmdCopy)
-        
-    def test_getSystemCommand(self):
-        prg = "touch"
-        lArgs = []
-        lArgs.append("file")
-        expCmd = "log = os.system(\"touch file\")"
-        iLauncher = Launcher(self._jobdb)
-        obsCmd = iLauncher.getSystemCommand(prg, lArgs)
-        self.assertEquals(expCmd, obsCmd)
-
-if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
--- a/commons/core/launcher/test/Test_Launcher2.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,356 +0,0 @@
-from commons.core.utils.FileUtils import FileUtils
-from commons.core.launcher.Launcher2 import Launcher2
-from commons.core.launcher.Launcher2 import LauncherParameter
-from commons.core.launcher.WriteScript import WriteScript
-from commons.core.stat.Stat import Stat
-from commons.core.sql.TableJobAdaptatorFactory import TableJobAdaptatorFactory
-from commons.core.sql.DbFactory import DbFactory
-from commons.core.sql.Job import Job
-import unittest
-import os
-import shutil
-import time
-import stat
-
-#TODO: Test_F_Launcher2.py : to execute prepareCommands() and runSingleJob()
-#                            to test runLauncher2ForMultipleJobs()
-#TODO: check clean of "Test_runSingleJob"
-#TODO: refactoring => choose between "self._queue" or "lResources" to set resources
-class Test_Launcher2(unittest.TestCase):
-
-    SARUMAN_NAME = "compute-2-46.local"
-    
-    def setUp(self):
-        self._cDir = os.getcwd()
-        self._tmpDir = self._cDir
-        self._groupid = "test"
-        self._jobTable = "dummyJobTable"
-        self._iDb = DbFactory.createInstance()
-        self._iDb.createTable(self._jobTable, "jobs", overwrite = True)
-        self._jobdb = TableJobAdaptatorFactory.createInstance(self._iDb, self._jobTable)
-        self._queue = ""
-        self._configFileName = "dummyConfigFile"
-    
-    def tearDown(self):
-        self._iDb.dropTable(self._jobTable)
-        self._iDb.close()
-        FileUtils.removeFilesByPattern('*.e*')
-        FileUtils.removeFilesByPattern('*.o*')
-        FileUtils.removeFilesByPattern('Launcher2FileTest_BeginRun.py')
-        FileUtils.removeFilesByPattern(self._configFileName)
-        FileUtils.removeFilesByPattern('ClusterLauncher2_*')
-        
-    def test__init__wrong_fields_for_job_table(self):
-        self._iDb.dropTable(self._jobTable)
-        sqlCmd = "CREATE TABLE " + self._jobTable 
-        sqlCmd += " ( jobid INT UNSIGNED"
-        sqlCmd += ", jobname VARCHAR(255)"
-        sqlCmd += ", groupid VARCHAR(255)"
-        sqlCmd += ", command TEXT"
-        sqlCmd += ", Launcher2 VARCHAR(1024)"
-        sqlCmd += ", queue VARCHAR(255)"
-        sqlCmd += ", status VARCHAR(255)"
-        sqlCmd += ", time DATETIME"
-        sqlCmd += ", node VARCHAR(255) )"
-        self._iDb.execute(sqlCmd)
-        acronym = "Test__init__"
-	launcherParameter =  LauncherParameter.createParameter(self._jobdb, self._groupid, acronym)
-        iLauncher2 = Launcher2(launcherParameter)
-
-
-        lExpFields = sorted(["jobid", "jobname", "groupid", "launcher", "queue", "resources", "status", "time", "node"])
-        lObsFields = sorted(self._iDb.getFieldList(self._jobTable))
-        self.assertEquals(lExpFields, lObsFields)
-        expJob = Job(queue = self._queue)
-        obsJob = iLauncher2.job
-        self.assertEquals(expJob, obsJob)
-        
-    def test__init__withResources(self):
-        queue = "main.q mem_free=3G"
-        acronym = "Test__init__"
-        expQueue = "main.q"
-        explResources = ['mem_free=3G']
-        expJob = Job(queue = expQueue, lResources = explResources)
-        
-	launcherParameter =  LauncherParameter.createParameter(self._jobdb, self._groupid, acronym);
-	launcherParameter.setQueue(queue)
-        iLauncher2 = Launcher2(launcherParameter)
-
-        obsJob = iLauncher2.job
-        self.assertEquals(expJob, obsJob)
-
-    def test_createGroupidIfItNotExist(self):
-        acronym = "checkGroupID"
-	
-	launcherParameter =  LauncherParameter.createParameter(self._jobdb, self._groupid, acronym);
-        iLauncher2 = Launcher2(launcherParameter)
-        iLauncher2.createGroupidIfItNotExist()
-        obsGroupid = iLauncher2.job.groupid
-        self.assertEquals(self._groupid, obsGroupid)
-
-    def test_createGroupidIfItNotExist_without_groupid(self):
-        groupid = ""
-        acronym = "checkGroupID"
-        launcherParameter = LauncherParameter.createParameter(self._jobdb, groupid, acronym)
-        iLauncher2 = Launcher2(launcherParameter)
-        iLauncher2.createGroupidIfItNotExist()
-        obsGroupid = iLauncher2.job.groupid
-        self.assertTrue(obsGroupid != "")
-        
-    def test_beginRun_with_Job_finished_in_Table(self):
-        acronym = "BeginRun"
-        iJob = Job(queue = self._queue)
-        self._jobdb.recordJob(iJob)
-        self._jobdb.changeJobStatus(iJob, "finished")
-
-        launcherParameter = LauncherParameter.createParameter(self._jobdb, self._groupid, acronym)
-        iLauncher2 = Launcher2(launcherParameter)
-
-        iLauncher2.beginRun()
-        self.assertTrue(self._jobdb.getCountStatus(self._groupid, "finished") == 0)
-        
-#    def test_beginRun_with_Job_unfinished_in_Table(self):
-#        acronym = "testU_BeginRun"
-#        cmd_start = "log = os.system( \"date;sleep 10;date\" )\n"
-#        pyFileName = "%s/Launcher22FileTest_BeginRun.py" % os.getcwd()
-#        if Test_Launcher2.SARUMAN_NAME == os.getenv("HOSTNAME"):
-#            iJob = Job(1, acronym, self._groupid, "", cmd_start, pyFileName, lResources=["test=TRUE"])
-#        else: 
-#            iJob = Job(1, acronym, self._groupid, "", cmd_start, pyFileName)
-#        iWriteScript = WriteScript(iJob, self._jobdb, self._cDir, self._tmpDir)
-#        iWriteScript.run(cmd_start, "", pyFileName)
-#        os.chmod(pyFileName, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
-#        self._jobdb.submitJob(iJob)
-
-#        launcherParameter = LauncherParameter.createParameter(self._jobdb, self._groupid, acronym)
-#        iLauncher2 = Launcher2(launcherParameter)
-        
-#        iLauncher2.beginRun()
-        
-#        self.assertTrue(self._jobdb.getCountStatus(self._groupid, "finished") == 1)
-#    
-    def test_getStatsOfExecutionTime(self):
-        acronym = "test_statTime"
-        
-        expLValues = [1000.00000, 1000.00000]
-        expStat = Stat(expLValues) 
-        
-        f = open(acronym +".o1", "w")
-        f.write("executionTime=1000.000000")
-        f.close()
-        f = open(acronym +".o2", "w")
-        f.write("executionTime=1000.000000")
-        f.close()
-        
-        launcherParameter = LauncherParameter.createParameter(self._jobdb, self._groupid, acronym)
-        iLauncher2 = Launcher2(launcherParameter)
-        obsStat = iLauncher2.getStatsOfExecutionTime(acronym)
-
-        self.assertEqual(expStat, obsStat)           
-#           
-#    def test_endRun(self):
-#        acronym = "testU_EndRun"
-#        cmd_start = "log = os.system( \"date;sleep 10;date\" )\n"
-#        pyFileName = "%s/Launcher22FileTest_EndRun.py" % os.getcwd()
-#        if Test_Launcher2.SARUMAN_NAME == os.getenv("HOSTNAME"):
-#            iJob = Job(1, acronym, self._groupid, "", cmd_start, pyFileName, lResources=["test=TRUE"])
-#        else: 
-#            iJob = Job(1, acronym, self._groupid, "", cmd_start, pyFileName)
-# 
-#        iWriteScript = WriteScript(iJob, self._jobdb, self._cDir, self._tmpDir)
-#        iWriteScript.run(cmd_start, "", pyFileName)
-#        os.chmod(pyFileName, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
-#        self._jobdb.submitJob(iJob)
-#        iLauncher2 = Launcher2(self._jobdb, os.getcwd(), "", "", self._cDir, self._tmpDir, "", self._queue, self._groupid, acronym)
-#        iLauncher2.job.groupid = self._groupid
-#        iLauncher2.endRun()
-#        
-#        self.assertTrue(self._jobdb.getCountStatus(self._groupid, "finished") == 0)
-#        self.assertTrue(self._jobdb.getCountStatus(self._groupid, "error") == 0)
-#        self.assertTrue(self._jobdb.getCountStatus(self._groupid, "waiting") == 0)    
-#           
-#        os.remove(iJob.Launcher22)
-#
-    def test_clean(self):
-        acronym = "test_clean"
-        f = open("ClusterLauncher22" + acronym + ".py", "w")
-        f.close()
-        f = open(acronym + ".o1", "w")
-        f.close()
-        f = open(acronym + ".e1", "w")
-        f.close()
-        
-        launcherParameter = LauncherParameter.createParameter(self._jobdb, self._groupid, acronym)
-        iLauncher2 = Launcher2(launcherParameter)
-        iLauncher2.clean(acronym)
-        self.assertFalse(FileUtils.isRessourceExists("ClusterLauncher22" + acronym + ".py"))
-
-    def test_clean_without_acronym(self):
-        acronym = ""
-        acronym2 = "toto"
-        f = open("ClusterLauncher22" + acronym2 + ".py", "w")
-        f.close()
-        f = open(acronym2 + ".o1", "w")
-        f.close()
-        f = open(acronym2 + ".e1", "w")
-        f.close()
-
-        launcherParameter = LauncherParameter.createParameter(self._jobdb, self._groupid, acronym)
-        iLauncher2 = Launcher2(launcherParameter)
-        iLauncher2.clean(acronym)
-        self.assertFalse(FileUtils.isRessourceExists("ClusterLauncher22" + acronym2 + ".py"))
-        
-    def test_getQueueNameAndResources_queue_no_resource(self):
-        configQueue = "all.q"
-        expQueueName = "all.q"
-        expResources = []
-        launcherParameter = LauncherParameter.createParameter(self._jobdb)
-        iLauncher2 = Launcher2(launcherParameter)
-
-        obsQueueName, obsResources = iLauncher2.getQueueNameAndResources(configQueue)
-        self.assertEquals(expQueueName, obsQueueName)
-        self.assertEquals(expResources, obsResources)
-        
-#    def test_getQueueNameAndResources_queue_one_resource(self):
-#        configQueue = "test.q 'test=TRUE'"
-#        expQueueName = "test.q"
-#        expResources = ["test=TRUE"]
-#        iLauncher2 = Launcher2(self._jobdb)
-#        obsQueueName, obsResources = iLauncher2.getQueueNameAndResources(configQueue)
-#        self.assertEquals(expQueueName, obsQueueName)
-#        self.assertEquals(expResources, obsResources)
-        
-#    def test_getQueueNameAndResources_queue_two_resources(self):
-#        configQueue = "big.q 's_data=8G s_cpu=96:00:00'"
-#        expQueueName = "big.q"
-#        expResources = ["s_data=8G", "s_cpu=96:00:00"]
-#        iLauncher2 = Launcher2(self._jobdb)
-#        obsQueueName, obsResources = iLauncher2.getQueueNameAndResources(configQueue)
-#        self.assertEquals(expQueueName, obsQueueName)
-#        self.assertEquals(expResources, obsResources)
-        
-#    def test_getQueueNameAndResources_no_queue_no_resource(self):
-#        configQueue = ""
-#        expQueueName = ""
-#        expResources = []
-#        iLauncher2 = Launcher2(self._jobdb)
-#        obsQueueName, obsResources = iLauncher2.getQueueNameAndResources(configQueue)
-#        self.assertEquals(expQueueName, obsQueueName)
-#        self.assertEquals(expResources, obsResources)
-#        
-#    def test_getQueueNameAndResources_no_queue_one_resource(self):
-#        configQueue = "s_data=8G"
-#        expQueueName = ""
-#        expResources = ["s_data=8G"]
-#        iLauncher2 = Launcher2(self._jobdb)
-#        obsQueueName, obsResources = iLauncher2.getQueueNameAndResources(configQueue)
-#        self.assertEquals(expQueueName, obsQueueName)
-#        self.assertEquals(expResources, obsResources)
-#        
-#    def test_getQueueNameAndResources_no_queue_two_resource(self):
-#        configQueue = "s_data=8G s_cpu=96:00:00"
-#        expQueueName = ""
-#        expResources = ["s_data=8G", "s_cpu=96:00:00"]
-#        iLauncher2 = Launcher2(self._jobdb)
-#        obsQueueName, obsResources = iLauncher2.getQueueNameAndResources(configQueue)
-#        self.assertEquals(expQueueName, obsQueueName)
-#        self.assertEquals(expResources, obsResources)      
-#
-##   #TODO: test with at least 2 lines in cmd
-#    def test_runSingleJob(self):
-#        acronym = "Test_runSingleJob"
-#        os.mkdir(acronym)
-#        os.chdir(acronym)
-#        iLauncher2 = Launcher2(self._jobdb, os.getcwd(), "", "", os.getcwd(), self._tmpDir, "", self._queue, self._groupid, acronym)
-#        iLauncher2.job.groupid = self._groupid
-#        iLauncher2.job.jobname = acronym
-#        iLauncher2.job.queue = self._queue
-#        if Test_Launcher2.SARUMAN_NAME == os.getenv("HOSTNAME"):
-#            iLauncher2.job.lResources = ["test=TRUE"]
-#        cmd = "log = os.system(\"touch 'YuFei'\")\n"
-#        iLauncher2.runSingleJob(cmd)
-#        time.sleep(20)
-#        jobStatus = self._jobdb.getJobStatus(iLauncher2.job)
-#        os.chdir(self._cDir)
-#        shutil.rmtree(acronym)
-#        self.assertEqual(jobStatus, "finished")
-#        
-#    def test_runSingleJob_catch_error_wrong_tmpDir(self):
-#        acronym = "Test_runSingleJob_catch_error"
-#        os.mkdir(acronym)
-#        os.chdir(acronym)
-#        iLauncher2= Launcher2(self._jobdb, os.getcwd(), "", "", os.getcwd(), "%s/toto" % self._tmpDir, "", self._queue, self._groupid, acronym)
-#        iLauncher2.job.groupid = self._groupid
-#        iLauncher2.job.jobname = acronym
-#        iLauncher2.job.queue = self._queue
-#        if Test_Launcher2.SARUMAN_NAME == os.getenv("HOSTNAME"):
-#            iLauncher2.job.lResources = ["test=TRUE"]
-#        cmd = "log = os.system(\"touch 'YuFei'\")\n"
-#        iLauncher2.runSingleJob(cmd)
-#        time.sleep(20)
-#        jobStatus = self._jobdb.getJobStatus(iLauncher2.job) 
-#        os.chdir(self._cDir)
-#        shutil.rmtree(acronym)
-#        self.assertEqual(jobStatus, "error")
-#        
-#    def test_runSingleJob_catch_error_wrong_cmd(self):
-#        acronym = "Test_runSingleJob_catch_error"
-#        os.mkdir(acronym)
-#        os.chdir(acronym)
-#        iLauncher2 = Launcher2(self._jobdb, os.getcwd(), "", "", os.getcwd(), self._tmpDir, "", self._queue, self._groupid, acronym)
-#        iLauncher2.job.groupid = self._groupid
-#        iLauncher2.job.jobname = acronym
-#        iLauncher2.job.queue = self._queue
-#        if Test_Launcher2.SARUMAN_NAME == os.getenv("HOSTNAME"):
-#            iLauncher2.job.lResources = ["test=TRUE"]
-#        cmd = "log = os.system(\"truc -i toto\")\n"
-#        iLauncher2.runSingleJob(cmd)
-#        time.sleep(20)
-#        jobStatus = self._jobdb.getJobStatus(iLauncher2.job) 
-#        self._jobdb.cleanJobGroup(self._groupid)
-#        os.chdir(self._cDir)
-#        shutil.rmtree(acronym)
-#        self.assertEqual(jobStatus, "error")
-#
-#    def test_prepareCommands(self):
-#        expCmdStart = "os.symlink(\"../Yufei_chunks.fa\", \"Yufei_chunks.fa\")\n\tos.symlink(\"../Yufei_chunks.fa_cut\", \"Yufei_chunks.fa_cut\")\n\tlog = os.system(\"touch file\")\n\t" 
-#        expCmdFinish = "if os.path.exists(\"yufei.align\"):\n\t\tshutil.move(\"yufei.align\", \"yufeiLuo/.\" )\n\t"
-#        expCmdSize = "fileSize = 3.2\n\t\t"
-#        expCmdCopy = "shutil.copy(\"PY/Yufei_db/Yufei_chunks.fa\", \".\")\n\t\tshutil.copy(\"PY/Yufei_db/Yufei_chunks.fa_cut\", \".\")\n\t\t"
-#        
-#        lCmdStart = []
-#        lCmdStart.append("os.symlink(\"../Yufei_chunks.fa\", \"Yufei_chunks.fa\")")
-#        lCmdStart.append("os.symlink(\"../Yufei_chunks.fa_cut\", \"Yufei_chunks.fa_cut\")")
-#        lCmds = []
-#        lCmds.append("log = os.system(\"touch file\")")
-#        lCmdFinish = []
-#        lCmdFinish.append("if os.path.exists(\"yufei.align\"):")
-#        lCmdFinish.append("\tshutil.move(\"yufei.align\", \"yufeiLuo/.\" )") 
-#        lCmdSize = []
-#        lCmdSize.append("fileSize = 3.2")    
-#        lCmdCopy = []
-#        lCmdCopy.append("shutil.copy(\"PY/Yufei_db/Yufei_chunks.fa\", \".\")")
-#        lCmdCopy.append("shutil.copy(\"PY/Yufei_db/Yufei_chunks.fa_cut\", \".\")")
-#
-#        iLauncher2 = Launcher2(self._jobdb)
-#        obsCmdStart, obsCmdFinish, obsCmdSize, obsCmdCopy = iLauncher2.prepareCommands(lCmds, lCmdStart, lCmdFinish, lCmdSize, lCmdCopy)         
-#        
-#        self.assertEquals(expCmdStart, obsCmdStart)
-#        self.assertEquals(expCmdFinish, obsCmdFinish)      
-#        self.assertEquals(expCmdSize, obsCmdSize)
-#        self.assertEquals(expCmdCopy, obsCmdCopy)
-#        
-#    def test_getSystemCommand(self):
-#        prg = "touch"
-#        lArgs = []
-#        lArgs.append("file")
-#        expCmd = "log = os.system(\"touch file\")"
-#        iLauncher2 = Launcher2(self._jobdb)
-#        obsCmd = iLauncher2.getSystemCommand(prg, lArgs)
-#        self.assertEquals(expCmd, obsCmd)
-
-
-test_suite = unittest.TestSuite()
-test_suite.addTest( unittest.makeSuite( Test_Launcher2 ) )
-if __name__ == "__main__":
-        unittest.TextTestRunner(verbosity=2).run( test_suite )    
--- a/commons/core/launcher/test/Test_LauncherUtils.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,102 +0,0 @@
-import unittest
-from commons.core.launcher.LauncherUtils import LauncherUtils
-
-class Test_LauncherUtils(unittest.TestCase):
-        
-    def test_createHomogeneousSizeList_empty(self):
-        lHeadersSizeTuples = []
-        maxSize = 500
-        expLHeadersList = []
-        obsLHeadersList = LauncherUtils.createHomogeneousSizeList(lHeadersSizeTuples, maxSize)
-        self.assertEquals(expLHeadersList, obsLHeadersList)
-        
-    def test_createHomogeneousSizeList_one_item_upper_mean(self):
-        lHeadersSizeTuples = [("h1", 300)]
-        maxSize = 500
-        expLHeadersList = [["h1"]]
-        obsLHeadersList = LauncherUtils.createHomogeneousSizeList(lHeadersSizeTuples, maxSize)
-        self.assertEquals(expLHeadersList, obsLHeadersList)
-        
-    def test_createHomogeneousSizeList_one_item_under_mean(self):
-        lHeadersSizeTuples = [("h1", 100)]
-        maxSize = 500
-        expLHeadersList = [["h1"]]
-        obsLHeadersList = LauncherUtils.createHomogeneousSizeList(lHeadersSizeTuples, maxSize)
-        self.assertEquals(expLHeadersList, obsLHeadersList)
-        
-    def test_createHomogeneousSizeList_3items(self):
-        lHeadersSizeTuples = [("h1", 250),
-                              ("h2", 250),
-                              ("h3", 300)]
-        maxSize = 500
-        expLHeadersList = [["h3"], ["h2"], ["h1"]]
-        obsLHeadersList = LauncherUtils.createHomogeneousSizeList(lHeadersSizeTuples, maxSize)
-        self.assertEquals(expLHeadersList, obsLHeadersList)
-
-    def test_createHomogeneousSizeList_4items(self):
-        lHeadersSizeTuples = [("h1", 100),
-                              ("h2", 200),
-                              ("h3", 10),
-                              ("h4", 400)]
-        maxSize = 500
-        expLHeadersList = [["h4", "h3"], ["h2", "h1"]]
-        obsLHeadersList = LauncherUtils.createHomogeneousSizeList(lHeadersSizeTuples, maxSize)
-        self.assertEquals(expLHeadersList, obsLHeadersList)
-        
-    def test_createHomogeneousSizeList_5items(self):
-        lHeadersSizeTuples = [("h1", 300),
-                              ("h2", 300),
-                              ("h3", 250),
-                              ("h4", 100),
-                              ("h5", 90)]
-        maxSize = 500
-        expLHeadersList = [["h2", "h5","h4"], ["h1"], ["h3"]]
-        obsLHeadersList = LauncherUtils.createHomogeneousSizeList(lHeadersSizeTuples, maxSize)
-        self.assertEquals(expLHeadersList, obsLHeadersList)
-        
-    def test_createHomogeneousSizeList_all_upper_max(self):
-        lHeadersSizeTuples = [("h1", 600),
-                              ("h2", 500),
-                              ("h3", 700),
-                              ("h4", 900),
-                              ("h5", 500)]
-        maxSize = 500
-        expLHeadersList = [["h4"], ["h3"], ["h1"], ["h5"], ["h2"]]
-        obsLHeadersList = LauncherUtils.createHomogeneousSizeList(lHeadersSizeTuples, maxSize)
-        self.assertEquals(expLHeadersList, obsLHeadersList)
-        
-    def test_createHomogeneousSizeList_all_upper_mean(self):
-        lHeadersSizeTuples = [("h1", 300),
-                              ("h2", 300),
-                              ("h3", 300),
-                              ("h4", 300),
-                              ("h5", 300)]
-        maxSize = 500
-        expLHeadersList = [["h5"], ["h4"], ["h3"], ["h2"], ["h1"]]
-        obsLHeadersList = LauncherUtils.createHomogeneousSizeList(lHeadersSizeTuples, maxSize)
-        self.assertEquals(expLHeadersList, obsLHeadersList)
-        
-    def test_createHomogeneousSizeList_all_under_mean(self):
-        lHeadersSizeTuples = [("h1", 100),
-                              ("h2", 100),
-                              ("h3", 100),
-                              ("h4", 100),
-                              ("h5", 100)]
-        maxSize = 500
-        expLHeadersList = [["h5", "h4", "h3", "h2"], ["h1"]]
-        obsLHeadersList = LauncherUtils.createHomogeneousSizeList(lHeadersSizeTuples, maxSize)
-        self.assertEquals(expLHeadersList, obsLHeadersList)
-        
-    def test_createHomogeneousSizeList_floats(self):
-        lHeadersSizeTuples = [("h1", 99.1),
-                              ("h2", 100.7),
-                              ("h3", 100.1),
-                              ("h4", 100.1),
-                              ("h5", 100)]
-        maxSize = 500
-        expLHeadersList = [['h2', 'h4', 'h3', 'h5'], ["h1"]]
-        obsLHeadersList = LauncherUtils.createHomogeneousSizeList(lHeadersSizeTuples, maxSize)
-        self.assertEquals(expLHeadersList, obsLHeadersList)
-
-if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
--- a/commons/core/launcher/test/Test_WriteScript.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,365 +0,0 @@
-from commons.core.utils.FileUtils import FileUtils
-from commons.core.launcher.WriteScript import WriteScript
-from commons.core.sql.Job import Job
-from commons.core.sql.DbFactory import DbFactory
-from commons.core.sql.TableJobAdaptatorFactory import TableJobAdaptatorFactory
-import unittest
-import os
-import shutil
-import time
-import threading
-
-class Test_WriteScript(unittest.TestCase):
-
-    def setUp(self):
-        self._testDir = os.getcwd()
-        self._acronym = "dummyAcronym"
-        self._jobTable = "dummyJobsTable"
-        self._iDb = DbFactory.createInstance()
-        self._iDb.createTable(self._jobTable, "jobs", overwrite = True)
-        self._jobdb = TableJobAdaptatorFactory.createInstance(self._iDb, self._jobTable)
-        self._job = Job()
-        self._job.groupid = "groupid"
-        self._job.jobname = self._acronym
-        self._job.launcher = "ClusterLauncher"
-        self._jobdb.recordJob(self._job)
-        self._dummyScratch = "dummyScratch"
-        os.mkdir(self._dummyScratch)
-        os.chdir(self._dummyScratch)
-        self._tmpDir = os.getcwd()
-        self._iScriptWriter = WriteScript(self._job, self._jobdb, self._testDir, self._tmpDir)
-        
-    def tearDown(self):
-        self._iDb.dropTable(self._jobTable)
-        self._iDb.close()
-        if FileUtils.isRessourceExists(self._dummyScratch):
-            shutil.rmtree(self._dummyScratch)
-
-    def test_run(self):
-        isScriptAsRun = False
-        fileToCreate = 'dummyFile'
-        cmdStart = "log = os.system( \"touch %s\" )\n" % fileToCreate
-        cmdFinish = "os.system(\"mv %s %s\" )\n" % (fileToCreate, self._testDir)
-        pyFileName = "%s/ClusterLauncher_%s.py" % (os.getcwd(), self._acronym)       
-        
-        self._iScriptWriter.run(cmdStart, cmdFinish, pyFileName)
-        os.system("python %s" % pyFileName)
-
-        os.chdir(self._testDir)
-        if FileUtils.isRessourceExists(fileToCreate):
-            os.remove(fileToCreate)
-            isScriptAsRun = True
-        expJobStatus = "finished"    
-        obsJobStatus = self._jobdb.getJobStatus(self._job)
-            
-        self.assertTrue(isScriptAsRun)
-        self.assertEquals(expJobStatus, obsJobStatus)
-        
-    def test_run_with_cmdSize_and_cmdCopy(self):
-        isScriptAsRun = False
-        fileToCreate = 'dummyFile'
-        fileSize = 0.5
-        cmdSize = "fileSize = %f\n" % fileSize
-        cmdCopy = "os.system(\"touch bank.fa\")\n"
-        cmdStart = "log = os.system(\"touch %s\")\n" % fileToCreate
-        cmdFinish = "shutil.move(\"%s\", \"%s\")" % (fileToCreate, self._testDir)
-        pyFileName = "%s/ClusterLauncher_%s.py" % (os.getcwd(), self._acronym)       
-        
-        iWriteScript = WriteScript(self._job, self._jobdb, self._testDir, self._tmpDir, True)
-        iWriteScript.run(cmdStart, cmdFinish, pyFileName, cmdSize, cmdCopy)
-        os.system("python %s" % pyFileName)
-
-        os.chdir(self._testDir)
-        if FileUtils.isRessourceExists(fileToCreate):
-            os.remove(fileToCreate)
-            isScriptAsRun = True
-        expJobStatus = "finished"    
-        obsJobStatus = self._jobdb.getJobStatus(self._job)
-            
-        self.assertTrue(isScriptAsRun)
-        self.assertEquals(expJobStatus, obsJobStatus)
-
-#TODO: how to test ?
-#    def test_run_2_jobs_trying_to_create_same_groupIdDir(self):
-#        fileToCreate1 = 'dummyFile1'
-#        fileToCreate2 = 'dummyFile2'
-#        flagFileOSError = "osErrorRaised"
-#        
-#        fileSize = 0.5
-#        cmd_checkSize = ""
-#        cmd_checkSize += "if not os.path.exists( \"%s\" ):\n" % self._job.groupid
-#        cmd_checkSize += "\tfileSize = %f\n" % fileSize
-#        
-#        cmd_checkGroupidDir1 = ""
-#        cmd_checkGroupidDir1 += "if not os.path.exists(\"%s\"):\n" % self._job.groupid
-#        cmd_checkGroupidDir1 += "\ttry:\n"
-#        cmd_checkGroupidDir1 += "\t\ttime.sleep(10)\n"
-#        cmd_checkGroupidDir1 += "\t\tos.mkdir(\"%s\")\n" % self._job.groupid
-#        cmd_checkGroupidDir1 += "\texcept OSError, e :\n"
-#        cmd_checkGroupidDir1 += "\t\tos.system(\"touch %s\")\n" % flagFileOSError
-#        cmd_checkGroupidDir1 += "\t\tif e.args[0] != 17:\n"
-#        cmd_checkGroupidDir1 += "\t\t\traise RepetException(\"ERROR: can't create '%s'\")\n" % self._job.groupid
-#        cmd_checkGroupidDir1 += "\tos.chdir(\"%s\")\n" % self._job.groupid
-#        cmd_checkGroupidDir1 += "\tos.system(\"touch bank.fa\")\n" #cp
-#        cmd_checkGroupidDir1 += "else:\n"
-#        cmd_checkGroupidDir1 += "\tos.chdir(\"%s\")\n" % self._job.groupid
-#        
-#        cmdStart1 = "log = os.system(\"touch %s\")\n" % fileToCreate1
-#        cmdFinish1 = "shutil.move(\"%s\", \"%s\")\n" % (fileToCreate1, self._testDir)
-#        pyFileName1 = "%s/ClusterLauncher1_job1.py" % os.getcwd()
-#       
-#        cmd_checkGroupidDir2 = ""
-#        cmd_checkGroupidDir2 += "if not os.path.exists(\"%s\"):\n" % self._job.groupid
-#        cmd_checkGroupidDir2 += "\ttry:\n"
-#        cmd_checkGroupidDir2 += "\t\tos.mkdir(\"%s\")\n" % self._job.groupid
-#        cmd_checkGroupidDir2 += "\texcept OSError, e :\n"
-#        cmd_checkGroupidDir2 += "\t\tif e.args[0] != 17:\n"
-#        cmd_checkGroupidDir2 += "\t\t\traise RepetException(\"ERROR: can't create '%s'\")\n" % self._job.groupid
-#        cmd_checkGroupidDir2 += "\tos.chdir(\"%s\")\n" % self._job.groupid
-#        cmd_checkGroupidDir2 += "\tos.system(\"touch bank.fa\")\n" #cp
-#        cmd_checkGroupidDir2 += "else:\n"
-#        cmd_checkGroupidDir2 += "\tos.chdir(\"%s\")\n" % self._job.groupid
-#        
-#        cmdStart2 = "log = os.system(\"touch %s\")\n" % fileToCreate2
-#        cmdFinish2 = "shutil.move(\"%s\", \"%s\")\n" % (fileToCreate2, self._testDir)
-#        pyFileName2 = "%s/ClusterLauncher2_job2.py" % os.getcwd()
-#            
-#        job1 = Job(self._jobTable, jobname = "job1", groupid = self._job.groupid)
-#        self._jobdb.recordJob(job1)
-#        job2 = Job(self._jobTable, jobname = "job2", groupid = self._job.groupid)
-#        self._jobdb.recordJob(job2)
-#        iScriptWriter1 = WriteScript(job1, self._jobdb, self._testDir, self._tmpDir)
-#        iScriptWriter1.run(cmdStart1, cmdFinish1, pyFileName1, cmd_checkSize, cmd_checkGroupidDir1)
-#        iScriptWriter2 = WriteScript(job2, self._jobdb, self._testDir, self._tmpDir)
-#        iScriptWriter2.run(cmdStart2, cmdFinish2, pyFileName2, cmd_checkSize, cmd_checkGroupidDir2)
-#    
-#        iCFT1 = CreateFileThread(pyFileName1)
-#        iCFT2 = CreateFileThread(pyFileName2)
-#        iCFT1.start()
-#        iCFT2.start()
-#        while iCFT1.isAlive() or iCFT2.isAlive():
-#            time.sleep(5)
-#        self.assertTrue(FileUtils.isRessourceExists(flagFileOSError))
-#        os.chdir(self._testDir)
-#        
-#        if FileUtils.isRessourceExists(fileToCreate1):
-#            os.remove(fileToCreate1)
-#            
-#        if FileUtils.isRessourceExists(fileToCreate2):            
-#            os.remove(fileToCreate2)
-    
-    def test_run_2_lines_in_cmd_start(self):
-        isScriptAsRun = False
-        fileToCreate = 'dummyFile'
-        
-        cmdStart = "log = 0\n\t"
-        cmdStart += "if True:\n\t"
-        cmdStart += "\tos.system( \"touch dummyFile\" )\n"
-        cmdFinish = "os.system(\"mv %s %s\" )\n" % (fileToCreate, self._testDir)
-        pyFileName = "%s/ClusterLauncher_%s.py" % (os.getcwd(), self._acronym)       
-        
-        self._iScriptWriter.run(cmdStart, cmdFinish, pyFileName)
-        os.system("python %s" % pyFileName)
-        
-        os.chdir(self._testDir)
-        if FileUtils.isRessourceExists(fileToCreate):
-            os.remove(fileToCreate)
-            isScriptAsRun = True
-        self.assertTrue(isScriptAsRun)
-
-    def test_run_2_lines_in_cmd_finish(self):
-        isScriptAsRun = False
-        fileToCreate = 'dummyFile'
-        
-        cmdStart = "log = 0\n\t"
-        cmdStart += "if True:\n\t"
-        cmdStart += "\tos.system( \"touch dummyFile\" )\n"
-        cmdFinish = "if True:\n\t"
-        cmdFinish += "\tos.system(\"mv %s %s\" )\n" % (fileToCreate, self._testDir)
-        pyFileName = "%s/ClusterLauncher_%s.py" % (os.getcwd(), self._acronym)       
-        
-        self._iScriptWriter.run(cmdStart, cmdFinish, pyFileName)
-        os.system("python %s" % pyFileName)
-        
-        os.chdir(self._testDir)
-        if FileUtils.isRessourceExists(fileToCreate):
-            os.remove(fileToCreate)
-            isScriptAsRun = True
-        self.assertTrue(isScriptAsRun)
-        
-    def test_fillTemplate_with_JobScriptTemplate(self):
-        os.chdir("..")
-        d = {
-             "tmpDir" : "/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch",
-             "jobTableName" : "dummyJobsTable",
-             "groupId" : "groupid",
-             "jobName" : "job1",
-             "launcher" : "ClusterLauncher",
-             "time" : "20110505-105353",
-             "repet_path" : "/home/user/workspace/repet_pipe",
-             "repet_host" : "pisano",
-             "repet_user" : "user",
-             "repet_pw" : "user",
-             "repet_db" : "repet_user",
-             "repet_port" : "3306",
-             "cmdStart" : "log = os.system(\"touch dummyFile1\")",
-             "cmdFinish" : "shutil.move(\"dummyFile1\", \"/home/user/workspace/repet_pipe/commons/core/launcher/test\")",
-             "cDir" : "/home/user/workspace/repet_pipe/commons/core/launcher/test/"
-             }
-        expFileName = "expFiles/expJobScriptTemplate.py"
-        obsFileName = "obsFile.py"
-        
-        iWS = WriteScript()
-        iWS.fillTemplate(obsFileName, d)
-        self.assertTrue(FileUtils.are2FilesIdentical(expFileName, obsFileName))
-        os.remove(obsFileName)
-        
-    def test_fillTemplate_with_JobScriptTemplate_2_lines_in_cmd_start(self):
-        os.chdir("..")
-        d = {
-             "tmpDir" : "/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch",
-             "jobTableName" : "dummyJobsTable",
-             "groupId" : "groupid",
-             "jobName" : "job1",
-             "launcher" : "ClusterLauncher",
-             "time" : "20110505-105353",
-             "repet_path" : "/home/user/workspace/repet_pipe",
-             "repet_host" : "pisano",
-             "repet_user" : "user",
-             "repet_pw" : "user",
-             "repet_db" : "repet_user",
-             "repet_port" : "3306",
-             "cmdStart" : "print \"Hello Yufei\"\n\tlog = os.system(\"touch dummyFile1\")",
-             "cmdFinish" : "shutil.move(\"dummyFile1\", \"/home/user/workspace/repet_pipe/commons/core/launcher/test\")",
-             "cDir" : "/home/user/workspace/repet_pipe/commons/core/launcher/test/"
-             }
-        expFileName = "expFiles/expJobScriptTemplate_cmdWith2Lines.py"
-        obsFileName = "obsFile.py"
-        
-        iWS = WriteScript()
-        iWS.fillTemplate(obsFileName, d)
-        self.assertTrue(FileUtils.are2FilesIdentical(expFileName, obsFileName))
-        os.remove(obsFileName)
-        
-    def test_fillTemplate_with_JobScriptWithFilesCopyTemplate(self):
-        os.chdir("..")
-        d = {
-             "tmpDir" : "/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch",
-             "jobTableName" : "dummyJobsTable",
-             "groupId" : "groupid",
-             "jobName" : "job1",
-             "launcher" : "ClusterLauncher",
-             "time" : "20110505-105353",
-             "repet_path" : "/home/user/workspace/repet_pipe",
-             "repet_host" : "pisano",
-             "repet_user" : "user",
-             "repet_pw" : "user",
-             "repet_db" : "repet_user",
-             "repet_port" : "3306",
-             "cmdStart" : "log = os.system(\"touch dummyFile1\")",
-             "cmdFinish" : "shutil.move(\"dummyFile1\", \"/home/user/workspace/repet_pipe/commons/core/launcher/test\")",
-             "cDir" : "/home/user/workspace/repet_pipe/commons/core/launcher/test/",
-             "cmdSize" : "fileSize = 0.500000",
-             "cmdCopy" : "os.system(\"touch bank.fa\")"
-             }
-        expFileName = "expFiles/expJobScriptWithFilesCopyTemplate.py"
-        obsFileName = "obsFile.py"
-        
-        iWS = WriteScript(chooseTemplateWithCopy = True)
-        iWS.fillTemplate(obsFileName, d)
-        self.assertTrue(FileUtils.are2FilesIdentical(expFileName, obsFileName))
-        os.remove(obsFileName)
-
-    def test_fillTemplate_with_JobScriptTemplateLight(self):
-        os.chdir("..")
-        d = {
-             "tmpDir" : "/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch",
-             "jobTableName" : "dummyJobsTable",
-             "groupId" : "groupid",
-             "jobName" : "job1",
-             "launcher" : "ClusterLauncher",
-             "time" : "20110505-105353",
-             "repet_path" : "/home/user/workspace/repet_pipe",
-             "cmdStart" : "log = os.system(\"touch dummyFile1\")",
-             "cmdFinish" : "shutil.move(\"dummyFile1\", \"/home/user/workspace/repet_pipe/commons/core/launcher/test\")",
-             "cDir" : "/home/user/workspace/repet_pipe/commons/core/launcher/test/",
-             "cmdSize" : "fileSize = 0.500000",
-             "cmdCopy" : "os.system(\"touch bank.fa\")"
-             }
-        expFileName = "expFiles/expJobScriptTemplateLight.py"
-        obsFileName = "obs.py"
-        
-        iWS = WriteScript(chooseTemplateLight = True)
-        iWS.fillTemplate(obsFileName, d)
-        self.assertTrue(FileUtils.are2FilesIdentical(expFileName, obsFileName))
-        os.remove(obsFileName)
-        
-    def test_createJobScriptDict(self):
-        os.chdir("..")
-        cmd_start = "log = os.system(\"touch dummyFile1\")"
-        cmd_finish = "shutil.move(\"dummyFile1\", \"/home/user/workspace/repet_pipe/commons/core/launcher/test\")"
-        cmd_size = ""
-        cmd_copy = ""
-        expDict = {
-             "tmpDir" : self._tmpDir,
-             "jobTableName" : self._jobTable,
-             "groupId" : self._job.groupid,
-             "jobName" : self._acronym,
-             "launcher" : self._job.launcher,
-             "time" : time.strftime("%Y%m%d-%H%M%S"),
-             "repet_path" : os.environ["REPET_PATH"],
-             "repet_host" : os.environ["REPET_HOST"],
-             "repet_user" : os.environ["REPET_USER"],
-             "repet_pw" : os.environ["REPET_PW"],
-             "repet_db" : os.environ["REPET_DB"],
-             "repet_port" : os.environ["REPET_PORT"],
-             "cmdStart" : cmd_start,
-             "cmdFinish" : cmd_finish,
-             "cDir" : self._testDir,
-             "cmdSize" : cmd_size,
-             "cmdCopy" : cmd_copy
-             }
-        obsDict = self._iScriptWriter.createJobScriptDict(cmd_start, cmd_finish, cmd_size, cmd_copy)
-        self.assertEquals(expDict, obsDict)
-        
-    def test_createJobScriptDict_with_cmdSize_and_cmdCopy(self):
-        os.chdir("..")
-        cmd_start = "log = os.system(\"touch dummyFile1\")"
-        cmd_finish = "shutil.move(\"dummyFile1\", \"/home/user/workspace/repet_pipe/commons/core/launcher/test\")"
-        cmd_size = "fileSize = 0.500000"
-        cmd_copy = "os.system(\"touch bank.fa\")"
-        expDict = {
-             "tmpDir" : self._tmpDir,
-             "jobTableName" : self._jobTable,
-             "groupId" : self._job.groupid,
-             "jobName" : self._acronym,
-             "launcher" : self._job.launcher,
-             "time" : time.strftime("%Y%m%d-%H%M%S"),
-             "repet_path" : os.environ["REPET_PATH"],
-             "repet_host" : os.environ["REPET_HOST"],
-             "repet_user" : os.environ["REPET_USER"],
-             "repet_pw" : os.environ["REPET_PW"],
-             "repet_db" : os.environ["REPET_DB"],
-             "repet_port" : os.environ["REPET_PORT"],
-             "cmdStart" : cmd_start,
-             "cmdFinish" : cmd_finish,
-             "cDir" : self._testDir,
-             "cmdSize" : cmd_size,
-             "cmdCopy" : cmd_copy
-             }
-        obsDict = self._iScriptWriter.createJobScriptDict(cmd_start, cmd_finish, cmd_size, cmd_copy)
-        self.assertEquals(expDict, obsDict)
-        
-class CreateFileThread(threading.Thread):
-
-    def __init__(self, pyFileName):
-        threading.Thread.__init__(self)
-        self._pyFileName = pyFileName
-        
-    def run(self):
-        os.system("python %s" % self._pyFileName)
-
-test_suite = unittest.TestSuite()
-test_suite.addTest( unittest.makeSuite( Test_WriteScript ) )
-if __name__ == "__main__":
-        unittest.TextTestRunner(verbosity=2).run( test_suite )    
--- a/commons/core/launcher/test/expFiles/expJobScriptSQLiteWithFilesCopyTemplate.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-import time
-import shutil
-from commons.core.checker.RepetException import RepetException
-from commons.core.sql.TableJobAdaptator import TableJobAdaptator
-from commons.core.sql.DbMySql import DbMySql
-from commons.core.sql.DbSQLite import DbSQLite
-from commons.core.sql.Job import Job
-
-try:
-	newDir = None
-	print os.uname()
-	beginTime = time.time()
-	print 'beginTime=%f' % beginTime
-	print "work in dir '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch'"
-	sys.stdout.flush()
-	if not os.path.exists("/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch"):
-		raise IOError("ERROR: temporary directory '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch' doesn't exist")
-	
-	fileSize = 0
-	if not os.path.exists("groupid"):
-		fileSize = 0.500000
-	freeGigaNeededInTmpDir = float(1 + fileSize)
-	freeSpace = os.statvfs("/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch")
-	if ((freeSpace.f_bavail * freeSpace.f_frsize) / 1073741824.0 < freeGigaNeededInTmpDir):
-		raise RepetException("ERROR: less than %.2fG of input file in '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch'" % freeGigaNeededInTmpDir)
-	
-	os.chdir("/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch")
-	if not os.path.exists("groupid"):
-		try:
-			os.mkdir("groupid")
-		except OSError, e :
-			if e.args[0] != 17:
-				raise RepetException("ERROR: can't create 'groupid'")
-		os.chdir("groupid")
-		os.system("touch bank.fa")
-	else:
-		os.chdir("groupid")
-	
-	newDir = "groupid_job1_20110505-105353"
-	if os.path.exists(newDir):
-		shutil.rmtree(newDir)
-	os.mkdir(newDir)
-	os.chdir(newDir)
-	
-	queue = "main.q"
-	iJob = Job("jobs", jobname = "job1", groupid = "groupid", queue = queue, node = os.getenv("HOSTNAME"))
-	iDb = DbSQLite("/home/user/workspace/repet_pipe/commons/core/launcher/test/jobs")
-	iTJA = TableJobAdaptator(iDb, "jobs")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "running")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	iDb.close()
-	
-	log = os.system("touch dummyFile1")
-	if log != 0:
-		raise RepetException("ERROR: job returned %i" % log)
-	else:
-		print "job finished successfully"
-	shutil.move("dummyFile1", "/home/user/workspace/repet_pipe/commons/core/launcher/test")
-	
-	os.chdir("..")
-	shutil.rmtree(newDir)
-	
-	iDb = DbSQLite("/home/user/workspace/repet_pipe/commons/core/launcher/test/jobs")
-	iTJA = TableJobAdaptator(iDb, "jobs")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "finished")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	iDb.close()
-	
-	endTime = time.time()
-	print 'endTime=%f' % endTime
-	print 'executionTime=%f' % (endTime - beginTime)
-	print os.uname()
-
-except IOError, e :
-	print e
-	queue = "main.q"
-	iJob = Job("jobs", jobname = "job1", groupid = "groupid", queue = queue, node = os.getenv("HOSTNAME"))
-	iDb = DbSQLite("/home/user/workspace/repet_pipe/commons/core/launcher/test/jobs")
-	iTJA = TableJobAdaptator(iDb, "jobs")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "error")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	iDb.close()
-	sys.exit(1)
-
-except Exception, e :
-	print "tmpDir is : /home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch"
-	print "cDir is : /home/user/workspace/repet_pipe/commons/core/launcher/test/"
-	print e
-	if newDir != None and os.path.exists("../%s" % newDir) and not os.path.exists("/home/user/workspace/repet_pipe/commons/core/launcher/test//%s" % newDir):
-		os.chdir("..")
-		shutil.move(newDir, "/home/user/workspace/repet_pipe/commons/core/launcher/test//%s" % newDir)
-	queue = "main.q"
-	iJob = Job("jobs", jobname = "job1", groupid = "groupid", queue = queue, node = os.getenv("HOSTNAME"))
-	iDb = DbSQLite("/home/user/workspace/repet_pipe/commons/core/launcher/test/jobs")
-	iTJA = TableJobAdaptator(iDb, "jobs")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "error")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	iDb.close()
-	sys.exit(1)
--- a/commons/core/launcher/test/expFiles/expJobScriptTemplate.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,95 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-import time
-import shutil
-from commons.core.checker.RepetException import RepetException
-from commons.core.sql.TableJobAdaptator import TableJobAdaptator
-from commons.core.sql.DbFactory import DbFactory
-from commons.core.sql.Job import Job
-
-try:
-	newDir = None
-	print os.uname()
-	beginTime = time.time()
-	print 'beginTime=%f' % beginTime
-	print "work in dir '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch'"
-	sys.stdout.flush()
-	if not os.path.exists( "/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch" ):
-		raise IOError("ERROR: temporary directory '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch' doesn't exist")
-	
-	minFreeGigaInTmpDir = 1
-	freeSpace = os.statvfs("/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch")
-	if ((freeSpace.f_bavail * freeSpace.f_frsize) / 1073741824.0 < minFreeGigaInTmpDir):
-		raise RepetException("ERROR: less than %iG of free space in '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch'" % minFreeGigaInTmpDir)
-	
-	os.chdir("/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch")
-	newDir = "groupid_job1_20110505-105353"
-	if os.path.exists(newDir):
-		shutil.rmtree(newDir)
-	os.mkdir(newDir)
-	os.chdir(newDir)
-	
-	iJob = Job(jobname = "job1", groupid = "groupid", launcherFile = "ClusterLauncher", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "dummyJobsTable")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "running")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	
-	log = os.system("touch dummyFile1")
-	if log != 0:
-		raise RepetException("ERROR: job returned %i" % log)
-	else:
-		print "job finished successfully"
-	sys.stdout.flush()
-	shutil.move("dummyFile1", "/home/user/workspace/repet_pipe/commons/core/launcher/test")
-	
-	os.chdir("..")
-	shutil.rmtree(newDir)
-	
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "dummyJobsTable")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "finished")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	
-	endTime = time.time()
-	print 'endTime=%f' % endTime
-	print 'executionTime=%f' % (endTime - beginTime)
-	print os.uname()
-	sys.stdout.flush()
-
-except IOError, e :
-	print e
-	iJob = Job(jobname = "job1", groupid = "groupid", launcherFile = "ClusterLauncher", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "dummyJobsTable")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "error")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	sys.exit(1)
-
-except Exception, e :
-	print "tmpDir is : /home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch"
-	print "cDir is : /home/user/workspace/repet_pipe/commons/core/launcher/test/"
-	print e
-	if newDir != None and os.path.exists("../%s" % newDir) and not os.path.exists("/home/user/workspace/repet_pipe/commons/core/launcher/test//%s" % newDir):
-		os.chdir("..")
-		shutil.move(newDir, "/home/user/workspace/repet_pipe/commons/core/launcher/test//%s" % newDir)
-	iJob = Job(jobname = "job1", groupid = "groupid", launcherFile = "ClusterLauncher", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "dummyJobsTable")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "error")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	sys.exit(1)
--- a/commons/core/launcher/test/expFiles/expJobScriptTemplateLight.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-import time
-import shutil
-from commons.core.checker.RepetException import RepetException
-try:
-	newDir = None
-	print os.uname()
-	beginTime = time.time()
-	print 'beginTime=%f' % beginTime
-	print "work in dir '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch'"
-	sys.stdout.flush()
-	if not os.path.exists( "/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch" ):
-		raise IOError("ERROR: temporary directory '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch' doesn't exist")
-	
-	minFreeGigaInTmpDir = 1
-	freeSpace = os.statvfs("/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch")
-	if ((freeSpace.f_bavail * freeSpace.f_frsize) / 1073741824.0 < minFreeGigaInTmpDir):
-		raise RepetException("ERROR: less than %iG of free space in '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch'" % minFreeGigaInTmpDir)
-	
-	os.chdir("/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch")
-	newDir = "groupid_job1_20110505-105353"
-	if os.path.exists(newDir):
-		shutil.rmtree(newDir)
-	os.mkdir(newDir)
-	os.chdir(newDir)
-	
-	log = os.system("touch dummyFile1")
-	if log != 0:
-		raise RepetException("ERROR: job returned %i" % log)
-	else:
-		print "job finished successfully"
-	sys.stdout.flush()
-	shutil.move("dummyFile1", "/home/user/workspace/repet_pipe/commons/core/launcher/test")
-	
-	os.chdir("..")
-	shutil.rmtree(newDir)	
-	endTime = time.time()
-	print 'endTime=%f' % endTime
-	print 'executionTime=%f' % (endTime - beginTime)
-	print os.uname()
-	sys.stdout.flush()
-
-except IOError, e :
-	print e
-	sys.stdout.flush()
-	sys.exit(1)
\ No newline at end of file
--- a/commons/core/launcher/test/expFiles/expJobScriptTemplate_cmdWith2Lines.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,96 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-import time
-import shutil
-from commons.core.checker.RepetException import RepetException
-from commons.core.sql.TableJobAdaptator import TableJobAdaptator
-from commons.core.sql.DbFactory import DbFactory
-from commons.core.sql.Job import Job
-
-try:
-	newDir = None
-	print os.uname()
-	beginTime = time.time()
-	print 'beginTime=%f' % beginTime
-	print "work in dir '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch'"
-	sys.stdout.flush()
-	if not os.path.exists( "/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch" ):
-		raise IOError("ERROR: temporary directory '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch' doesn't exist")
-	
-	minFreeGigaInTmpDir = 1
-	freeSpace = os.statvfs("/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch")
-	if ((freeSpace.f_bavail * freeSpace.f_frsize) / 1073741824.0 < minFreeGigaInTmpDir):
-		raise RepetException("ERROR: less than %iG of free space in '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch'" % minFreeGigaInTmpDir)
-	
-	os.chdir("/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch")
-	newDir = "groupid_job1_20110505-105353"
-	if os.path.exists(newDir):
-		shutil.rmtree(newDir)
-	os.mkdir(newDir)
-	os.chdir(newDir)
-	
-	iJob = Job(jobname = "job1", groupid = "groupid", launcherFile = "ClusterLauncher", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "dummyJobsTable")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "running")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	
-	print "Hello Yufei"
-	log = os.system("touch dummyFile1")
-	if log != 0:
-		raise RepetException("ERROR: job returned %i" % log)
-	else:
-		print "job finished successfully"
-	sys.stdout.flush()
-	shutil.move("dummyFile1", "/home/user/workspace/repet_pipe/commons/core/launcher/test")
-	
-	os.chdir("..")
-	shutil.rmtree(newDir)
-	
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "dummyJobsTable")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "finished")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	
-	endTime = time.time()
-	print 'endTime=%f' % endTime
-	print 'executionTime=%f' % (endTime - beginTime)
-	print os.uname()
-	sys.stdout.flush()
-
-except IOError, e :
-	print e
-	iJob = Job(jobname = "job1", groupid = "groupid", launcherFile = "ClusterLauncher", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "dummyJobsTable")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "error")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	sys.exit(1)
-
-except Exception, e :
-	print "tmpDir is : /home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch"
-	print "cDir is : /home/user/workspace/repet_pipe/commons/core/launcher/test/"
-	print e
-	if newDir != None and os.path.exists("../%s" % newDir) and not os.path.exists("/home/user/workspace/repet_pipe/commons/core/launcher/test//%s" % newDir):
-		os.chdir("..")
-		shutil.move(newDir, "/home/user/workspace/repet_pipe/commons/core/launcher/test//%s" % newDir)
-	iJob = Job(jobname = "job1", groupid = "groupid", launcherFile = "ClusterLauncher", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "dummyJobsTable")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "error")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	sys.exit(1)
--- a/commons/core/launcher/test/expFiles/expJobScriptWithFilesCopyTemplate.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,109 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-import time
-import shutil
-from commons.core.checker.RepetException import RepetException
-from commons.core.sql.TableJobAdaptator import TableJobAdaptator
-from commons.core.sql.DbFactory import DbFactory
-from commons.core.sql.Job import Job
-
-try:
-	newDir = None
-	print os.uname()
-	beginTime = time.time()
-	print 'beginTime=%f' % beginTime
-	print "work in dir '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch'"
-	sys.stdout.flush()
-	if not os.path.exists("/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch"):
-		raise IOError("ERROR: temporary directory '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch' doesn't exist")
-	
-	fileSize = 0
-	if not os.path.exists("groupid"):
-		fileSize = 0.500000
-	freeGigaNeededInTmpDir = float(1 + fileSize)
-	freeSpace = os.statvfs("/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch")
-	if ((freeSpace.f_bavail * freeSpace.f_frsize) / 1073741824.0 < freeGigaNeededInTmpDir):
-		raise RepetException("ERROR: less than %.2fG of free space in '/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch'" % freeGigaNeededInTmpDir)
-	
-	os.chdir("/home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch")
-	if not os.path.exists("groupid"):
-		try:
-			os.mkdir("groupid")
-		except OSError, e :
-			if e.args[0] != 17:
-				raise RepetException("ERROR: can't create 'groupid'")
-		os.chdir("groupid")
-		os.system("touch bank.fa")
-	else:
-		os.chdir("groupid")
-	
-	newDir = "groupid_job1_20110505-105353"
-	if os.path.exists(newDir):
-		shutil.rmtree(newDir)
-	os.mkdir(newDir)
-	os.chdir(newDir)
-	
-	iJob = Job(jobname = "job1", groupid = "groupid", launcherFile = "ClusterLauncher", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "dummyJobsTable")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "running")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	
-	log = os.system("touch dummyFile1")
-	if log != 0:
-		raise RepetException("ERROR: job returned %i" % log)
-	else:
-		print "job finished successfully"
-	sys.stdout.flush()
-	shutil.move("dummyFile1", "/home/user/workspace/repet_pipe/commons/core/launcher/test")
-	
-	os.chdir("..")
-	shutil.rmtree(newDir)
-	
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "dummyJobsTable")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "finished")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	
-	endTime = time.time()
-	print 'endTime=%f' % endTime
-	print 'executionTime=%f' % (endTime - beginTime)
-	print os.uname()
-	sys.stdout.flush()
-
-except IOError, e :
-	print e
-	iJob = Job(jobname = "job1", groupid = "groupid", launcherFile = "ClusterLauncher", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "dummyJobsTable")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "error")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	sys.exit(1)
-
-except Exception, e :
-	print "tmpDir is : /home/user/workspace/repet_pipe/commons/core/launcher/test/dummyScratch"
-	print "cDir is : /home/user/workspace/repet_pipe/commons/core/launcher/test/"
-	print e
-	if newDir != None and os.path.exists("../%s" % newDir) and not os.path.exists("/home/user/workspace/repet_pipe/commons/core/launcher/test//%s" % newDir):
-		os.chdir("..")
-		shutil.move(newDir, "/home/user/workspace/repet_pipe/commons/core/launcher/test//%s" % newDir)
-	iJob = Job(jobname = "job1", groupid = "groupid", launcherFile = "ClusterLauncher", node = os.getenv("HOSTNAME"))
-	iDb = DbFactory.createInstance()
-	iTJA = TableJobAdaptator(iDb, "dummyJobsTable")
-	print "current status: %s" % iTJA.getJobStatus(iJob)
-	iTJA.changeJobStatus(iJob, "error")
-	print "updated status: %s" % iTJA.getJobStatus(iJob)
-	sys.stdout.flush()
-	iDb.close()
-	sys.exit(1)
--- a/commons/core/sql/DbFactory.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
- 
-from commons.core.sql.DbMySql import DbMySql
-
-class DbFactory (object):
-    
-    def createInstance(configFileName = "", verbosity = 1):
-        return DbMySql(cfgFileName = configFileName, verbosity = verbosity)
-    
-    createInstance = staticmethod(createInstance)
\ No newline at end of file
--- a/commons/core/sql/DbMySql.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,851 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-#        Exception hierarchy:
-#
-#        StandardError
-#        |__Warning
-#        |__Error
-#           |__InterfaceError
-#           |__DatabaseError
-#              |__DataError
-#              |__OperationalError
-#              |__IntegrityError
-#              |__InternalError
-#              |__ProgrammingError
-#              |__NotSupportedError
-
-import os
-import sys
-import time
-import ConfigParser
-import MySQLdb
-from MySQLdb import InterfaceError
-from MySQLdb import OperationalError
-from MySQLdb import InternalError
-from MySQLdb import DatabaseError
-from commons.core.seq.Bioseq import Bioseq
-from commons.core.LoggerFactory import LoggerFactory
-from commons.core.checker.RepetException import RepetException
-from commons.core.sql.TablePathAdaptator import TablePathAdaptator
-from commons.core.sql.TableSetAdaptator import TableSetAdaptator
-
-LOG_DEPTH = "repet.commons"
-
-TABLE_SCHEMA_DESCRIPTOR = {"map":       [("name", "varchar(255)"), ("chr", "varchar(255)"), ("start", "int"), ("end", "int")],
-                           "set":       [("path", "int unsigned"), ("name", "varchar(255)"), ("chr", "varchar(255)"), ("start", "int"), ("end", "int")],
-                           "match":     [("query_name", "varchar(255)"), ("query_start", "int"), ("query_end", "int"), ("query_length", "int unsigned"), ("query_length_perc", "float"),
-                                         ("match_length_perc", "float"), ("subject_name", "varchar(255)"), ("subject_start", "int unsigned"), ("subject_end", "int unsigned"),
-                                         ("subject_length", "int unsigned"), ("subject_length_perc", "float"), ("E_value", "double"), ("score", "int unsigned"), ("identity", "float"),
-                                         ("path", "int unsigned")],
-                           "path":      [("path", "int unsigned"), ("query_name", "varchar(255)"), ("query_start", "int"), ("query_end", "int"), ("subject_name", "varchar(255)"),
-                                         ("subject_start", "int unsigned"), ("subject_end", "int unsigned"), ("E_value", "double"), ("score", "int unsigned"), ("identity", "float")],
-                           "align":     [("query_name", "varchar(255)"), ("query_start", "int"), ("query_end", "int"), ("subject_name", "varchar(255)"), ("subject_start", "int unsigned"),
-                                         ("subject_end", "int unsigned"), ("E_value", "double"), ("score", "int unsigned"), ("identity", "float")],
-                           "seq":       [("accession", "varchar(255)"), ("sequence", "longtext"), ("description", "varchar(255)"), ("length", "int unsigned")],
-                           "length":    [("accession", "varchar(255)"), ("length", "int unsigned")],
-                           "jobs":      [("jobid", "int unsigned"), ("jobname", "varchar(255)"), ("groupid", "varchar(255)"), ("launcher", "varchar(1024)"),
-                                         ("queue", "varchar(255)"), ("resources", "varchar(255)"), ("status", "varchar(255)"), ("time", "datetime"), ("node", "varchar(255)")],
-                           "classif":   [("seq_name", "varchar(255)"), ("length", "int unsigned"), ("strand", "char"), ("status", "varchar(255)"), ("class_classif", "varchar(255)"),
-                                         ("order_classif", "varchar(255)"), ("completeness", "varchar(255)"), ("evidence", "text")],
-                           "pathstat":  [("family", "varchar(255)"), ("maxLength", "int"), ("meanLength", "int"), ("covg", "int"), ("frags", "int"), ("fullLgthFrags", "int"), ("copies", "int"),
-                                         ("fullLgthCopies", "int"), ("meanId", "varchar(255)"), ("sdId", "varchar(255)"), ("minId", "varchar(255)"), ("q25Id", "varchar(255)"), ("medId", "varchar(255)"),
-                                         ("q75Id", "varchar(255)"), ("maxId", "varchar(255)"), ("meanLgth", "varchar(255)"), ("sdLgth", "varchar(255)"), ("minLgth", "varchar(255)"), ("q25Lgth", "varchar(255)"),
-                                         ("medLgth", "varchar(255)"), ("q75Lgth", "varchar(255)"), ("maxLgth", "varchar(255)"), ("meanLgthPerc", "varchar(255)"), ("sdLgthPerc", "varchar(255)"), 
-                                         ("minLgthPerc", "varchar(255)"), ("q25LgthPerc", "varchar(255)"), ("medLgthPerc", "varchar(255)"), ("q75LgthPerc", "varchar(255)"), ("maxLgthPerc", "varchar(255)")],
-                           "info_tables":[("name", "varchar(255)"), ("file", "varchar(255)")]
-                         }
-
-TABLE_INDEX_DESCRIPTOR = {"map":       [("iname", "name"), ("ichr", "chr"), ("istart", "start"), ("iend", "end"), ("icoord", "start, end")],
-                        "set":         [("id", "path"), ("iname", "name"), ("ichr", "chr"), ("istart", "start"), ("iend", "end"), ("icoord", "start, end")],
-                        "match":       [("id", "path"), ("qname", "query_name"), ("qstart", "query_start"), ("qend", "query_end"),
-                                       ("sname", "subject_name"), ("sstart", "subject_start"), ("send", "subject_end"), ("qcoord", "query_start, query_end")],
-                        "path":        [("id", "path"), ("qname", "query_name"), ("qstart", "query_start"), ("qend", "query_end"),
-                                       ("sname", "subject_name"), ("sstart", "subject_start"), ("send", "subject_end"), ("qcoord", "query_start, query_end")],
-                        "align":       [("qname", "query_name"), ("qstart", "query_start"), ("qend", "query_end"),
-                                       ("sname", "subject_name"), ("sstart", "subject_start"), ("send", "subject_end"), ("qcoord", "query_start, query_end")],  
-                        "seq":         [("iacc", "accession"), ("idescr", "description")],  
-                        "length":      [("iacc", "accession"), ("ilength", "length")],
-                        "jobs":        [("ijobid", "jobid"), ("ijobname", "jobname"), ("igroupid", "groupid"), ("istatus", "status")],
-                        "classif":     [("iseq_name", "seq_name"), ("istatus", "status"), ("iclass", "class_classif"), ("iorder", "order_classif"), ("icomp", "completeness")],
-                        "pathstat":    [],
-                        "info_tables": []
-                        }
-
-TABLE_TYPE_SYNONYMS = {"tab": "match",
-                       "fasta": "seq",
-                       "fa": "seq",
-                       "fsa": "seq"
-                       }
- 
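
TABLE_SCHEMA_DESCRIPTOR and TABLE_INDEX_DESCRIPTOR drive table and index creation: createTable() joins each (column name, column type) pair into the column list of a CREATE TABLE statement, and createIndex() adds any index declared for the table type that does not already exist. A small sketch of the DDL assembly, mirroring what createTable() does below (the table name is a placeholder):

    from commons.core.sql.DbMySql import TABLE_SCHEMA_DESCRIPTOR

    # Rebuild the CREATE TABLE statement for a hypothetical 'map' table
    fields = [" ".join(fieldDescription) for fieldDescription in TABLE_SCHEMA_DESCRIPTOR["map"]]
    print "CREATE TABLE %s (%s)" % ("MyProject_map", ",".join(fields))
    # -> CREATE TABLE MyProject_map (name varchar(255),chr varchar(255),start int,end int)
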
-## Handle connections to MySQL tables formatted for REPET
-#
-class DbMySql(object):
-    
-    ## Constructor
-    #
-    # @param user string db user name
-    # @param host string db host name
-    # @param passwd string db user password
-    # @param dbname string database name
-    # @param port integer database port
-    # @param cfgFileName string configuration file name
-    #
-    # @note when a parameter is left blank, the constructor is able
-    #   to set attribute values from environment variables: REPET_HOST,
-    #   REPET_USER, REPET_PW, REPET_DB, REPET_PORT
-    #
-    def __init__(self, user = "", host = "", passwd = "", dbname = "", port = "", cfgFileName = "", verbosity = 1):
-        self._log = LoggerFactory.createLogger("%s.%s" % (LOG_DEPTH, self.__class__.__name__), verbosity)
-        if cfgFileName != "":
-            self.setAttributesFromConfigFile(cfgFileName)
-            
-        elif host != "" and user != "" and passwd != "" and dbname != "":
-            self.host = host
-            self.user = user
-            self.passwd = passwd
-            self.dbname = dbname
-            
-        else:
-            for envVar in ["REPET_HOST","REPET_USER","REPET_PW","REPET_DB"]:
-                if os.environ.get( envVar ) == None:
-                    msg = "ERROR: can't find environment variable '%s'" % envVar
-                    self._log.error(msg)
-                    raise RepetException(msg)
-            self.host = os.environ.get("REPET_HOST")
-            self.user = os.environ.get("REPET_USER")
-            self.passwd = os.environ.get("REPET_PW")
-            self.dbname = os.environ.get("REPET_DB")
-        
-        if port != "" and cfgFileName == "":
-            self.port = int(port)
-        elif os.environ.get("REPET_PORT") != None:
-            self.port = int(os.environ.get("REPET_PORT"))
-        else:
-            self.port = 3306
-                    
-        maxNbTry = 10
-        for i in xrange(1,maxNbTry+1):
-            if not self.open():
-                time.sleep(2)
-                if i == maxNbTry:
-                    msg = "ERROR: failed to connect to the MySQL database"
-                    self._log.error(msg)
-                    raise DatabaseError(msg)
-            else:
-                break
-            
-        self.cursor = self.db.cursor()
-        self.execute("""use %s""" %(self.dbname))
-        
-        
-    ## Set the attributes from the configuration file
-    #
-    # @param configFileName string configuration file name
-    #
-    def setAttributesFromConfigFile(self, configFileName):
-        config = ConfigParser.ConfigParser()
-        config.readfp( open(configFileName) )
-        self.host = config.get("repet_env","repet_host")
-        self.user = config.get("repet_env","repet_user")
-        self.passwd = config.get("repet_env","repet_pw")
-        self.dbname = config.get("repet_env","repet_db")
-        self.port = int( config.get("repet_env","repet_port") )
-        
-        
-    ## Connect to the MySQL database
-    #
-    def open(self):
-        try:
-            if int(MySQLdb.get_client_info().split(".")[0]) >= 5:
-                self.db = MySQLdb.connect( user = self.user, host = self.host,\
-                                           passwd = self.passwd, db = self.dbname, \
-                                           port = self.port, \
-                                           local_infile = 1 )
-            else:
-                self.db = MySQLdb.connect( user = self.user, host = self.host,\
-                                           passwd = self.passwd, db = self.dbname, \
-                                           port = self.port )
-        except MySQLdb.Error, e:
-            msg = "ERROR %d: %s" % (e.args[0], e.args[1])
-            self._log.error(msg)
-            return False
-
-        return True
-    
-    
-    ## Execute a SQL query
-    #
-    # @param qry string SQL query to execute
-    # @param params parameters of SQL query 
-    #
-    def execute(self, qry, params = None, nbTry = 3, sleep = 5):
-        if nbTry:
-            self._log.debug("################START SQL DEBUG MODE################")
-            self._log.debug("Current directory: %s" % os.getcwd())
-            self._log.debug("Host: %s" % self.host)
-            self._log.debug("User: %s" % self.user)
-            self._log.debug("Database: %s" % self.dbname)
-            self._log.debug("SQL command: %s" % qry)
-            self._log.debug("################STOP SQL DEBUG MODE################\n")
-    
-            try:
-                if params == None:
-                    self.cursor.execute(qry)
-                else:
-                    self.cursor.execute(qry, params)
-            except (InterfaceError, OperationalError, InternalError) as iError:
-                self._log.error("FAILED to execute query '%s': %s. %s retries left." % (qry, iError.args[1], nbTry - 1))
-                self._log.debug("WAIT %is to execute '%s'" % (sleep, qry))
-                time.sleep(sleep)
-                try:
-                    self.close()
-                except:
-                    pass
-                self.open()
-                self.cursor = self.db.cursor()
-                self.execute(qry, params, nbTry - 1, sleep)
-        else:
-            msg = "ERROR: can't execute '%s' after several tries" % qry
-            self._log.error(msg)
-            raise DatabaseError(msg)
-        
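
execute() retries transient MySQL errors (InterfaceError, OperationalError, InternalError) by sleeping, reconnecting and re-issuing the query, and raises DatabaseError once nbTry attempts are exhausted. Parametrized queries go through the same path; a short illustration (the configuration file and table name are placeholders):

    from commons.core.sql.DbMySql import DbMySql

    db = DbMySql(cfgFileName = "repet.cfg")
    # plain query
    db.execute("SELECT COUNT(*) FROM MyProject_path")
    print db.fetchall()
    # parametrized query: MySQLdb escapes the value
    db.execute("SELECT * FROM MyProject_path WHERE query_name = %s", ("chr1",))
    print db.fetchall()
    db.close()
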
-    ## Close the connection
-    #
-    def close( self ):
-        self.db.close()
-        
-        
-    ## Retrieve the results of a SQL query
-    #
-    def fetchall(self):
-        return self.cursor.fetchall()
-    
-    
-    ## Test if a table exists
-    #
-    # @param table string table name
-    # @return boolean True if the table exists, False otherwise
-    #
-    def doesTableExist( self, table ):
-        self.execute( """SHOW TABLES""" )
-        results = self.cursor.fetchall()
-        if (table,) in results:
-            return True
-        return False
-    
-    
-    ## Remove a table if it exists
-    #
-    # @param table string table name
-    #
-    def dropTable(self, table):
-        if self.doesTableExist( table ):
-            sqlCmd = "DROP TABLE %s" % table
-            self.execute( sqlCmd )
-            sqlCmd = 'DELETE FROM info_tables WHERE name = "%s"' % table
-            self.execute( sqlCmd )
-            
-            
-    ## Rename a table
-    #
-    # @param table string existing table name
-    # @param newName string new table name
-    #
-    def renameTable( self, table, newName ):
-        self.dropTable( newName )
-        self.execute( 'RENAME TABLE %s TO %s ;' % (table, newName) )
-        self.execute( 'UPDATE info_tables SET name="%s" WHERE name="%s";' % (newName, table) )
-        
-        
-    ## Duplicate a table
-    #
-    # @param sourcetableName string source table name
-    # @param newTableName string new table name
-    #
-    def copyTable(self, sourcetableName, newTableName):
-        self.dropTable( newTableName )
-        sqlCmd = "CREATE TABLE %s LIKE %s;" % (newTableName, sourcetableName) 
-        self.execute( sqlCmd )
-        sqlCmd = "INSERT INTO %s SELECT * FROM %s;" % (newTableName, sourcetableName) 
-        self.execute( sqlCmd )
-        self._log.info("copying table data from '%s' to '%s'" % (sourcetableName, newTableName))
-        self.updateInfoTable(newTableName, "")
-        
-        
-    ## Give the number of rows in the table
-    #
-    # @param tableName string table name
-    #
-    def getSize( self, tableName ):
-        qry = "SELECT count(*) FROM %s;" % (tableName)
-        self.execute(qry)
-        res = self.fetchall()
-        return int( res[0][0] )
-    
-    
-    def getTableType(self, tableName):
-        qry = "SHOW COLUMNS FROM %s;" % (tableName)
-        self.execute(qry)
-        res = self.fetchall()
-        
-        fieldNames = []
-        for row in res:
-            fieldNames.append(row[0])
-            
-        for tableType, fieldInfos in TABLE_SCHEMA_DESCRIPTOR.items():
-            refFieldsNames = [name for name,type in fieldInfos]
-            if refFieldsNames == fieldNames:
-                return tableType
-        
-        return None
-        
-    
-    ## Test if table is empty
-    #
-    # @param tableName string table name
-    # @return boolean True if the table is empty, False otherwise
-    #
-    def isEmpty(self, tableName):
-        return self.getSize(tableName) == 0
-    
-    
-    ## Record a new table in the 'info_tables' table
-    #
-    # @param tableName string table name
-    # @param info string information on the table origin
-    #
-    def updateInfoTable( self, tableName, info ):
-        if not self.doesTableExist( "info_tables" ):
-            sqlCmd = "CREATE TABLE info_tables ( name varchar(255), file varchar(255) )"
-            self.execute( sqlCmd )
-        sqlCmd = 'INSERT INTO info_tables VALUES ("%s","%s")' % (tableName, info)
-        self.execute( sqlCmd )
-        
-        
-    ## Get a list with the fields
-    #
-    def getFieldList( self, table ):
-        lFields = []
-        sqlCmd = "DESCRIBE %s" % ( table )
-        self.execute( sqlCmd )
-        lResults = self.fetchall()
-        for res in lResults:
-            lFields.append( res[0] )
-        return lFields
-    
-    
-    ## Check that the input file has as many fields as it is supposed to according to its format
-    #
-    # @note fields should be separated by tab
-    #
-    def checkDataFormatting( self, dataType, fileName ):
-        dataType = dataType.lower()
-        if dataType in ["fa", "fasta", "seq", "classif", "length", "jobs", "pathstat"]:
-            return
-        dDataType2NbFields = { "map": 4, "set": 5, "align": 9, "path": 10, "match": 15, "tab": 15 }
-        fileHandler = open( fileName, "r" )
-        line = fileHandler.readline()
-        if line != "":
-            tokens = line.split("\t")
-            if len(tokens) < dDataType2NbFields[ dataType ]:
-                msg = "ERROR: '%s' file has less than %i fields" % ( dataType, dDataType2NbFields[ dataType ] )
-                self._log.error(msg)
-                raise RepetException(msg)
-            if len(tokens) > dDataType2NbFields[ dataType ]:
-                msg = "ERROR: '%s' file has more than %i fields" % ( dataType, dDataType2NbFields[ dataType ] )
-                self._log.error(msg)
-                raise RepetException(msg)
-        fileHandler.close()
-        
-
-    def createIndex(self, tableName="", tableType=""):
-        sqlCmd = "SHOW INDEX FROM %s;"% (tableName)
-        self.execute(sqlCmd)
-        res = self.fetchall()
-        lIndex = []
-        for i in res:
-            lIndex.append(i[2])
-        self._log.warning("existing indexes: %s" % lIndex)
-        
-        for indexName, fieldNames in TABLE_INDEX_DESCRIPTOR.get(tableType):
-            if not indexName in lIndex:
-                sqlCmd = "CREATE INDEX %s ON %s ( %s );" % (indexName, tableName, fieldNames)
-                self.execute(sqlCmd)
-                
-    
-    ## Create a MySQL table of specified data type and load data
-    #
-    # @param tableName string name of the table to be created
-    # @param fileName string name of the file containing the data to be loaded in the table
-    # @param dataType string type of the data (map, set, align, path, match, seq, length, jobs)
-    # @param overwrite boolean (default = False)
-    #
-    def createTable(self, tableName, dataType, fileName = "", overwrite = False):
-        self._log.info("creating table '%s' from file '%s' of type '%s'..." % (tableName, fileName, dataType))
-            
-        if fileName != "":
-            self.checkDataFormatting(dataType, fileName)
-            
-        if overwrite:
-            self.dropTable(tableName)
-                    
-        tableType = dataType.lower()
-        if TABLE_SCHEMA_DESCRIPTOR.get(tableType,None) is None and TABLE_TYPE_SYNONYMS.get(tableType,None) is None:
-            msg = "ERROR: unknown type %s" % dataType
-            self._log.error(msg)
-            raise RepetException(msg)
-            
-        tableType = TABLE_TYPE_SYNONYMS.get(tableType,tableType)
-        
-        fields = [" ".join(fieldDescription) for fieldDescription in TABLE_SCHEMA_DESCRIPTOR.get(tableType)]
-        sqlCmd = "CREATE TABLE %s (%s)" % (tableName, ",".join(fields))
-        self.execute(sqlCmd)
-        self.createIndex(tableName, tableType)
-        
-        tmpFileName = ""
-        if fileName:
-            if tableType == "seq":
-                tmpFileName = "%s.seq" % os.path.basename(fileName)
-                self._convertFastaToSeq(fileName, tmpFileName)
-                fileName = tmpFileName
-            elif tableType == "length":
-                tmpFileName = "%s.length" % os.path.basename(fileName)
-                self._convertFastaToLength(fileName, tmpFileName)
-                fileName = tmpFileName
-        
-        hasHeaderLine = tableType == "match" or tableType == "pathstat"
-        self.loadDataFromFile(tableName, fileName, hasHeaderLine)
-        if tmpFileName:
-            os.remove(tmpFileName)
-        
-        if tableType == "path":
-            self.changePathQueryCoordinatesToDirectStrand( tableName )
-        
-        self.updateInfoTable(tableName, fileName)
-        self._log.info("creating table '%s' done!" % tableName)
-    
-
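
createTable() accepts either a tab-delimited data file of one of the declared types or an empty file name to create an empty table; fasta input is converted on the fly for 'seq' and 'length' tables. A minimal usage sketch (file and table names are hypothetical):

    from commons.core.sql.DbMySql import DbMySql

    # 'annotations.map' is assumed to be a 4-column tab-delimited file
    # (name, chr, start, end), matching the 'map' schema
    db = DbMySql(cfgFileName = "repet.cfg")
    db.createTable("MyProject_map", "map", "annotations.map", overwrite = True)
    print "%i rows loaded" % db.getSize("MyProject_map")
    db.close()
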
-    ## Create a bin table for fast access
-    #
-    # @param pathTableName string path table name (input table)
-    # @note the output table is named '<pathTableName>_idx'
-    # @param overwrite boolean default = False
-    #    
-    def createBinPathTable(self, pathTableName, overwrite = False):
-        idxTableName = "%s_idx" % pathTableName # is an attribute in TableBinPathAdaptator
-        if not self.doesTableExist(pathTableName):
-            msg = "ERROR: '%s' doesn't exist => '%s' can't be created" % (pathTableName, idxTableName)
-            self._log.error(msg)
-            raise RepetException(msg)
-        self._log.info("creating %s for fast access" % idxTableName)
-        if overwrite:
-            self.dropTable(idxTableName)
-            
-        sqlCmd = "CREATE TABLE %s ( path int unsigned, idx int unsigned, contig varchar(255), min int, max int, strand int unsigned)" % idxTableName
-        self.execute(sqlCmd)
-
-        sqlCmd = "CREATE INDEX id ON %s ( path );" % idxTableName
-        self.execute(sqlCmd)
-        sqlCmd = "CREATE INDEX ibin ON %s ( idx );" % idxTableName
-        self.execute(sqlCmd)
-        sqlCmd = "CREATE INDEX icontig ON %s ( contig );" % idxTableName
-        self.execute(sqlCmd)
-        sqlCmd = "CREATE INDEX imin ON %s ( min );" % idxTableName
-        self.execute(sqlCmd)
-        sqlCmd = "CREATE INDEX imax ON %s ( max );" % idxTableName
-        self.execute(sqlCmd)
-        sqlCmd = "CREATE INDEX istrand ON %s ( strand );" % idxTableName
-        self.execute(sqlCmd)
-
-        tmpTableName = "%s_tmp" % pathTableName
-        self._createPathTableAndGroupByIdAndOrderByStrand(pathTableName, tmpTableName)
-        iTPA = TablePathAdaptator(self, tmpTableName)
-        if not self.isEmpty(tmpTableName):
-            tmpFileName = "%s.tmp%s" % (pathTableName, str(os.getpid()))
-            with open(tmpFileName, "w") as f:
-                lQueryNames = iTPA.getQueryList()
-                for queryName in lQueryNames:
-                    lPaths = iTPA.getPathListFromQuery(queryName)
-                    for i in lPaths:
-                        idx = i.range_query.findIdx()
-                        max = i.range_query.getMax()
-                        min = i.range_query.getMin()
-                        strand = i.range_query.isOnDirectStrand()
-                        f.write("%d\t%d\t%s\t%d\t%d\t%d\n"%(i.id, idx, i.range_query.seqname, min, max, strand))
-            sqlCmd="LOAD DATA LOCAL INFILE '%s' INTO TABLE %s FIELDS ESCAPED BY '' " % (tmpFileName, idxTableName)
-            self.execute(sqlCmd)
-            self.updateInfoTable(idxTableName, "%s bin indexes" % pathTableName)
-            os.remove(tmpFileName)
-        self.dropTable(tmpTableName)
-        
-    
-    ## This table summarizes the Path list information according to the identifier numbers. The min and max values are taken
-    #
-    def _createPathTableAndGroupByIdAndOrderByStrand(self, pathTableName, outTableName):
-        self.dropTable(outTableName)
-
-        sqlcmd="CREATE TABLE %s SELECT path, query_name, min(query_start) AS query_start, max(query_end) AS query_end, subject_name, min(subject_start) AS subject_start, max(subject_end) AS subject_end, min(e_value) AS e_value, sum(score) AS score, avg(identity) AS identity FROM %s WHERE query_start<query_end and subject_start<subject_end group by path;" % (outTableName, pathTableName)
-        self.execute(sqlcmd)
-
-        sqlcmd="INSERT INTO %s SELECT path, query_name, min(query_start) AS query_start, max(query_end) AS query_end, subject_name, max(subject_start) AS subject_start, min(subject_end) AS subject_end, min(e_value) AS e_value, sum(score) AS score, avg(identity) AS identity FROM %s WHERE query_start<query_end and subject_start>subject_end group by path;" % (outTableName, pathTableName)
-        self.execute(sqlcmd)
-
-        sqlcmd="INSERT INTO %s SELECT path, query_name, max(query_start) AS query_start, min(query_end) AS query_end, subject_name, min(subject_start) AS subject_start, max(subject_end) AS subject_end, min(e_value) AS e_value, sum(score) AS score, avg(identity) AS identity FROM %s WHERE query_start>query_end and subject_start<subject_end group by path;" % (outTableName, pathTableName)
-        self.execute(sqlcmd)
-
-        sqlcmd="INSERT INTO %s SELECT path, query_name, max(query_start) AS query_start, min(query_end) AS query_end, subject_name, max(subject_start) AS subject_start, min(subject_end) AS subject_end, min(e_value) AS e_value, sum(score) AS score, avg(identity) AS identity FROM %s WHERE query_start>query_end and subject_start>subject_end group by path;" % (outTableName, pathTableName)
-        self.execute(sqlcmd)
-
-        self.createIndex(outTableName, "path")
-
-
-    ## Create a bin table for fast access
-    #
-    # @param setTableName string set table name (input table)
-    # @note the output table is named '<setTableName>_idx'
-    # @param overwrite boolean default = False
-    #
-    def createBinSetTable(self, setTableName, overwrite = False):
-        idxTableName = "%s_idx" % setTableName # is an attribute in TableBinSetAdaptator
-        if not self.doesTableExist(setTableName):
-            msg = "ERROR: '%s' doesn't exist => '%s' can't be created" % (setTableName, idxTableName)
-            self._log.error(msg)
-            raise RepetException(msg)
-        self._log.info("creating %s for fast access" % idxTableName)
-        if overwrite:
-            self.dropTable(idxTableName)
-        
-        sqlCmd = "CREATE TABLE %s ( path int unsigned, bin float, contig varchar(255), min int, max int, strand int unsigned)" % idxTableName
-        self.execute(sqlCmd)
-        
-        sqlCmd = "CREATE INDEX id ON %s ( path );" % idxTableName
-        self.execute(sqlCmd)
-        sqlCmd = "CREATE INDEX ibin ON %s ( bin );" % idxTableName
-        self.execute(sqlCmd)
-        sqlCmd = "CREATE INDEX icontig ON %s ( contig );" % idxTableName
-        self.execute(sqlCmd)
-        sqlCmd = "CREATE INDEX imin ON %s ( min );" % idxTableName
-        self.execute(sqlCmd)
-        sqlCmd = "CREATE INDEX imax ON %s ( max );" % idxTableName
-        self.execute(sqlCmd)
-        sqlCmd = "CREATE INDEX istrand ON %s ( strand );" % idxTableName
-        self.execute(sqlCmd)
-
-        tmpTableName = "%s_tmp" % setTableName
-        self._createSetTableAndGroupByIdAndOrderByStrand(setTableName, tmpTableName)
-        iTSA = TableSetAdaptator(self, tmpTableName)
-        if not self.isEmpty(tmpTableName):
-            tmpFileName = "%s.tmp%s" % (setTableName, str(os.getpid()))
-            with open(tmpFileName, "w") as f:
-                lSeqNames = iTSA.getSeqNameList()
-                for seqName in lSeqNames:
-                    lSets = iTSA.getSetListFromSeqName(seqName)
-                    for i in lSets:
-                        bin = i.getBin()
-                        max = i.getMax()
-                        min = i.getMin()
-                        strand = i.isOnDirectStrand()
-                        f.write("%d\t%f\t%s\t%d\t%d\t%d\n"%(i.id, bin, i.seqname, min, max, strand))
-            sqlCmd="LOAD DATA LOCAL INFILE '%s' INTO TABLE %s FIELDS ESCAPED BY '' " % (tmpFileName, idxTableName)
-            self.execute(sqlCmd)
-            self.updateInfoTable(idxTableName, "%s bin indexes" % setTableName)
-            os.remove(tmpFileName)
-        self.dropTable(tmpTableName)
-        
-        
-    ## This table summarizes the Set list information according to the identifier numbers. The min and max values are taken
-    #
-    def _createSetTableAndGroupByIdAndOrderByStrand(self, setTableName, outTableName):
-        self.dropTable(outTableName)
-
-        sqlcmd="CREATE TABLE %s SELECT path, name, chr, min(start) AS start, max(end) AS end FROM %s WHERE start<end group by path;" % (outTableName, setTableName)
-        self.execute(sqlcmd)
-
-        sqlcmd="INSERT INTO %s SELECT path, name, chr, max(start) AS start, min(end) AS end FROM %s WHERE start>end group by path;" % (outTableName, setTableName)
-        self.execute(sqlcmd)
-
-        self.createIndex(outTableName, "set")
-
-                   
-    ## Load data from a file into a MySQL table
-    #
-    # @param tableName string table name
-    # @param fileName string file name
-    # @param escapeFirstLine boolean True to ignore the first line of file, False otherwise 
-    #
-    def loadDataFromFile(self, tableName, fileName, escapeFirstLine = False):
-        if fileName != "":
-            sqlCmd = "LOAD DATA LOCAL INFILE '%s' INTO TABLE %s FIELDS ESCAPED BY '' " % ( fileName, tableName )
-            if escapeFirstLine == True:
-                sqlCmd = "%s IGNORE 1 LINES" %(sqlCmd)
-            self.execute( sqlCmd )
-
-        self._log.info("%i entries in the table %s" % (self.getSize(tableName), tableName))
-        
-######################################################################################
-#TODO: remove duplication with same methods in fastautils
-    ## Convert a fasta file to a length file
-    #
-    # @param inFile string name of the input fasta file
-    # @param outFile string name of the output file
-    #
-    def _convertFastaToLength(self, inFile, outFile = ""):
-        if outFile == "":
-            outFile = "%s.length" % inFile
-        
-        if inFile != "":
-            with open(inFile, "r") as inFH:
-                with open(outFile, "w") as outFH:
-                    bioseq = Bioseq()
-                    while True:
-                        bioseq.read(inFH)
-                        if bioseq.sequence == None:
-                            break
-                        seqLen = bioseq.getLength()
-                        outFH.write("%s\t%d\n" % (bioseq.header.split()[0], seqLen))
-    
-    
-    ## Convert a fasta file to a seq file
-    #
-    # @param inFile string name of the input fasta file
-    # @param outFile string name of the output file
-    #
-    def _convertFastaToSeq(self, inFile, outFile = ""):
-        if outFile == "":
-            outFile = "%s.seq" % inFile
-        
-        if inFile != "":
-            with open(inFile, "r") as inFH:
-                with open(outFile, "w") as outFH:
-                    bioseq = Bioseq()
-                    while True:
-                        bioseq.read(inFH)
-                        if bioseq.sequence == None:
-                            break
-                        seqLen = bioseq.getLength()
-                        outFH.write("%s\t%s\t%s\t%d\n" % (bioseq.header.split()[0], \
-                                                bioseq.sequence, bioseq.header, seqLen))
-
-######################################################################################
-            
-    ## Change the coordinates such that the query is on the direct strand.
-    #
-    # @param inTable string path table name to update
-    #    
-    def changePathQueryCoordinatesToDirectStrand( self, inTable ):
-        sqlCmd = "ALTER TABLE %s ADD COLUMN tmpid INT NOT NULL AUTO_INCREMENT PRIMARY KEY" % ( inTable )
-        self.execute( sqlCmd )
-        
-        tmpTable = "%s_tmp" % ( inTable )
-        sqlCmd = "CREATE TABLE %s SELECT * FROM %s WHERE query_start > query_end" % ( tmpTable, inTable )
-        self.execute( sqlCmd )
-        
-        sqlCmd = "UPDATE %s, %s" % ( inTable, tmpTable )
-        sqlCmd += " SET %s.query_start=%s.query_end," % ( inTable, tmpTable )
-        sqlCmd += " %s.query_end=%s.query_start," % ( inTable, tmpTable )
-        sqlCmd += " %s.subject_start=%s.subject_end," % ( inTable, tmpTable )
-        sqlCmd += " %s.subject_end=%s.subject_start" % ( inTable, tmpTable )
-        sqlCmd += " WHERE %s.tmpid=%s.tmpid" % ( inTable, tmpTable )
-        self.execute( sqlCmd )
-        
-        sqlCmd = "ALTER TABLE %s DROP COLUMN tmpid" % ( inTable )
-        self.execute( sqlCmd )
-        self.dropTable( tmpTable )
-        
-        
-    ## Export data from a table to a file.
-    #
-    # @param tableName string table name
-    # @param outFileName string output file name
-    # @param keepFirstLine boolean True to keep the first line (column names) in the output file
-    # @param param string SQL clause appended to the SELECT to filter the exported data
-    #
-    def exportDataToFile( self, tableName, outFileName="", keepFirstLine=False, param="" ):
-        if outFileName == "": outFileName = tableName
-        prg = "mysql"
-        cmd = prg
-        cmd += " -h %s" % ( self.host )
-        cmd += " -u %s" % ( self.user )
-        cmd += " -p\"%s\"" % ( self.passwd )
-        cmd += " --database=%s" % ( self.dbname )
-        cmd += " -e\"SELECT * FROM %s" % ( tableName )
-        if param != "": cmd += " %s" % ( param )
-        cmd += ";\""
-        cmd += " > "
-        if keepFirstLine == False:
-            cmd += "%s.tmp" % ( outFileName )
-        else:
-            cmd += "%s" % ( outFileName )
-        log = os.system( cmd )
-        if log != 0:
-            print "ERROR: mysql returned %i" % ( log )
-            sys.exit(1)
-    
-        if keepFirstLine == False:
-            tmpFileName = "%s.tmp" % ( outFileName )
-            tmpFile = open( tmpFileName, "r" )
-            outFile = open( outFileName, "w" )
-            i = 0
-            for line in tmpFile:
-                if i > 0:
-                    outFile.write( line )
-                i += 1
-            tmpFile.close()
-            outFile.close()
-            os.remove( tmpFileName )
-            
-            
-    ## Convert a Path table into an Align table
-    #
-    # @param inPathTable string name of the input Path table
-    # @param outAlignTable string name of the output Align table
-    #
-    def convertPathTableIntoAlignTable( self, inPathTable, outAlignTable ):
-        sqlCmd = "CREATE TABLE %s SELECT query_name,query_start,query_end,subject_name,subject_start,subject_end,E_value,score,identity FROM %s;" % ( outAlignTable, inPathTable )
-        self.execute( sqlCmd )
-        self.updateInfoTable( outAlignTable, "" )
-        
-    
-    ## Create a set table from a map table
-    #
-    # @param mapTableName string map table name
-    # @param setTableName string new set table name
-    #
-    def convertMapTableIntoSetTable( self, mapTableName, setTableName ):
-        sqlCmd = "CREATE TABLE %s (path int(10) unsigned auto_increment primary key) select name, chr, start, end from %s;" % (setTableName, mapTableName)
-        self.execute(sqlCmd)
-        self.createIndex(setTableName, "set")
-    
-    
-    ## Convert an Align table into a Path table
-    #
-    # @param inAlignTable string name of the input Align table
-    # @param outPathTable string name of the output Path table
-    #
-    def convertAlignTableIntoPathTable( self, inAlignTable, outPathTable ):
-        self.createTable( outPathTable, "path", "", True )
-        sqlCmd = "SELECT * FROM %s" % ( inAlignTable )
-        self.execute( sqlCmd )
-        lResults = self.fetchall()
-        rowIndex = 0
-        for res in lResults:
-            rowIndex += 1
-            sqlCmd = "INSERT INTO %s" % ( outPathTable )
-            sqlCmd += " (path,query_name,query_start,query_end,subject_name,subject_start,subject_end,E_value,score,identity)"
-            sqlCmd += " VALUES ( '%i'" % ( rowIndex )
-            for i in res:
-                sqlCmd += ', "%s"' % ( i )
-            sqlCmd += " )"
-            self.execute( sqlCmd )
-        self.updateInfoTable( outPathTable, "" )
-        
-        
-    ## Give a list of instances according to the SQL command
-    #
-    # @param SQLCmd string is a SQL command
-    # @param methodGetInstance2Adapt a factory callable returning a new instance; it determines the type of the instances contained in lObjs. See example in Test_DbMySql.py.
-    # @return lObjs list of instances
-    #
-    def getObjectListWithSQLCmd( self, SQLCmd,  methodGetInstance2Adapt):
-        self.execute( SQLCmd )
-        res = self.fetchall()
-        lObjs = []
-        for t in res:
-            iObj = methodGetInstance2Adapt()
-            iObj.setFromTuple( t )
-            lObjs.append( iObj )
-        return lObjs
-    
-    
-    ## Give a list of integers according to the SQL command
-    #
-    # @param sqlCmd string is a SQL command
-    # @return lInteger integer list
-    #
-    def getIntegerListWithSQLCmd( self, sqlCmd ):
-        self.execute(sqlCmd)
-        res = self.fetchall()
-        lInteger = []
-        for t in res:
-            if t[0] != None:
-                lInteger.append(int(t[0]))
-        return lInteger
-    
-    
-    ## Give an integer according to the SQL command
-    #
-    # @param sqlCmd string is a SQL command
-    # @return nb integer 
-    #
-    def getIntegerWithSQLCmd( self, sqlCmd ):
-        self.execute(sqlCmd)
-        res = self.fetchall()
-        nb = res[0][0]
-        if nb == None:
-            nb = 0
-        return nb
-    
-    
-    ## Give a list of strings according to the SQL command
-    #
-    # @param sqlCmd string is a SQL command
-    # @return lString str list
-    #
-    def getStringListWithSQLCmd( self, sqlCmd ):
-        self.execute(sqlCmd)
-        res = self.fetchall()
-        lString = []
-        for i in res:
-            lString.append(i[0])
-        return lString
-    
-#TODO: use API to add indexes
-    ## Remove duplicate rows from a given table
-    #
-    # @param table string name of a MySQL table
-    #
-    def removeDoublons( self, table ):
-        tmpTable = "%s_%s" % ( table, time.strftime("%Y%m%d%H%M%S") )
-        sqlCmd = "CREATE TABLE %s SELECT DISTINCT * FROM %s" % ( tmpTable, table )
-        self.execute( sqlCmd )
-        self.dropTable( table )
-        self.renameTable(tmpTable, table)
-        
-        
-    ## Get a list of table names from a pattern
-    #
-    # @note for instance pattern = 'MyProject_%'
-    #
-    def getTableListFromPattern( self, pattern ):
-        if pattern == "*" or pattern == "%":
-            sqlCmd = "SHOW TABLES"
-        else:
-            sqlCmd = "SHOW TABLES like '%s'" % ( pattern )
-        lTables = self.getStringListWithSQLCmd( sqlCmd )
-        return lTables
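
The last methods are small generic helpers built on execute()/fetchall(). A sketch combining them, assuming the connection parameters come from the REPET_* environment variables and that the tables matching the pattern are path tables:

    from commons.core.sql.DbMySql import DbMySql

    db = DbMySql()  # falls back on REPET_HOST, REPET_USER, REPET_PW, REPET_DB
    for table in db.getTableListFromPattern("MyProject_%"):
        nbIds = db.getIntegerWithSQLCmd("SELECT COUNT(DISTINCT path) FROM %s" % table)
        print "%s: %i distinct paths" % (table, nbIds)
        db.exportDataToFile(table, "%s.tsv" % table, keepFirstLine = True)
    db.close()
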
--- a/commons/core/sql/DbSQLite.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,173 +0,0 @@
-import sqlite3
-import os
-import sys
-
-#TODO: update...compare with DbMySql.py
-class DbSQLite(object):
-    
-    ## Constructor
-    #
-    # @param host string db file path
-    #
-    # @note host is mandatory: if left blank, an error is written to stderr
-    #   and the program exits
-    #
-    def __init__(self, host = ""):
-        if host != "":
-            self.host = host
-        else:
-            msg = "ERROR: no host specified"
-            sys.stderr.write( "%s\n" % msg )
-            sys.exit(1)
-        # TODO: remove open() and the cursor from __init__ and use them directly outside this class
-        self.open()
-        self.cursor = self.db.cursor()
-    
-    ## Connect to the DbSQLite database
-    #
-    # @param verbose integer (default = 0)
-    #
-    def open( self, verbose = 0, nb = 0 ):
-        try:
-            #sqlite.connect(":memory:", check_same_thread = False)
-            self.db = sqlite3.connect(self.host, check_same_thread= False, isolation_level=None, detect_types=sqlite3.PARSE_DECLTYPES)
-        except sqlite3.Error, e:
-            if verbose > 0:
-                print "ERROR %s" % e
-                sys.stdout.flush()
-            return False
-        return True
-    
-    ## Execute a SQL query
-    #
-    # @param qry string SQL query to execute
-    # @param params parameters of SQL query 
-    #
-    def execute( self, qry, params=None ):
-        try : 
-            if params == None:
-                self.cursor.execute( qry )
-            else:
-                self.cursor.execute( qry, params )
-        except Exception, e:
-            #TODO: must be tested
-            try : 
-                if params == None:
-                    self.cursor.execute( qry )
-                else:
-                    self.cursor.execute( qry, params )
-            except Exception, e:
-                print "Error: %s" % e
-                    
-    ## Retrieve the results of a SQL query
-    #  
-    def fetchall(self):
-        return self.cursor.fetchall()
-    
-    ## Record a new table in the 'info_tables' table
-    #
-    # @param tableName string table name
-    # @param info string information on the table origin
-    #
-    def updateInfoTable( self, tableName, info ):
-        if not self.doesTableExist( "info_tables" ):
-            sqlCmd = "CREATE TABLE info_tables ( name varchar(255), file varchar(255) )"
-            self.execute( sqlCmd )
-        sqlCmd = 'INSERT INTO info_tables VALUES ("%s","%s")' % (tableName, info)
-        self.execute( sqlCmd )
-   
-    def createTable(self, tableName, dataType, overwrite=False, verbose=0):
-        if verbose > 0:
-            print "creating table '%s' of type '%s'..." % (tableName, dataType)
-            sys.stdout.flush()
-        if overwrite:
-            self.dropTable(tableName)   
-        if dataType.lower() in ["job", "jobs"]:
-            self.createJobTable(tableName)
-        else:
-            print "ERROR: unknown type %s" % (dataType)
-            self.close()
-            sys.exit(1)
-        if verbose > 0:
-            print "done!"; sys.stdout.flush()
-    
-    ## Create a job table
-    #
-    # @param tablename new table name
-    #
-    def createJobTable( self, tablename ):
-        sqlCmd = "CREATE TABLE %s" % ( tablename )
-        sqlCmd += " ( jobid INT UNSIGNED"
-        sqlCmd += ", jobname VARCHAR(255)"
-        sqlCmd += ", groupid VARCHAR(255)"
-        sqlCmd += ", command TEXT"
-        sqlCmd += ", launcher VARCHAR(1024)"
-        sqlCmd += ", queue VARCHAR(255)"
-        sqlCmd += ", status VARCHAR(255)"
-        sqlCmd += ", time timestamp"
-        sqlCmd += ", node VARCHAR(255) )"
-        self.execute( sqlCmd )
-        
-        self.updateInfoTable( tablename, "job table" )
-        sqlCmd = "CREATE INDEX igroupid ON " + tablename + " ( groupid )"
-        self.execute( sqlCmd )
-    
-    ## Test if a table exists
-    #
-    # @param table string table name
-    # @return boolean True if the table exists, False otherwise
-    #       
-    def doesTableExist( self, table ):
-        qry = "PRAGMA table_info(%s)" % (table)
-        self.execute( qry )
-        results = self.cursor.fetchall()
-        if results:
-            return True
-        return False
-    
-    def isEmpty( self, tableName ):
-        return self.getSize( tableName ) == 0
-    
-    ## Give the number of rows in the table
-    #
-    # @param tableName string table name
-    #
-    def getSize( self, tableName ):
-        qry = "SELECT count(*) FROM %s;" % ( tableName )
-        self.execute( qry )
-        res = self.fetchall()
-        return int( res[0][0] )
-    
-    ## Remove a table if it exists
-    #
-    # @param table string table name
-    # @param verbose integer (default = 0)
-    #
-    def dropTable( self, table, verbose = 0 ):
-        if self.doesTableExist( table ):
-            sqlCmd = "DROP TABLE %s" % ( table )
-            self.execute( sqlCmd )
-            sqlCmd = 'DELETE FROM info_tables WHERE name = "%s"' % ( table )
-            self.execute( sqlCmd )
-            
-    ## Get a list with the fields
-    #                    
-    def getFieldList( self, table ):
-        lFields = []
-        sqlCmd = "PRAGMA table_info(%s)" % ( table )
-        self.execute( sqlCmd )
-        lResults = self.fetchall()
-        for res in lResults:
-            lFields.append( res[1] )
-        return lFields
-    
-    ## Delete the SQLite database file
-    #
-    def delete(self):
-        os.remove(self.host)
-        
-    ## Close the connection
-    #   
-    def close( self ):
-        self.db.close()
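
DbSQLite currently only knows how to create the 'jobs' table used by the job adaptators. A minimal usage sketch (the database file name is a placeholder):

    from commons.core.sql.DbSQLite import DbSQLite

    db = DbSQLite("jobs.sqlite")
    if not db.doesTableExist("jobs"):
        db.createTable("jobs", "jobs", overwrite = False)
    print "fields: %s" % db.getFieldList("jobs")
    db.close()
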
--- a/commons/core/sql/ITableMapAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,113 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-## Interface for TableMapAdaptator 
-#
-class ITableMapAdaptator(object):
-  
-    ## Insert a map instance
-    #
-    # @param obj map or set
-    # @param delayed boolean must the insert be delayed 
-    #
-    # @warning old name was insAMap
-    #
-    def insert(self, obj, delayed=False):
-        pass
-
-        
-    ## Insert a list of Map or Set or Match instances
-    #
-    # @param l a list of object instances
-    # @param delayed boolean
-    #
-    # @warning old name was insMapList
-    #
-    def insertList(self, l, delayed = False):
-        pass
-    
-    ## Give a list of the distinct seqName/chr present in the table
-    #
-    # @return lDistinctContigNames string list
-    #
-    # @warning old name was getContig_name
-    #
-    def getSeqNameList(self):
-        pass
-    
-    
-    ## Give a list of Map instances having a given seq name
-    #
-    # @param seqName string seq name
-    # @return lMap list of instances
-    #
-    # @warning old name was get_MapList_from_contig
-    #
-    def getMapListFromSeqName(self, seqName):
-        pass
-    
-    
-    ## Return a list of Set instances from a given sequence name
-    #
-    # @param seqName string sequence name
-    # @return lSets list of Set instances
-    #
-    # @warning old name was getSetList_from_contig 
-    #
-    def getSetListFromSeqName( self, seqName ):
-        pass
-
-    
-    ## Give a list of Map instances overlapping a given region
-    #
-    # @param seqName string seq name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lMap list of map instances
-    #
-    # @warning old name was getMapList_from_qcoord
-    #
-    def getMapListOverlappingCoord(self, seqName, start, end):
-        pass
-    
-    
-    ## Return a list of Set instances overlapping a given region
-    #   
-    # @param seqName string sequence name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lSet list of Set instances
-    #
-    # @warning old name was getSetList_from_qcoord
-    #
-    def getSetListOverlappingCoord( self, seqName, start, end ):
-        pass
-    
\ No newline at end of file
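
The ITable*Adaptator classes only document the contract; concrete adaptators implement them on top of a DbMySql or DbSQLite handle and a table name, following the (db, tableName) constructor pattern used elsewhere in this module. A hedged sketch of how an implementation of ITableMapAdaptator would typically be consumed (TableMapAdaptator is assumed to be that implementation, and the table name is a placeholder):

    from commons.core.sql.DbMySql import DbMySql
    from commons.core.sql.TableMapAdaptator import TableMapAdaptator

    db = DbMySql(cfgFileName = "repet.cfg")
    mapAdaptator = TableMapAdaptator(db, "MyProject_map")
    for seqName in mapAdaptator.getSeqNameList():
        lMaps = mapAdaptator.getMapListOverlappingCoord(seqName, 1, 10000)
        print "%s: %i overlapping features" % (seqName, len(lMaps))
    db.close()
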
--- a/commons/core/sql/ITableMatchAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,68 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-## Interface for TableMatchAdaptator
-#
-class ITableMatchAdaptator(object):
-        
-    ## Give a list of Match instances given a query name
-    #
-    # @param query string sequence name
-    # @return lMatches list of Match instances
-    #
-    def getMatchListFromQuery( self, query ):
-        pass
-    
-    ## Give a list of Match instances having the same identifier
-    #
-    # @param id integer identifier number
-    # @return lMatch a list of Match instances
-    #
-    def getMatchListFromId( self, id ):
-        pass
-    
-    ## Insert a Match instance
-    #
-    # @param iMatch a Match instance
-    # @param delayed boolean
-    #
-    def insert(self, iMatch, delayed = False):
-        pass  
-        
-    ## Insert a list of Map or Set or Match instances
-    #
-    # @param l a list of object instances
-    # @param delayed boolean
-    #
-    # @warning old name was insMapList
-    #
-    def insertList(self, l, delayed = False):
-        pass
\ No newline at end of file
--- a/commons/core/sql/ITablePathAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,429 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-## Interface for TablePathAdaptator
-#
-class ITablePathAdaptator (object):
-
-    ## Give the data contained in the table as a list of Path instances
-    #
-    # @return lPaths list of path instances
-    #
-    def getListOfAllPaths( self ):
-        pass
-    
-    ## Give a list of Path instances having the same identifier
-    #
-    # @param id integer identifier number
-    # @return lPath a list of Path instances
-    #
-    # @warning old name was getPathList_from_num
-    #
-    def getPathListFromId( self, id ):
-        pass
-
-    ## Give a list of Path instances according to the given list of identifier numbers
-    #
-    # @param lId integer list 
-    # @return lPath a list of Path instances
-    #
-    # @warning old name was getPathList_from_numlist
-    #
-    def getPathListFromIdList( self, lId ):
-        pass
-        
-    ## Give a list of Path instances having the same given query name
-    #
-    # @param query string name of the query 
-    # @return lPath a list of Path instances
-    #
-    # @warning old name was getPathList_from_query
-    #
-    def getPathListFromQuery( self, query ):
-        pass
-    
-    ## Give a list with all the distinct identifiers corresponding to the query
-    #
-    # @param query string name of the query 
-    # @return lId a list of integer
-    #
-    # @warning old name was getPathList_from_query
-    #
-    def getIdListFromQuery( self, query ):
-        pass
-    
-    ## Give a list with all the distinct identifiers corresponding to the subject
-    #
-    # @param subject string name of the subject 
-    # @return lId a list of integer
-    #
-    # @warning old name was getPathList_from_subject
-    #
-    def getIdListFromSubject( self, subject ):
-        pass
-    
-    ## Insert a path instance
-    #
-    # @param obj a path instance
-    # @param delayed boolean indicating if the insert must be delayed
-    #
-    # @note data are inserted such that the query is always on the direct strand
-    #
-    # @warning old name was insAPath
-    #
-    def insert(self, obj, delayed = False):
-        pass
-    
-    ## Insert a list of Path instances
-    #
-    # @param l a list of Path instances
-    # @param delayed boolean
-    #
-    # @warning old name was insPathList
-    #
-    def insertList(self, l, delayed = False):
-        pass
-    
-    ## Give a list of the identifier numbers contained in the table
-    #
-    # @return l integer list
-    #
-    # @warning old name was getPath_num
-    #
-    def getIdList(self):
-        pass
-    
-    ## Give a list of Path instances having the same given subject name
-    #
-    # @param subject string name of the subject 
-    # @return lPath a list of Path instances
-    #
-    # @warning old name was getPath_num
-    #
-    def getPathListFromSubject( self, subject ):
-        pass
-    
-    ## Give a list of the distinct subject names present in the table
-    #
-    # @return lDistinctTypeNames string list
-    #
-    # @warning old name was getListDistinctSubjectName
-    #
-    def getSubjectList(self):
-        pass
-    
-    ## Give a list of the distinct query names present in the table
-    #
-    # @return lDistinctQueryNames string list
-    #
-    # @warning old name was getListDistinctQueryName
-    #
-    def getQueryList(self):
-        pass
-    
-    ## Give a list of the distinct subject names from the paths matching a given query name
-    #
-    # @param queryName string query name
-    # @return lSubjects list of subject names
-    #
-    def getSubjectListFromQuery (self, queryName):
-        pass
-    
-    ## Give a list of Path instances with the given query and subject, both on direct strand
-    #
-    # @param query string query name
-    # @param subject string subject name
-    # @return lPaths list of path instances
-    #
-    # @warning old name was getListPathsWithDirectQueryDirectSubjectPerQuerySubject
-    #
-    def getPathListWithDirectQueryDirectSubjectFromQuerySubject( self, query, subject ):
-        pass
-    
-    ## Give a list of Path instances with the given query on direct strand and the given subject on reverse strand
-    #
-    # @param query string query name
-    # @param subject string subject name
-    # @return lPaths list of path instances
-    #
-    # @warning old name was getListPathsWithDirectQueryReverseSubjectPerQuerySubject
-    #
-    def getPathListWithDirectQueryReverseSubjectFromQuerySubject( self, query, subject ):
-        pass
-    
-    ## Give the number of Path instances with the given query name
-    #
-    # @param query string query name
-    # @return pathNb integer the number of Path instances
-    #
-    # @warning old name was getNbPaths_from_query
-    #
-    def getNbPathsFromQuery( self, query ):
-        pass
-    
-    ## Give the number of Path instances with the given subject name
-    #
-    # @param subject string subject name
-    # @return pathNb integer the number of Path instances
-    #
-    # @warning old name was getNbPaths_from_subject
-    #
-    def getNbPathsFromSubject( self, subject ):
-        pass
-    
-    ## Give the number of distinct path identifiers
-    #
-    # @return idNb integer the number of Path instances
-    #
-    # @warning old name was getNbAllPathsnums
-    #
-    def getNbIds( self ):
-        pass
-    
-    ## Give the number of distinct path identifiers for a given subject
-    #
-    # @param subjectName string subject name
-    # @return idNb integer the number of Path instances
-    #
-    # @warning old name was getNbPathsnums_from_subject
-    #
-    def getNbIdsFromSubject( self, subjectName ):
-        pass
-    
-    ## Give the number of distinct path identifiers for a given query
-    #
-    # @param queryName string query name
-    # @return idNb integer the number of Path instances
-    #
-    # @warning old name was getNbPathsnums_from_query
-    #
-    def getNbIdsFromQuery( self, queryName ):
-        pass
-    
-    ## Give a list of Path instances overlapping a given region
-    #
-    # @param query string query name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lPath list of Path instances
-    #
-    def getPathListOverlappingQueryCoord( self, query, start, end ):
-        pass
-    
-    ## Give a list of Set instances overlapping a given region
-    #
-    # @param query string query name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lSet list of Set instances
-    #
-    # @warning old name was getSetList_from_qcoord
-    #
-    def getSetListOverlappingQueryCoord(self, query, start, end):
-        pass
-
-    ## Give a list of Path instances included in a given query region
-    #
-    # @param query string query name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lPaths list of Path instances
-    #
-    # @warning old name was getIncludedPathList_from_qcoord
-    #
-    def getPathListIncludedInQueryCoord( self, query, start, end ):
-        pass
-    
-    ## Give a list of Set instances included in a given region
-    #
-    # @param query string query name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lSet list of Set instances
-    #
-    # @warning old name was getInSetList_from_qcoord
-    #
-    def getSetListIncludedInQueryCoord(self, query, start, end):
-        pass
-    
-    ## Give a list of Path instances sorted by query coordinates
-    #
-    # @return lPaths list of Path instances
-    #
-    # @warning old name was getListOfPathsSortedByQueryCoord
-    #
-    def getPathListSortedByQueryCoord( self ):
-        pass
-    
-    ## Give a list of Path instances sorted by query coordinates for a given query
-    #
-    # @param queryName string query name
-    # @return lPaths list of Path instances
-    #
-    def getPathListSortedByQueryCoordFromQuery( self, queryName ):
-        pass
-    
-    ## Give a list of path instances sorted by increasing E-value
-    #
-    # @param queryName string query name
-    # @return lPaths list of path instances
-    #
-    def getPathListSortedByIncreasingEvalueFromQuery( self, queryName ):
-        pass
-
-    ## Give a cumulative length of all paths (fragments) for a given subject name
-    #
-    # @param subjectName string subject name
-    # @return nb cumulative length of all paths
-    # @warning doesn't take into account the overlaps !!
-    # @warning old name was getCumulPathLength_from_subject
-    #  
-    def getCumulLengthFromSubject( self, subjectName ):
-        pass
-    
-    ## Give a list of the length of all chains of paths for a given subject name
-    #
-    # @param subjectName string  name of the subject
-    # @return lChainLengths list of lengths per chain of paths
-    # @warning doesn't take into account the overlaps !!
-    # @warning old name was getListChainLength_from_subject
-    #
-    def getChainLengthListFromSubject( self, subjectName ):
-        pass
-
-    ## Give a list of the identities of all chains of paths for a given subject name
-    #
-    # @param subjectName string name of the subject
-    # @return lChainIdentities list of identities per chain of paths
-    # @warning doesn't take into account the overlaps !!
-    # @warning old name was getListChainIdentity_from_subject
-    # 
-    def getChainIdentityListFromSubject( self, subjectName ):
-        pass
-    
-    ## Give a list of Path lists sorted by weighted identity.
-    #
-    # @param qry query name
-    # @return lChains list of chains
-    #
-    def getListOfChainsSortedByAscIdentityFromQuery( self, qry ):
-        pass
-    
-    ## Give a list of the length of all paths for a given subject name
-    #
-    # @param subjectName string name of the subject
-    # @return lPathLengths list of lengths per path
-    # @warning doesn't take into account the overlaps !!
-    # @warning old name was getListPathLength_from_subject
-    #
-    def getPathLengthListFromSubject( self, subjectName ):
-        pass
-    
-    ## Give a list with all distinct identifiers for a given subject, sorted in decreasing order according to the length of the chains
-    #
-    # @param subjectName string name of the subject
-    # @return lPathNums a list of path identifiers
-    #
-    # @warning old name was getPathNumListSortedByDecreasingChainLengthFromSubject
-    #
-    def getIdListSortedByDecreasingChainLengthFromSubject( self, subjectName ):
-        pass
-    
-    ## Give a list of Set instances built from the paths matching a given query name
-    #
-    # @param query string query name
-    # @return lSet list of Set instances
-    #
-    # @warning old name was getSetList_from_contig
-    #
-    def getSetListFromQuery(self, query):
-        pass
-    
-    ## Delete path corresponding to a given identifier number
-    #
-    # @param id integer identifier number
-    #
-    # @warning old name was delPath_from_num
-    #
-    def deleteFromId(self,id):
-        pass
-    
-    ## Delete paths corresponding to a given list of identifier numbers
-    #
-    # @param lId list of identifier numbers
-    #
-    # @warning old name was delPath_from_numlist
-    #
-    def deleteFromIdList(self,lId):
-        pass
-
-    ## Join two paths by changing the identifier of both paths to the lesser of id1 and id2
-    #
-    # @param id1 integer path number
-    # @param id2 integer path number
-    # @return newId integer id used to join
-    #
-    # @warning old name was joinPath
-    #
-    def joinTwoPaths(self,id1,id2):
-        pass
-    
-    ## Get a new id number
-    #
-    # @return newId integer new id
-    #
-    def getNewId(self):
-        pass
-    
-    ## Test if table is empty
-    #    
-    def isEmpty( self ):
-        pass
-    
-    ## Create a 'pathRange' table from a 'path' table. 
-    # The output table summarizes the information per identifier. 
-    # The min and max values are taken. 
-    # The identity is averaged over the fragments. 
-    # It may overwrite an existing table.
-    #
-    # @param outTable string name of the output table
-    # @return outTable string Table which summarizes the information per identifier
-    #
-    def path2PathRange( self, outTable="" ):
-        pass
-    
-    ## Return the number of times a given Path instance is present in the table.
-    # The identifier is not considered; only coordinates, score, E-value
-    # and identity are compared.
-    #
-    # @return nbOcc integer
-    #
-    def getNbOccurrences( self, iPath ):
-        pass
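For orientation, here is a minimal, hedged usage sketch of the path interface deleted above. The concrete TablePathAdaptator implementation, the way the DbMySql connection is opened and the table name are assumptions made for the example, not taken from this changeset.

# Hedged sketch only: assumes an already-open DbMySql connection and a
# concrete TablePathAdaptator implementing the interface above.
from commons.core.sql.DbMySql import DbMySql
from commons.core.sql.TablePathAdaptator import TablePathAdaptator

iDb = DbMySql()                                  # assumption: default connection settings
tpa = TablePathAdaptator(iDb, "my_path_table")   # hypothetical table name
for queryName in tpa.getQueryList():             # distinct query names in the table
    print "%s: %i paths, %i chains" % (queryName,
                                       tpa.getNbPathsFromQuery(queryName),
                                       tpa.getNbIdsFromQuery(queryName))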
--- a/commons/core/sql/ITableSeqAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,63 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-## Interface for TableSeqAdaptator
-#
-class ITableSeqAdaptator(object):
-
-    ## Retrieve all the distinct accession names in a list.
-    #
-    # @return lAccessions list of accessions
-    #
-    # @warning old name was getListAccession
-    #
-    def getAccessionsList( self ):
-        pass
-    
-    ## Save sequences in a fasta file from a list of accession names.
-    # 
-    # @param lAccessions list of accessions
-    # @param outFileName string Fasta file
-    #
-    # @warning old name was saveListAccessionInFastaFile
-    #
-    def saveAccessionsListInFastaFile( self, lAccessions, outFileName ):
-        pass
-    
-    ## Insert a bioseq instance
-    #
-    # @param seq a bioseq instance
-    # @param delayed boolean indicating if the insert must be delayed
-    # 
-    # @warning old name was insASeq
-    #
-    def insert(self, seq, delayed = False):
-        pass
\ No newline at end of file
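A similarly hedged sketch for the sequence interface above; the connection object iDb, the table name and the output file name are illustrative assumptions.

# Sketch only: 'iDb' stands for an already-open database connection (assumption).
from commons.core.sql.TableSeqAdaptator import TableSeqAdaptator

tsa = TableSeqAdaptator(iDb, "my_seq_table")          # hypothetical table name
lAccessions = tsa.getAccessionsList()                 # all distinct accession names
tsa.saveAccessionsListInFastaFile(lAccessions, "allSequences.fa")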
--- a/commons/core/sql/ITableSetAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,146 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-## Interface for TableSetAdaptator
-#
-class ITableSetAdaptator (object):
-    
-    ## Insert a set instance
-    #
-    # @param obj a set instance
-    # @param delayed boolean indicating if the insert must be delayed
-    #
-    # @warning old name was insASet
-    #
-    def insert(self, obj, delayed = False):
-        pass
-
-    ## Insert a list of Set instances
-    #
-    # @param l a list of object instances
-    # @param delayed boolean
-    #
-    # @warning old name was insSetList
-    #
-    def insertList(self, l, delayed = False):
-        pass
-    
-    ## Give a list of identifier numbers contained in the table
-    #
-    # @return l integer list
-    #
-    # @warning old name was getSet_num
-    #
-    def getIdList(self):
-        pass
-    
-    ## Give a list of Set instances having a given seq name
-    #
-    # @param seqName string seq name
-    # @return lSets list of instances
-    #
-    # @warning old name was get_SetList_from_contig
-    #
-    def getSetListFromSeqName(self, seqName):
-        pass
-        
-    ## Give a list of Set instances with a given identifier number
-    #
-    # @param id integer identifier number
-    # @return lSet list of set instances
-    #
-    # @warning old name was getSetList_from_num
-    #
-    def getSetListFromId(self, id):
-        pass
-    
-    ## Give a list of Set instances from a list of identifier numbers
-    #
-    # @param lId integer list list of identifier numbers
-    # @return lSet list of set instances
-    #
-    # @warning old name was getSetList_from_numlist
-    #   
-    def getSetListFromIdList(self,lId):
-        pass
-    
-    ## Return a list of Set instances overlapping a given sequence
-    #   
-    # @param seqName string sequence name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lSet list of Set instances
-    #
-    # @warning old name was getSetList_from_qcoord
-    #
-    def getSetListOverlappingCoord( self, seqName, start, end ):
-        pass
-    
-    ## Delete set corresponding to a given identifier number
-    #
-    # @param id integer identifier number
-    #
-    # @warning old name was delSet_from_num 
-    #  
-    def deleteFromId(self, id):
-        pass
-    
-    ## Delete sets corresponding to a given list of identifier numbers
-    #
-    # @param lId integer list list of identifier numbers
-    #  
-    # @warning old name was delSet_from_listnum 
-    #
-    def deleteFromIdList(self, lId):
-        pass
-    
-    ## Join two sets by changing the identifier of both sets to the lesser of id1 and id2
-    #
-    # @param id1 integer set identifier
-    # @param id2 integer set identifier
-    #
-    # @warning old name was joinSet
-    #    
-    def joinTwoSets(self, id1, id2):
-        pass
-    
-    ## Get a new id number
-    #
-    # @return new_id integer max_id + 1 
-    #
-    def getNewId(self):
-        pass
-    
-    ## Give the data contained in the table as a list of Set instances
-    #
-    # @return lSets list of set instances
-    #
-    def getListOfAllSets( self ):
-        pass
\ No newline at end of file
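The identifier-handling methods above follow a simple convention, restated in the worked example below; the identifier values are illustrative only.

# Illustrative values, derived from the docstrings above, for an adaptator 'tSetA':
# if the set table currently holds identifiers 1, 2 and 5, then
#   tSetA.getNewId()       -> 6    (max id + 1)
#   tSetA.joinTwoSets(5, 2)        # both sets are re-labelled with id 2,
#                                  # the lesser of the two identifiers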
--- a/commons/core/sql/Job.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-## Job information needed to launch a command on a cluster.
-#
-class Job(object):
-    
-    ## Constructor
-    #
-    #   @param jobid the job identifier
-    #   @param jobname the job name
-    #   @param groupid the group identifier to record related job series 
-    #   @param queue queue name of the job manager
-    #   @param command command launched
-    #   @param node cluster node name where the execution takes place
-    #   @param launcherFile file name launched as job
-    #   @param lResources resources (memory, time...); must conform to SGE/Torque syntax
-    #   @param parallelEnvironment name of the parallel environment, if any
-    #
-    def __init__(self, jobid=0, jobname="", groupid="", queue="", command="", launcherFile="",\
-                  node="", lResources=["mem_free=1G"], parallelEnvironment="" ):
-        if str(jobid).isdigit():
-            self.jobid = int(jobid)
-            self.jobname = jobname
-        else:
-            # a non-numeric 'jobid' is treated as the job name
-            self.jobname = jobid
-            self.jobid = 0
-        self.groupid = groupid
-        self.setQueue(queue)
-        self.command = command
-        self.launcher = launcherFile
-        self.node = node
-        self.lResources = lResources
-        self.parallelEnvironment = parallelEnvironment
-        
-    def setQueue(self, queue):
-        self.queue = ""
-        if queue != "none":
-            self.queue = queue
-    
-    def __eq__(self, o):
-        if self.jobid == o.jobid and self.jobname == o.jobname\
-         and self.groupid == o.groupid and self.queue == o.queue and self.command == o.command \
-         and self.launcher == o.launcher and self.node == o.node and self.lResources == o.lResources \
-         and self.parallelEnvironment == o.parallelEnvironment:
-            return True
-        return False
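As an example of the constructor documented above, a job requesting 4 GB of memory on an SGE queue could be declared as follows; the job, group, queue and file names are illustrative.

# Illustrative only; resource strings must follow the SGE/Torque syntax,
# as noted in the constructor documentation.
from commons.core.sql.Job import Job

job = Job(jobname="blast_chunk_1",
          groupid="blast_run",
          queue="all.q",
          command="touch chunk_1.done",
          launcherFile="launcher_blast_chunk_1.py",
          lResources=["mem_free=4G"])
# Passing queue="none" would leave job.queue empty (see setQueue above).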
--- a/commons/core/sql/JobAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,271 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-import os
-import time
-import sys
-import tempfile
-import subprocess
-from commons.core.sql.Job import Job
-
-## Methods for Job persistence 
-#
-class JobAdaptator(object):
-    
-    def __init__(self, lJob = [], table = "" ):
-        self._lJobID = lJob
-        self._table = table
-        self._acronym = ""
-    ## Record a job
-    #
-    # @param job Job instance with the job informations
-    #
-    def recordJob(self, job):
-        self._lJobID.append(job)
-    
-    ## Remove a job from the job table
-    #
-    #  @param job: job instance to remove
-    #
-    def removeJob(self, job):
-        pass         
-            
-    ## Set the jobid of a job with the id of SGE
-    #
-    # @param job job instance
-    # @param jobid integer
-    #
-    def updateJobIdInDB(self, job, jobid):
-        pass
-        
-    ## Get a job status
-    #
-    # @param job: a Job instance with the job informations
-    #
-    def getJobStatus(self, job):
-        pass
-    
-    
-    ## Change a job status
-    #
-    # @param job: a Job instance with the job informations
-    # @param status: the new status (waiting,finished,error)
-    #
-    def changeJobStatus(self, job, status):
-        pass
-        
-    ## Get the number of jobs belonging to the desired groupid with the desired status.
-    #
-    # @param groupid string a group identifier to record related job series 
-    # @param status string job status (waiting, running, finished, error)
-    # @return int
-    #
-    def getCountStatus(self, groupid, status):
-        pass
-        
-    ## Clean all jobs from a job group
-    #
-    # @param groupid: a group identifier to record related job series
-    #
-    def cleanJobGroup(self, groupid):
-        pass            
-            
-    ## Check if there are unfinished jobs in a job group.
-    #
-    # @param groupid string a group identifier to record related job series 
-    #        
-    def hasUnfinishedJob(self, groupid):
-        pass
-
-    def _getJobIDListFromQstat(self):
-        lJobIDFromQstat = []
-        tmp = tempfile.NamedTemporaryFile(delete=False)
-        cmd ="qstat | grep %s" % self._acronym
-        process = subprocess.Popen(cmd, shell=True,stdout=tmp)
-        process.communicate()
-        tmp.close()
-        if process.returncode == 0:
-            fileName = tmp.name
-            jobidFileHandler = open(fileName, "r")        
-            for line in jobidFileHandler:
-                line2 = line.lstrip(" ")
-                lJobIDFromQstat.append(line2.split(" ")[0])
-            jobidFileHandler.close()
-            os.remove(fileName)
-        return lJobIDFromQstat     
-     
-    def _areJobsStillRunning(self, lJobID, lJobIDFromQstat):
-        # sorted() returns a new list and was a no-op here; a simple
-        # membership test on integer job IDs is enough
-        sQstatIDs = set(int(j) for j in lJobIDFromQstat)
-        for i in lJobID:
-            if int(i) in sQstatIDs:
-                return True
-        return False
-                
-        
-    ## Wait for all jobs of a group to reach the finished status.
-    #  Jobs in error are re-launched (max. 3 times)
-    #
-    # @param groupid string a group identifier to record related job series
-    # @param checkInterval integer time lapse in seconds between two checks (default = 5)
-    # @param maxRelaunch integer max nb of times a job in error is relaunched before exiting (default = 3)
-    # @param exitIfTooManyErrors boolean exit if a job is still in error above maxRelaunch (default = True)
-    # @param timeOutPerJob integer max nb of seconds after which one tests if a job is still in SGE or not (default = 60*60=1h)
-    #
-    def waitJobGroup(self, groupid, checkInterval=5, maxRelaunch=3, exitIfTooManyErrors=True, timeOutPerJob=60*60):
-        
-        while True:
-            time.sleep(checkInterval)
-            lJobIDFromQstat = self._getJobIDListFromQstat()
-            if self._areJobsStillRunning(self._lJobID, lJobIDFromQstat) == False:
-                break
-    
-    ## Submit a job to a queue and record it in the job table.
-    #
-    # @param job a job instance
-    # @param maxNbWaitingJobs integer max nb of waiting jobs before submitting a new one (default = 10000)
-    # @param checkInterval integer time lapse in seconds between two checks (default = 30)
-    # @param verbose integer (default = 0)
-    #               
-    def submitJob(self, job, verbose=0, maxNbWaitingJobs=10000, checkInterval=30):
-        cmd = self._getQsubCommand(job)
-        tmp = tempfile.NamedTemporaryFile(delete=False)
-        process = subprocess.Popen(cmd, shell=True,stdout=tmp)
-        process.communicate()
-        tmp.close()
-        if process.returncode == 0:
-            fileName = tmp.name
-            jobidFileHandler = open(fileName, "r")
-            jobid = self._getJobidFromJobManager(jobidFileHandler)
-            if verbose > 0:
-                print "job '%i %s' submitted" % (jobid, job.jobname)
-                sys.stdout.flush()
-            job.jobid = jobid
-            #newJob= Job(job.jobid, job.jobname, job.groupid, job.queue, job.command, job.launcher, job.node, job.lResources, job.parallelEnvironment)
-            self._acronym = job.jobname.split("_")[0][:10]
-            self.recordJob(job.jobid)
-            jobidFileHandler.close()
-            os.remove(fileName)
-        return process.returncode
-
-
-    ## Get the list of nodes where jobs of one group were executed
-    #
-    # @param groupid string a group identifier of job series 
-    # @return lNodes list of node names without redundancy
-    #
-    def getNodesListByGroupId(self, groupId):
-        pass
-    
-    def checkJobTable(self):
-        pass
-    
-    def close(self):
-        pass
-    
-    def _getJobidAndNbJob(self, jobid) :
-        tab = jobid.split(".")
-        jobid = tab[0]
-        tab = tab[1].split(":")
-        nbJob = tab[0]
-        return jobid, nbJob
-    
-class JobAdaptatorSGE(JobAdaptator):
-
-    ## Check if a job is still handled by SGE
-    #
-    # @param jobid string job identifier
-    # @param jobname string job name
-    #  
-    def isJobStillHandledBySge(self, jobid, jobname):
-        isJobInQstat = False
-        tmp = tempfile.NamedTemporaryFile(delete=False)
-        cmd = "qstat"
-        process = subprocess.Popen(cmd, shell=True,stdout=tmp)
-        process.communicate()
-        tmp.close()
-        qstatFile = tmp.name
-        if process.returncode  != 0:
-            msg = "ERROR while launching 'qstat'"
-            sys.stderr.write( "%s\n" % msg )
-            sys.exit(1)
-        qstatFileHandler = open(qstatFile, "r")
-        lLines = qstatFileHandler.readlines()
-        for line in lLines:
-            tokens = line.split()
-            if len(tokens) > 3 and tokens[0] == str(jobid) and tokens[2] == jobname[0:len(tokens[2])]:
-                isJobInQstat = True
-                break
-        qstatFileHandler.close()
-        os.remove(qstatFile)
-        return isJobInQstat
-    
-    def _getQsubCommand(self, job):    
-        cmd = "echo '%s' | " % job.launcher
-        prg = "qsub"
-        cmd += prg
-        cmd += " -V"
-        cmd += " -N %s" % job.jobname
-        if job.queue != "":
-            cmd += " -q %s" % job.queue
-        cmd += " -cwd"
-        if job.lResources != []:
-            cmd += " -l \""
-            cmd += " ".join(job.lResources)
-            cmd += "\""
-        if job.parallelEnvironment != "":
-            cmd += " -pe " + job.parallelEnvironment
-        return cmd
-    
-    def _getJobidFromJobManager(self, jobidFileHandler):
-        return int(jobidFileHandler.readline().split(" ")[2])
-    
-
-class JobAdaptatorTorque(JobAdaptator):  
-        
-    def _getQsubCommand(self, job):    
-        cmd = "echo '%s' | " % job.launcher
-        prg = "qsub"
-        cmd += prg
-        cmd += " -V"
-        cmd += " -d %s" % os.getcwd()
-        cmd += " -N %s" % job.jobname
-        if job.queue != "":
-            cmd += " -q %s" % job.queue
-        if job.lResources != []:
-            cmd += " -l \""
-            cmd += " ".join(job.lResources).replace("mem_free","mem")
-            cmd += "\""
-        return cmd
-
-    def _getJobidFromJobManager(self, jobidFileHandler):
-        return int(jobidFileHandler.readline().split(".")[0])
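For reference, the command strings built by the two _getQsubCommand variants above differ mainly in the working-directory and resource options. A hedged trace for the illustrative job of the previous sketch:

# Illustrative trace of the command builders above.
# JobAdaptatorSGE()._getQsubCommand(job) returns something like:
#   echo 'launcher_blast_chunk_1.py' | qsub -V -N blast_chunk_1 -q all.q -cwd -l "mem_free=4G"
# JobAdaptatorTorque()._getQsubCommand(job) replaces -cwd with -d <current dir>
# and rewrites mem_free into mem:
#   echo 'launcher_blast_chunk_1.py' | qsub -V -d /current/dir -N blast_chunk_1 -q all.q -l "mem=4G"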
--- a/commons/core/sql/OldRepetDB.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-import pyRepet.sql.RepetDBMySQL
-
-
-class RepetDB ( pyRepet.sql.RepetDBMySQL.RepetDB ):
-    
-    #TODO: try 
-    def execute( self, qry, params=None ):
-        if params == None:
-            self.cursor.execute( qry )
-        else:
-            self.cursor.execute( qry, params )
-            
-            
-    ## Record a new table in the 'info_tables' table
-    #
-    # @param tablename table name
-    # @param info information on the origin of the table
-    # 
-    def updateInfoTable( self, tablename, info ):
-        self.execute( """SHOW TABLES""" )
-        results = self.fetchall()
-        if ("info_tables",) not in results:
-            sqlCmd = "CREATE TABLE info_tables ( name varchar(255), file varchar(255) )"
-            self.execute( sqlCmd )
-        qryParams = "INSERT INTO info_tables VALUES (%s, %s)"
-        params = ( tablename, info )
-        self.execute( qryParams,params )
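Spelled out with illustrative arguments, updateInfoTable above issues the following statements the first time it is called:

# Illustrative expansion of updateInfoTable("my_path_table", "chunks.fa"):
#   SHOW TABLES
#   CREATE TABLE info_tables ( name varchar(255), file varchar(255) )   -- only if the table is missing
#   INSERT INTO info_tables VALUES ('my_path_table', 'chunks.fa')       -- sent as a parameterized query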
--- a/commons/core/sql/RepetJob.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,252 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-import os
-import time
-import sys
-from commons.core.sql.DbMySql import DbMySql
-from commons.core.sql.TableJobAdaptatorFactory import TableJobAdaptatorFactory
-
-#TODO: to remove... => replace all RepetJob() by TableJobAdaptator()...
-## Methods for Job persistence 
-#
-class RepetJob( DbMySql ):
-        
-        
-    ## Record a job
-    #
-    # @param job Job instance with the job informations
-    #
-    def recordJob( self, job ):
-        self.removeJob( job )
-        sqlCmd = "INSERT INTO %s" % ( job.tablename )
-        sqlCmd += " VALUES ("
-        sqlCmd += " \"%s\"," % ( job.jobid )
-        sqlCmd += " \"%s\"," % ( job.jobname )
-        sqlCmd += " \"%s\"," % ( job.groupid )
-        sqlCmd += " \"%s\"," % ( job.command.replace("\"","\'") )
-        sqlCmd += " \"%s\"," % ( job.launcher )
-        sqlCmd += " \"%s\"," % ( job.queue )
-        sqlCmd += " \"waiting\","
-        sqlCmd += " \"%s\"," % ( time.strftime( "%Y-%m-%d %H:%M:%S" ) )
-        sqlCmd += " \"?\" );"
-        self.execute( sqlCmd )
-        
-        
-    ## Remove a job from the job table
-    #
-    #  @param job: job instance to remove
-    #
-    def removeJob( self, job ):
-        qry = "DELETE FROM %s" % ( job.tablename )
-        qry += " WHERE groupid='%s'" % ( job.groupid )
-        qry += " AND jobname='%s'" % ( job.jobname )
-        qry += " AND queue='%s';" % ( job.queue )
-        self.execute( qry )
-            
-            
-    ## Set the jobid of a job with the id of SGE
-    #
-    # @param job job instance
-    # @param jobid integer
-    #
-    def setJobIdFromSge( self, job, jobid ):
-        qry = "UPDATE %s" % ( job.tablename )
-        qry += " SET jobid='%i'" % ( int(jobid) )
-        qry += " WHERE jobname='%s'" % ( job.jobname )
-        qry += " AND groupid='%s'" % ( job.groupid )
-        qry += " AND queue='%s';" % ( job.queue )
-        self.execute( qry )
-        
-        
-    ## Get a job status
-    #
-    # @param job: a Job instance with the job informations
-    #
-    def getJobStatus( self, job ):
-        if job.jobid != 0 and job.jobname == "":
-            job.jobname = job.jobid
-            job.jobid = 0
-        qry = "SELECT status FROM %s" % ( job.tablename )
-        qry += " WHERE groupid='%s'" % ( job.groupid )
-        qry += " AND jobname='%s'" % ( job.jobname )
-        qry += " AND queue='%s';" % ( job.queue )
-        self.execute( qry )
-        res = self.fetchall()
-        if len(res) > 1:
-            msg = "ERROR while getting job status: non-unique jobs"
-            sys.stderr.write( "%s\n" % msg )
-            sys.stderr.flush()
-            sys.exit(1)
-        if res == None or len(res) == 0:
-            return "unknown"
-        return res[0][0]
-    
-    
-    ## Change a job status
-    #
-    # @param job: a Job instance with the job informations
-    # @param status: the new status (waiting,finished,error)
-    # @param method: db or file
-    #
-    def changeJobStatus( self, job, status, method=""):
-        sqlCmd = "UPDATE %s" % ( job.tablename )
-        sqlCmd += " SET status='%s'" % ( status )
-        sqlCmd += ",node='%s'" % ( job.node )
-        sqlCmd += " WHERE groupid='%s'" % ( job.groupid )
-        sqlCmd += " AND jobname='%s'" % ( job.jobname )
-        sqlCmd += " AND queue='%s';" % ( job.queue )
-        self.execute( sqlCmd )
-        
-        
-    ## Get the number of jobs belonging to the desired groupid with the desired status.
-    #
-    # @param tablename string table name to record the jobs   
-    # @param groupid string a group identifier to record related job series 
-    # @param status string job status (waiting, running, finished, error)
-    # @return int
-    #
-    def getCountStatus( self, tablename, groupid, status ):
-        qry = "SELECT count(jobname) FROM %s" % ( tablename )
-        qry += " WHERE groupid='%s'" % ( groupid )
-        qry += " AND status='%s';" % ( status )
-        self.execute( qry )
-        res = self.fetchall()
-        return int( res[0][0] )
-        
-        
-    ## Clean all jobs from a job group
-    #
-    # @param tablename table name to record the jobs
-    # @param groupid: a group identifier to record related job series
-    #
-    def cleanJobGroup( self, tablename, groupid ):
-        if self.doesTableExist( tablename ):
-            qry = "DELETE FROM %s WHERE groupid='%s';" % ( tablename, groupid )
-            self.execute( qry )
-            
-            
-    ## Check if there are unfinished jobs in a job group.
-    #
-    # @param tablename string table name to record the jobs
-    # @param groupid string a group identifier to record related job series 
-    #        
-    def hasUnfinishedJob( self, tablename, groupid ):
-        if not self.doesTableExist( tablename ):
-            return False
-        qry = "SELECT * FROM %s" % ( tablename )
-        qry += " WHERE groupid='%s'" % ( groupid )
-        qry += " and status!='finished';" 
-        self.execute( qry )
-        res = self.fetchall()
-        if len(res) == 0:
-            return False
-        return True
-    
-         
-    ## Check if a job is still handled by SGE
-    #
-    # @param jobid string job identifier
-    # @param jobname string job name
-    #  
-    def isJobStillHandledBySge( self, jobid, jobname ):
-        isJobInQstat = False
-        qstatFile = "qstat_stdout"
-        cmd = "qstat > %s" % ( qstatFile )
-        returnStatus = os.system( cmd )
-        if returnStatus != 0:
-            msg = "ERROR while launching 'qstat'"
-            sys.stderr.write( "%s\n" % msg )
-            sys.exit(1)
-        qstatFileHandler = open( qstatFile, "r" )
-        lLines = qstatFileHandler.readlines()
-        for line in lLines:
-            tokens = line.split()
-            if len(tokens) > 3 and tokens[0] == str(jobid) and tokens[2] == jobname[0:len(tokens[2])]:
-                isJobInQstat = True
-                break
-        qstatFileHandler.close()
-        os.remove( qstatFile )
-        return isJobInQstat
-    
-    
-    ## Wait for all jobs of a group to reach the finished status.
-    #  Jobs in error are re-launched (max. 3 times)
-    #
-    # @param tableName string table name to record the jobs
-    # @param groupid string a group identifier to record related job series
-    # @param checkInterval integer time lapse in seconds between two checks (default = 5)
-    # @param maxRelaunch integer max nb of times a job in error is relaunched before exiting (default = 3)
-    # @param exitIfTooManyErrors boolean exit if a job is still in error above maxRelaunch (default = True)
-    # @param timeOutPerJob integer max nb of seconds after which one tests if a job is still in SGE or not (default = 60*60=1h)
-    #
-    def waitJobGroup(self, tableName, groupid, checkInterval=5, maxRelaunch=3, exitIfTooManyErrors=True, timeOutPerJob=60*60):
-        iTJA = TableJobAdaptatorFactory.createInstance(self, tableName)
-        iTJA.waitJobGroup(groupid, checkInterval, maxRelaunch, exitIfTooManyErrors, timeOutPerJob)
-                        
-    ## Submit a job to a queue and record it in the job table.
-    #
-    # @param job a job instance
-    # @param maxNbWaitingJobs integer max nb of waiting jobs before submitting a new one (default = 10000)
-    # @param checkInterval integer time lapse in seconds between two checks (default = 30)
-    # @param verbose integer (default = 0)
-    #               
-    def submitJob( self, job, verbose=0, maxNbWaitingJobs=10000, checkInterval=30 ):
-        iTJA = TableJobAdaptatorFactory.createInstance(self, job.tablename)
-        return iTJA.submitJob(job, verbose, maxNbWaitingJobs, checkInterval)
-                        
-        
-    ## Get the list of nodes where jobs of one group were executed
-    #
-    # @param tablename string table name where jobs are recorded
-    # @param groupid string a group identifier of job series 
-    # @return lNodes list of node names
-    #
-    def getNodesListByGroupId( self, tableName, groupId ):
-        qry = "SELECT node FROM %s" % tableName
-        qry += " WHERE groupid='%s'" % groupId
-        self.execute( qry )
-        res = self.fetchall()
-        lNodes = []
-        for resTuple in res:
-            lNodes.append(resTuple[0])
-        return lNodes
-    
-    def getDbName(self):
-        return "DbMySql"
-    
-    def _getJobidAndNbJob(self, jobid) :
-        tab = []
-        tab = jobid.split(".")
-        jobid = tab[0]
-        tab = tab[1].split(":")
-        nbJob = tab[0]
-        return jobid, nbJob
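The _getJobidAndNbJob helper above parses the identifier string returned by the batch system; a small worked example, assuming the input has the form '<jobid>.<nbJob>:<rest>':

# Worked example of _getJobidAndNbJob, derived from the code above;
# 'repetJob' stands for a hypothetical RepetJob instance.
#   "1234.8:0".split(".")  -> ["1234", "8:0"]
#   "8:0".split(":")       -> ["8", "0"]
jobid, nbJob = repetJob._getJobidAndNbJob("1234.8:0")    # ("1234", "8")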
--- a/commons/core/sql/TableAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,128 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-## Abstract class, Ancestor of Table*Adaptator
-#
-class TableAdaptator( object ):
-    
-    ## Constructor
-    #
-    # @param iDb DbMySql instance
-    # @param table str table name
-    #
-    def __init__( self, iDb = None, table = "" ):
-        self._iDb = iDb
-        self._table = table
-        
-    ## Set connector to database
-    #
-    # @param iDb database instance
-    #
-    def setDbConnector( self, iDb ):
-        self._iDb = iDb
-        
-    ## Set table
-    #
-    # @param table string table name
-    #
-    def setTable( self, table ):
-        self._table = table
-    
-    ## Return the table name
-    #
-    def getTable( self ):
-        return self._table
-        
-    ## Return the number of rows in the table
-    #
-    def getSize( self ):
-        return self._iDb.getSize( self._table )
-    
-    ## Test if table is empty
-    #    
-    def isEmpty( self ):
-        return self._iDb.isEmpty( self._table )
-    
-    ## Insert a Map, Set, Match, Path or Seq instance
-    #
-    # @param obj a Map or Set or Match or Path or Seq instance
-    # @param delayed boolean
-    #
-    def insert(self, obj, delayed = False):
-        if obj.isEmpty():
-            return
-        self._escapeAntislash(obj)
-        sql_cmd = self._genSqlCmdForInsert(obj, delayed)
-        self._iDb.execute(sql_cmd)
-    
-    ## Insert a list of Map or Set or Match or Path instances
-    #
-    # @param l a list of object instances
-    # @param delayed boolean
-    #
-    def insertList(self, l, delayed = False):
-        for i in l:
-            self.insert(i, delayed)
-            
-    ## Give the data contained in the table as a list of coord object instances
-    #
-    # @return lObject list of coord object instances
-    #
-    def getListOfAllCoordObject( self ):
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        lObjs = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lObjs
-    
-    ## Generate the SQL command to insert an object into the table
-    #  
-    # @param obj Map, Set or Match instance
-    # @param delayed boolean
-    # @return sqlCmd string generated sql command
-    #
-    def _genSqlCmdForInsert(self, obj, delayed):
-        sqlCmd = 'INSERT '
-        if delayed :
-            sqlCmd += ' DELAYED '
-        type2Insert, attr2Insert = self._getTypeAndAttr2Insert(obj)
-        sqlCmd +=  'INTO %s VALUES (' % (self._table) 
-        sqlCmd +=  ",".join(type2Insert)
-        sqlCmd += ")" 
-        sqlCmd = sqlCmd % attr2Insert
-        return sqlCmd
-   
-    def _getTypeAndAttr2Insert(self, obj):
-        pass
-    
-    def _getInstanceToAdapt(self):
-        pass
-    
-    def _escapeAntislash(self, obj):
-        pass
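To make the insert machinery above concrete, here is what _genSqlCmdForInsert would produce once a subclass supplies the placeholder and attribute tuples; the values are illustrative, since _getTypeAndAttr2Insert is abstract at this level.

# Illustrative only: suppose a subclass returns
#   type2Insert = ('"%s"', '%d', '%d')     # one placeholder per column
#   attr2Insert = ("chr1", 100, 250)       # already-escaped attribute values
# then _genSqlCmdForInsert(obj, delayed=False) builds:
#   INSERT INTO my_table VALUES ("chr1",100,250)
# and _genSqlCmdForInsert(obj, delayed=True) builds:
#   INSERT  DELAYED INTO my_table VALUES ("chr1",100,250)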
--- a/commons/core/sql/TableBinPathAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,257 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-from commons.core.coord.Range import getIdx
-from commons.core.sql.TablePathAdaptator import TablePathAdaptator
-from commons.core.coord.PathUtils import PathUtils
-
-## Bin Adaptator for a path table.
-#
-class TableBinPathAdaptator(TablePathAdaptator):
-
-    
-    ## Constructor
-    #
-    # @param db db instance
-    # @param tableName string table name (default = "")
-    #
-    def __init__(self, db, tableName = ""):
-        TablePathAdaptator.__init__(self, db, tableName)
-        self._table_idx = "%s_idx" % (self._table)
-            
-    ## Insert a path instance
-    #
-    # @param path a path instance
-    # @param delayed boolean indicating if the insert must be delayed (default = false) 
-    #        
-    def insert( self, path, delayed = False ):
-        TablePathAdaptator.insert(self, path, delayed)
-        self._escapeAntislash(path)
-        idx = path.range_query.findIdx()
-        max = path.range_query.getMax()
-        min = path.range_query.getMin()
-        strand = path.range_query.isOnDirectStrand()
-        if delayed:
-            sql_cmd = 'INSERT DELAYED INTO %s VALUES (%d,%d,"%s",%d,%d,%d)'\
-                 % (self._table_idx,\
-                   path.id,\
-                   idx,\
-                   path.range_query.seqname,\
-                   min,\
-                   max,\
-                   strand)
-        else:
-            sql_cmd = 'INSERT INTO %s VALUES (%d,%d,"%s",%d,%d,%d)'\
-                 % (self._table_idx,\
-                   path.id,\
-                   idx,\
-                   path.range_query.seqname,\
-                   min,\
-                   max,\
-                   strand)
-            
-        self._iDb.execute(sql_cmd)
-    
-    ## Return a list of Path instances included in a given region, using the bin scheme
-    #
-    # @param contig string contig name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lOutPath a path instances list
-    #
-    def getPathListIncludedInQueryCoord(self, contig, start, end):
-        min_coord = min(start, end)
-        max_coord = max(start, end)
-        lpath = self.getChainListOverlappingQueryCoord(contig, start, end)
-        lOutPath = []
-        for i in lpath:
-            if i.range_query.getMin() > min_coord and \
-               i.range_query.getMax() < max_coord:
-                lOutPath.append(i)
-                            
-        return lOutPath
-    
-    ## Return a list of Path instances overlapping (or included in) a given region, using the bin scheme
-    #
-    # @param contig string contig name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lOutPath a path instances list
-    #
-    def getPathListOverlappingQueryCoord(self, contig, start, end):
-        min_coord = min(start, end)
-        max_coord = max(start, end)
-        lpath = self.getChainListOverlappingQueryCoord(contig, start, end)
-        lOutPath = []
-        for i in lpath:
-            if ((i.range_query.getMin() <= min_coord and i.range_query.getMax() >= min_coord) or \
-                (i.range_query.getMin() >= min_coord and i.range_query.getMin() <= max_coord) or \
-                (i.range_query.getMin() <= min_coord and i.range_query.getMax() >= max_coord) or \
-                (i.range_query.getMin() >= min_coord and i.range_query.getMax() <= max_coord)) and \
-                (i.range_query.getSeqname() == contig):
-                    lOutPath.append(i)
-                    
-        return lOutPath
-    
-    ## Return the chains of Path instances (grouped by identifier and coordinates) overlapping a given region, using the bin scheme
-    #
-    # @param contig string contig name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lpath a path instances list
-    #    
-    def getChainListOverlappingQueryCoord(self, contig, start, end):
-        min_coord = min(start, end)
-        max_coord = max(start, end)
-        sql_cmd = 'select distinct path from %s where contig="%s" and ('\
-                 % (self._table + "_idx", contig)
-                 
-        for bin_lvl in xrange(6, 2, -1):
-            if getIdx(start,bin_lvl) == getIdx(end, bin_lvl):
-                idx = getIdx(start, bin_lvl)
-                sql_cmd += 'idx=%d' % (idx)
-            else:
-                idx1 = getIdx(min_coord, bin_lvl)
-                idx2 = getIdx(max_coord, bin_lvl)
-                sql_cmd += 'idx between %d and %d' % (idx1, idx2)
-            if bin_lvl > 3:
-                sql_cmd += " or "
-                
-        sql_cmd += ") and min<=%d and max>=%d;" % (max_coord, min_coord)
-
-        
-        self._iDb.execute(sql_cmd)
-        res = self._iDb.fetchall()
-        lnum = []
-        for i in res:
-            lnum.append( int(i[0]) )
-        lpath = self.getPathListFromIdList(lnum)
-        return lpath
-
-    ## Delete path corresponding to a given identifier number
-    #
-    # @param num integer identifier number
-    #
-    def deleteFromId(self, num):
-        TablePathAdaptator.deleteFromId(self, num)
-        sqlCmd='delete from %s where path=%d;' % (self._table_idx, num)
-        self._iDb.execute(sqlCmd)
-    
-    ## Delete paths corresponding to a given list of identifier numbers
-    #
-    # @param lNum list of integer identifier numbers
-    #
-    def deleteFromIdList(self, lNum):
-        if lNum == []:
-            return
-        TablePathAdaptator.deleteFromIdList(self, lNum)
-        sqlCmd = 'delete from %s where path=%d' % (self._table_idx, lNum[0])
-        for i in lNum[1:]:
-            sqlCmd += " or path=%d" % (i)
-        sqlCmd += ";"
-        self._iDb.execute(sqlCmd)
-             
-    ## Join two paths by changing the identifier of both paths to the lesser of id1 and id2
-    #
-    # @param id1 integer path identifier
-    # @param id2 integer path identifier
-    # @return newId integer minimum of id1 and id2
-    # @note this method modifies the ID even if it does not exist in the path table
-    #     
-    def joinTwoPaths(self, id1, id2):
-        TablePathAdaptator.joinTwoPaths(self, id1, id2)
-        if id1 < id2:
-            newId = id1
-            oldId = id2
-        else:
-            newId = id2
-            oldId = id1
-        sqlCmd = 'UPDATE %s SET path=%d WHERE path=%d' % (self._table_idx, newId, oldId)
-        self._iDb.execute(sqlCmd)
-        return newId
-    
-    ## Get a new id number
-    #
-    # @return newId integer max Id in path table + 1
-    #
-    def getNewId(self):
-        sqlCmd = 'select max(path) from %s;' % (self._table_idx)
-        self._iDb.execute(sqlCmd)
-        maxId = self._iDb.fetchall()[0][0]
-        if maxId == None:
-            maxId = 0
-        newId = int(maxId) + 1
-        return newId
-    
-    ## Give a list of Set instances included in a given region
-    #
-    # @param query string query name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lSet list of Set instances
-    #
-    def getSetListIncludedInQueryCoord(self, query, start, end):
-        lPath=self.getPathListIncludedInQueryCoord(query, start, end)
-        lSet = PathUtils.getSetListFromQueries(lPath) 
-        return lSet
-    
-    ## Give a list of Set instances overlapping a given region
-    #
-    # @param query string query name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lSet list of Set instances
-    #
-    def getSetListOverlappingQueryCoord(self, query, start, end):
-        lPath = self.getPathListOverlappingQueryCoord(query, start, end)
-        lSet = PathUtils.getSetListFromQueries(lPath)
-        return lSet
-    
-    ## Give a list of identifiers contained in the table
-    #
-    # @return lId integer list
-    #
-    def getIdList(self):
-        sqlCmd = "SELECT DISTINCT path from %s;" % (self._table_idx)
-        lId = self._iDb.getIntegerListWithSQLCmd( sqlCmd )
-        return lId
-        
-    ## Give a list of the distinct query names present in the table
-    #
-    # @return lDistinctQueryNames string list
-    #
-    def getQueryList(self):
-        lDistinctQueryNames = self._getDistinctTypeNamesList("query")
-        return lDistinctQueryNames
-    
-    def _getDistinctTypeNamesList( self, type ):
-        sqlCmd = "SELECT DISTINCT contig FROM %s" % ( self._table_idx )
-        lDistinctTypeNames = self._iDb.getStringListWithSQLCmd(sqlCmd)
-        return lDistinctTypeNames
\ No newline at end of file
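The bin scheme used by getChainListOverlappingQueryCoord above turns a region into one idx clause per bin level (6 down to 3); the actual idx values come from commons.core.coord.Range.getIdx and are not reproduced here, but the generated statement has the following shape.

# Shape of the SQL built above for contig "chr1", start=100, end=2000;
# idxN_min/idxN_max stand for getIdx() results at bin level N (a level
# produces 'idx=<value>' instead when start and end fall in the same bin):
#   select distinct path from my_path_table_idx where contig="chr1" and (
#       idx between idx6_min and idx6_max or
#       idx between idx5_min and idx5_max or
#       idx between idx4_min and idx4_max or
#       idx between idx3_min and idx3_max
#   ) and min<=2000 and max>=100;
# The matching identifiers are then turned into Path instances
# via getPathListFromIdList().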
--- a/commons/core/sql/TableBinSetAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,265 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-from commons.core.sql.TableSetAdaptator import TableSetAdaptator
-from commons.core.coord.SetUtils import SetUtils
-
-## Adaptator for Set tables with bin indexes
-#
-class TableBinSetAdaptator(TableSetAdaptator):
-   
-    ## constructor
-    #
-    # @param iDb DbMySql instance the database connection to use
-    # @param tableName string table name (default = "")
-    #
-    def __init__(self, iDb, tableName = ""):
-        TableSetAdaptator.__init__(self, iDb, tableName)
-        self._table_idx = "%s_idx" % (self._table)
-        
-    ## Insert a Set instance in the set table and its bin index table
-    # 
-    # @param iSet Set instance the Set object to insert
-    # @param delayed boolean whether to use a delayed insert
-    #
-    def insASetInSetAndBinTable(self, iSet, delayed = False):
-        self.insert(iSet, delayed)
-        iSet.seqname = iSet.seqname.replace("\\", "\\\\")
-        iSet.name = iSet.name.replace("\\", "\\\\")
-        bin = iSet.getBin()
-        max = iSet.getMax()
-        min = iSet.getMin()
-        strand = iSet.isOnDirectStrand()
-        sql_prefix = ''
-        if delayed:
-            sql_prefix = 'INSERT DELAYED INTO '
-        else:
-            sql_prefix = 'INSERT INTO '
-        sql_cmd = sql_prefix + '%s VALUES (%d,%f,"%s",%d,%d,%d)'\
-                 %(self._table_idx,\
-                   iSet.id,\
-                   bin,\
-                   iSet.seqname,\
-                   min,\
-                   max,\
-                   strand)
-        self._iDb.execute(sql_cmd)
-
-    ## Delete the set corresponding to a given identifier number in the set and bin tables
-    # @param id integer identifier number
-    # @note old name was delSet_from_num
-    #
-    def deleteFromIdFromSetAndBinTable(self, id):
-        self.deleteFromId(id)
-        sql_cmd = 'delete from %s where path=%d' % (self._table_idx, id)
-        self._iDb.execute(sql_cmd)
-
-    ## Delete the sets corresponding to a given list of identifier numbers
-    #
-    # @param lId integer list list of identifier numbers
-    # @note old name was delSet_from_listnum
-    #
-    def deleteFromListIdFromSetAndBinTable(self, lId):
-        if lId != []:
-            self.deleteFromIdList(lId)
-            sql_cmd = 'delete from %s where path=%d' % (self._table_idx, lId[0])
-            for i in lId[1:]:
-                sql_cmd += " or path=%d" % (i)
-            self._iDb.execute(sql_cmd)
-
-    ## Join two sets by changing the identifier of both to the lesser
-    # of id1 and id2
-    #
-    # @param id1 integer path identifier
-    # @param id2 integer path identifier
-    # @return id integer new identifier
-    # @note old name was joinSet
-    #
-    def joinTwoSetsFromSetAndBinTable(self, id1, id2):
-        self.joinTwoSets(id1, id2)
-        if id1 < id2:
-            new_id = id1
-            old_id = id2
-        else:
-            new_id = id2
-            old_id = id1
-        sql_cmd = 'UPDATE %s SET path=%d WHERE path=%d'\
-                % (self._table_idx, new_id, old_id)
-        self._iDb.execute(sql_cmd)
-        return new_id
-    
-    ## Get a new identifier number from the set bin table
-    #
-    # @return integer new identifier (max path identifier + 1, or 1 if the table is empty)
-    #
-    def getNewId(self):
-        sql_cmd = 'select max(path) from %s;' % (self._table_idx)
-        self._iDb.execute(sql_cmd)
-        max_id = self._iDb.fetchall()[0][0]
-        if max_id != None:
-            return int(max_id)+1
-        else:
-            return 1
-        
-    ## Get a list of Set instances overlapping the region between start and end,
-    # using the bin scheme
-    #
-    # @param seqName reference seq name
-    # @param start start coordinate
-    # @param end end coordinate
-    # @return lSet set list
-    # @note old name was getSetList_from_qcoord
-    #
-    def getSetListFromQueryCoord(self, seqName, start, end):
-
-        min_coord = min(start,end)
-        max_coord = max(start,end)
-
-        sql_cmd = 'select path from %s where contig="%s" and ('\
-                 % (self._table + "_idx", seqName)
-        for i in xrange(8, 2, -1):
-            bin_lvl = pow(10, i)
-            if int(start/bin_lvl) == int(end/bin_lvl):       
-                bin = float(bin_lvl + (int(start / bin_lvl) / 1e10))
-                sql_cmd += 'bin=%f' % (bin)
-            else:
-                bin1 = float(bin_lvl + (int(start / bin_lvl) / 1e10))
-                bin2 = float(bin_lvl + (int(end  /bin_lvl) / 1e10))
-                sql_cmd += 'bin between %f and %f' % (bin1, bin2)
-            if bin_lvl != 1000:
-                sql_cmd += " or "
-
-        sql_cmd += ") and min<=%d and max>=%d" % (max_coord, min_coord);
-        self._iDb.execute(sql_cmd)
-        res = self._iDb.fetchall()
-        lId = []
-        for i in res:
-            lId.append(int(i[0]))
-        lSet = self.getSetListFromIdList(lId)
-        return lSet
-
-    ## Get a list of Set instances strictly included between the start and end coordinates,
-    # using the bin scheme
-    #
-    # @param seqName reference seq name
-    # @param start start coordinate
-    # @param end end coordinate
-    # @return lSet set list
-    # @note old name was getInSetList_from_qcoord
-    # @warning the implementation has been changed: the first two lines were added
-    #
-    def getSetListStrictlyIncludedInQueryCoord(self, contig, start, end):
-        min_coord = min(start,end)
-        max_coord = max(start,end)
-        lSet = self.getSetListFromQueryCoord(contig, start, end)       
-        lSetStrictlyIncluded = []
-        for iSet in lSet:
-            if iSet.getMin() > min_coord and \
-               iSet.getMax() < max_coord:
-                lSetStrictlyIncluded.append(iSet)
-                            
-        return lSetStrictlyIncluded
-    
-    ## Get a list of the identifiers contained in the bin table
-    #
-    # @return lId list of int list of identifiers
-    # @note old name was getSet_num
-    #
-    def getIdList(self):
-        sql_cmd = 'select distinct path from %s;' % (self._table_idx)
-        self._iDb.execute(sql_cmd)
-        res = self._iDb.fetchall()
-        lId = []
-        for t in res:
-            lId.append(int(t[0]))
-        return lId
-    
-    ## Get a list of the query sequence names contained in the bin table
-    #
-    # @return lSeqName list of string list of query sequence names
-    # @note old name was getContig_name
-    #
-    def getSeqNameList(self):
-        sql_cmd = 'select distinct contig from %s;' % (self._table_idx)
-        self._iDb.execute(sql_cmd)
-        res = self._iDb.fetchall()
-        lSeqName = []
-        for t in res:
-            lSeqName.append(t[0])
-        return lSeqName
-    
-    ## Insert a list of Set instances, all with the same new identifier, in the set and bin tables
-    #
-    # @param lSets list of Set instances
-    # @param delayed boolean whether to use a delayed insert
-    # @note old name was insAddSetList
-    #
-    def insertListInSetAndBinTable(self, lSets, delayed = False):
-        id = self.getNewId()
-        SetUtils.changeIdInList( lSets, id )
-        for iSet in lSets:
-            self.insASetInSetAndBinTable(iSet, delayed)
-    
-    ## Insert a list of Set instances in the set and bin tables and merge all overlapping sets
-    #
-    # @param lSets list of Set instances
-    # @note old name was insMergeSetList
-    #    
-    def insertListInSetAndBinTableAndMergeAllSets(self, lSets):
-        min, max = SetUtils.getListBoundaries(lSets)
-        oldLSet = self.getSetListFromQueryCoord(lSets[0].seqname, min, max)
-        oldQueryhash = SetUtils.getDictOfListsWithIdAsKey(oldLSet)
-        qhash = SetUtils.getDictOfListsWithIdAsKey(lSets)
-        for lNewSetById in qhash.values():
-            found = False
-            for currentId, oldLsetById in oldQueryhash.items():
-                if SetUtils.areSetsOverlappingBetweenLists(lNewSetById, oldLsetById):
-                    oldLsetById.extend(lNewSetById)
-                    oldLsetById = SetUtils.mergeSetsInList(oldLsetById)
-                    self.deleteFromIdFromSetAndBinTable(currentId)
-                    found = True
-            if not found:
-                self.insertListInSetAndBinTable(lNewSetById)
-            else:
-                id = self.getNewId()
-                SetUtils.changeIdInList(oldLsetById, id)
-                self.insertListInSetAndBinTable(oldLsetById)
-                
-    ## Insert a list of Set instances in the set and bin tables after removing all overlaps between the database and lSets
-    #
-    # @param lSets list of Set instances
-    # @note old name was insDiffSetList
-    #    
-    def insertListInSetAndBinTableAndRemoveOverlaps(self, lSets):
-        min, max = SetUtils.getListBoundaries(lSets)
-        oldLSet = self.getSetListFromQueryCoord(lSets[0].seqname, min, max)
-        oldQueryHash = SetUtils.getDictOfListsWithIdAsKey(oldLSet)
-        newQueryHash = SetUtils.getDictOfListsWithIdAsKey(lSets)
-        for lNewSetById in newQueryHash.values():
-            for lOldSetById in oldQueryHash.values():
-                if SetUtils.areSetsOverlappingBetweenLists(lNewSetById, lOldSetById):
-                    lNewSetById = SetUtils.getListOfSetWithoutOverlappingBetweenTwoListOfSet(lOldSetById, lNewSetById)
-            self.insertListInSetAndBinTable(lNewSetById)
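The bin values handled by getSetListFromQueryCoord() above encode both a bin level (a power of ten between 10^3 and 10^8) and the index of the bin at that level, scaled by 1e-10. A standalone sketch of the bin values that query probes for a given interval, using only the arithmetic visible in the deleted method (illustration only, not part of the module):

    def queryBins(start, end):
        # For each bin level 10^8 .. 10^3, the SQL built above matches either
        # one bin value (both coordinates fall in the same bin at that level)
        # or an inclusive range of bin values.
        lBins = []
        for i in range(8, 2, -1):
            binLvl = pow(10, i)
            if int(start / binLvl) == int(end / binLvl):
                lBins.append((float(binLvl + int(start / binLvl) / 1e10),))
            else:
                lBins.append((float(binLvl + int(start / binLvl) / 1e10),
                              float(binLvl + int(end / binLvl) / 1e10)))
        return lBins

    # queryBins(1200, 3400) probes a single bin at each level from 10^8 down
    # to 10^4, and the range (1000.0000000001, 1000.0000000003) at level 10^3.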
--- a/commons/core/sql/TableJobAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,405 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-import os
-import time
-import datetime
-import sys
-from commons.core.sql.Job import Job 
-from commons.core.sql.TableAdaptator import TableAdaptator
-
-## Methods for Job persistence 
-#
-class TableJobAdaptator(TableAdaptator):
-        
-    ## Record a job
-    #
-    # @param job Job instance with the job information
-    #
-    def recordJob(self, job):
-        self.removeJob(job)
-        sqlCmd = "INSERT INTO %s" % self._table
-        sqlCmd += " VALUES ("
-        sqlCmd += " \"%s\"," % job.jobid
-        sqlCmd += " \"%s\"," % job.jobname
-        sqlCmd += " \"%s\"," % job.groupid
-        sqlCmd += " \"%s\"," % job.launcher
-        sqlCmd += " \"%s\"," % job.queue
-        sqlCmd += " \"%s\"," % job.lResources
-        sqlCmd += " \"waiting\","
-        sqlCmd += " \"%s\"," % time.strftime("%Y-%m-%d %H:%M:%S")
-        sqlCmd += " \"?\" );"
-        self._iDb.execute(sqlCmd)
-        
-       
-    ## Remove a job from the job table
-    #
-    #  @param job: job instance to remove
-    #
-    def removeJob(self, job):
-        qry = "DELETE FROM %s" % self._table
-        qry += " WHERE groupid='%s'" % job.groupid
-        qry += " AND jobname='%s'" % job.jobname
-        qry += " AND launcher='%s';" % job.launcher
-        self._iDb.execute(qry)
-            
-            
-    ## Set the jobid of a job with the identifier returned by the job manager (e.g. SGE)
-    #
-    # @param job job instance
-    # @param jobid integer
-    #
-    def updateJobIdInDB(self, job, jobid):
-        #TODO: check if only one job will be updated
-        qry = "UPDATE %s" % self._table
-        qry += " SET jobid='%i'" % int(jobid)
-        qry += " WHERE jobname='%s'" % job.jobname
-        qry += " AND groupid='%s'" % job.groupid
-        qry += " AND launcher='%s';" % job.launcher
-        self._iDb.execute(qry)
-        
-        
-    ## Get a job status
-    #
-    # @param job: a Job instance with the job information
-    #
-    def getJobStatus(self, job):
-        if job.jobid != 0 and job.jobname == "":
-            job.jobname = job.jobid
-            job.jobid = 0
-        qry = "SELECT status FROM %s" % self._table
-        qry += " WHERE groupid='%s'" % job.groupid
-        qry += " AND jobname='%s'" % job.jobname
-        qry += " AND launcher='%s';" % job.launcher
-        self._iDb.execute(qry)
-        res = self._iDb.fetchall()
-        if len(res) > 1:
-            sys.stderr.write("ERROR while getting job status: non-unique jobs\n")
-            sys.stderr.flush()
-            sys.exit(1)
-        if res == None or len(res) == 0:
-            return "unknown"
-        return res[0][0]
-    
-    
-    ## Change a job status
-    #
-    # @param job: a Job instance with the job information
-    # @param status: the new status (waiting,finished,error)
-    #
-    def changeJobStatus(self, job, status):
-        sqlCmd = "UPDATE %s" % self._table
-        sqlCmd += " SET status='%s'" % status
-        sqlCmd += ", node='%s'" % job.node
-        sqlCmd += " WHERE groupid='%s'" % job.groupid
-        sqlCmd += " AND jobname='%s'" % job.jobname
-        sqlCmd += " AND launcher='%s';" % job.launcher
-        self._iDb.execute(sqlCmd)
-        
-        
-    ## Get the number of jobs belonging to the desired groupid with the desired status.
-    #
-    # @param groupid string a group identifier to record related job series 
-    # @param status string job status (waiting, running, finished, error)
-    # @return int
-    #
-    def getCountStatus(self, groupid, status):
-        qry = "SELECT count(jobname) FROM %s" % self._table
-        qry += " WHERE groupid='%s'" % groupid
-        qry += " AND status='%s';" % status
-        self._iDb.execute(qry)
-        res = self._iDb.fetchall()
-        return int(res[0][0])
-        
-        
-    ## Clean all jobs of a job group
-    #
-    # @param groupid: a group identifier to record related job series
-    #
-    def cleanJobGroup(self, groupid):
-        qry = "DELETE FROM %s WHERE groupid='%s';" % (self._table, groupid)
-        self._iDb.execute(qry)
-            
-            
-    ## Check if there are unfinished jobs in a job group.
-    #
-    # @param groupid string a group identifier to record related job series 
-    #        
-    def hasUnfinishedJob(self, groupid):
-        qry = "SELECT * FROM %s" % self._table
-        qry += " WHERE groupid='%s'" % groupid
-        qry += " and status!='finished';" 
-        self._iDb.execute(qry)
-        res = self._iDb.fetchall()
-        if len(res) == 0:
-            return False
-        return True
-    
-
-    ## Wait until all jobs of a job group have finished.
-    #  Jobs in error are re-launched (max. 3 times by default)
-    #
-    # @param groupid string a group identifier to record related job series
-    # @param checkInterval integer time lapse in seconds between two checks (default = 5)
-    # @param maxRelaunch integer max nb of times a job in error is relaunched before exiting (default = 3)
-    # @param exitIfTooManyErrors boolean exit if a job is still in error above maxRelaunch (default = True)
-    # @param timeOutPerJob integer max nb of seconds after which one tests if a job is still in SGE or not (default = 60*60=1h)
-    #
-    def waitJobGroup(self, groupid, checkInterval=5, maxRelaunch=3, exitIfTooManyErrors=True, timeOutPerJob=60*60):
-        dJob2Err = {}
-        
-        # retrieve the total number of jobs belonging to the desired groupid
-        qry = "SELECT count(jobname) FROM %s WHERE groupid='%s';" % (self._table, groupid)
-        self._iDb.execute(qry)
-        totalNbJobs = int(self._iDb.fetchall()[0][0])
-        
-        nbTimeOuts = 0
-        
-        while True:
-            time.sleep(checkInterval)
-            # retrieve the finished jobs and stop if all jobs are finished
-            nbFinishedJobs = self.getCountStatus(groupid, "finished")
-            if nbFinishedJobs == totalNbJobs:
-                break
-
-            # retrieve the jobs in error and relaunch them if they are in error (max. 'maxRelaunch' times)
-            qry = "SELECT * FROM %s" % self._table
-            qry += " WHERE groupid='%s'" % groupid
-            qry += " AND status ='error';"
-            self._iDb.execute(qry)
-            lJobsInError = self._iDb.fetchall()
-            for job in lJobsInError:
-                jobName = job[1]
-                if not dJob2Err.has_key(jobName):
-                    dJob2Err[jobName] = 1
-                if dJob2Err[jobName] < maxRelaunch:
-                    print "job '%s' in error, re-submitting (%i)" % (job[1], dJob2Err[job[1]])
-                    sys.stdout.flush()
-                    lResources = job[5].replace("[", "").replace("]", "").replace("'", "").split(", ")
-                    newJob = Job(jobname=jobName, groupid=job[2], launcherFile=job[3], queue=job[4], lResources=lResources)
-                    self.submitJob(newJob)
-                    dJob2Err[jobName] += 1
-                else:
-                    dJob2Err[jobName] += 1
-                    msg = "job '%s' in permanent error (>%i)" % (jobName, maxRelaunch)
-                    msg += "\ngroupid = %s" % groupid
-                    msg += "\nnb of jobs = %i" % totalNbJobs
-                    msg += "\nnb of finished jobs = %i" % self.getCountStatus(groupid, "finished")
-                    msg += "\nnb of waiting jobs = %i" % self.getCountStatus(groupid, "waiting")
-                    msg += "\nnb of running jobs = %i" % self.getCountStatus(groupid, "running")
-                    msg += "\nnb of jobs in error = %i" % self.getCountStatus(groupid, "error")
-                    print msg
-                    sys.stdout.flush()
-                    if exitIfTooManyErrors:
-                        self.cleanJobGroup(groupid)
-                        sys.exit(1)
-                    else:
-                        checkInterval = 60
-            nbTimeOuts = self._checkIfJobsTableAndJobsManagerInfoAreConsistent(nbTimeOuts, timeOutPerJob, groupid)
-    
-    
-    ## Submit a job to a queue and record it in the job table.
-    #
-    # @param job a Job instance
-    # @param maxNbWaitingJobs integer max nb of waiting jobs before submitting a new one (default = 10000)
-    # @param checkInterval integer time lapse in seconds between two checks (default = 30)
-    # @param verbose integer (default = 0)
-    #               
-    def submitJob(self, job, verbose=0, maxNbWaitingJobs=10000, checkInterval=30):
-        if self.getJobStatus(job) in ["waiting", "running", "finished"]:
-            sys.stderr.write( "WARNING: job '%s' was already submitted\n" % job.jobname)
-            sys.stderr.flush()
-            self.cleanJobGroup(job.groupid)
-            sys.exit(1)
-            
-        while self.getCountStatus(job.groupid, "waiting") > maxNbWaitingJobs:
-            time.sleep(checkInterval)
-
-        self.recordJob(job)
-        cmd = self._getQsubCommand(job)
-        returnStatus = os.system(cmd)
-
-        if returnStatus == 0:
-            fileName = "jobid.stdout"
-            jobidFileHandler = open(fileName, "r")
-            jobid = self._getJobidFromJobManager(jobidFileHandler)
-            if verbose > 0:
-                print "job '%i %s' submitted" % (jobid, job.jobname)
-                sys.stdout.flush()
-            job.jobid = jobid
-            jobidFileHandler.close()
-            self.updateJobIdInDB(job, jobid)
-            os.remove(fileName)
-        return returnStatus
-
-
-    ## Get the list of nodes where jobs of one group were executed
-    #
-    # @param groupId string a group identifier of a job series
-    # @return lNodes list of node names without redundancy
-    #
-    def getNodesListByGroupId(self, groupId):
-        qry = "SELECT DISTINCT node FROM %s" % self._table
-        qry += " WHERE groupid='%s'" % groupId
-        self._iDb.execute(qry)
-        res = self._iDb.fetchall()
-        lNodes = []
-        for resTuple in res:
-            lNodes.append(resTuple[0])
-        return lNodes
-    
-    def checkJobTable(self):
-        if not self._iDb.doesTableExist(self._table):
-            self._iDb.createTable(self._table, "jobs")
-        else:
-            lExpFields = sorted(["jobid", "jobname", "groupid", "launcher", "queue", "resources", "status", "time", "node"])
-            lObsFields = sorted(self._iDb.getFieldList(self._table))
-            if lExpFields != lObsFields:
-                self._iDb.createTable(self._table, "jobs", overwrite = True)
-    
-    def close(self):
-        self._iDb.close() 
-    
-    def _getJobidAndNbJob(self, jobid) :
-        tab = jobid.split(".")
-        jobid = tab[0]
-        tab = tab[1].split(":")
-        nbJob = tab[0]
-        return jobid, nbJob
-    
-class TableJobAdaptatorSGE(TableJobAdaptator):
-        
-    def _checkIfJobsTableAndJobsManagerInfoAreConsistent(self, nbTimeOuts, timeOutPerJob, groupid):
-        # retrieve the date and time at which the oldest, still-running job was submitted
-        sql = "SELECT jobid,jobname,time FROM %s WHERE groupid='%s' AND status='running' ORDER BY time DESC LIMIT 1" % (self._table, groupid)
-        self._iDb.execute( sql )
-        res = self._iDb.fetchall()
-        if len(res) > 0:
-            jobid = res[0][0]
-            jobname = res[0][1]
-            dateTimeOldestJob = res[0][2]
-            dateTimeCurrent = datetime.datetime.now()
-            # delta is the time elapsed between (i) the submission of the selected still-running job of the given groupid and (ii) the current time
-            delta = dateTimeCurrent - dateTimeOldestJob
-            # check in which timeout interval delta falls: 0 <= delta < 1h | 1h <= delta < 2h | 2h <= delta < 3h (with timeOutPerJob = 1h)
-            if delta.seconds >= nbTimeOuts * timeOutPerJob and delta.seconds < (nbTimeOuts+1) * timeOutPerJob:
-                return nbTimeOuts
-            # delta outside the interval: go to next interval (time out) 
-            if delta.seconds >= (nbTimeOuts+1) * timeOutPerJob:
-                nbTimeOuts += 1
-                # A job with 'running' status should appear in qstat, because the status in the DB is set to 'running' by the job itself once launched.
-                if not self.isJobStillHandledBySge(jobid, jobname):
-                    # If it does not, give the DB some time to be updated, in case the job finished between the query execution and now.
-                    time.sleep( 5 )
-                # If the status has still not been updated to 'finished', exit
-                #TODO: check status in DB
-                if not self.isJobStillHandledBySge(jobid, jobname):
-                    msg = "ERROR: job '%s', supposedly still running, is not handled by SGE anymore" % ( jobid )
-                    msg += "\nit was launched the %s (> %.2f hours ago)" % ( dateTimeOldestJob, timeOutPerJob/3600.0 )
-                    msg += "\nthis problem can be due to:"
-                    msg += "\n* memory shortage, in that case, decrease the size of your jobs;"
-                    msg += "\n* timeout, in that case, decrease the size of your jobs;"
-                    msg += "\n* node failure or database error, in that case, launch the program again or ask your system administrator."
-                    sys.stderr.write("%s\n" % msg)
-                    sys.stderr.flush()
-                    self.cleanJobGroup(groupid)
-                    sys.exit(1)
-        return nbTimeOuts
-                        
-    ## Check if a job is still handled by SGE
-    #
-    # @param jobid string job identifier
-    # @param jobname string job name
-    #  
-    def isJobStillHandledBySge(self, jobid, jobname):
-        isJobInQstat = False
-        qstatFile = "qstat_stdout"
-        cmd = "qstat > %s" % qstatFile
-        returnStatus = os.system(cmd)
-        if returnStatus != 0:
-            msg = "ERROR while launching 'qstat'"
-            sys.stderr.write( "%s\n" % msg )
-            sys.exit(1)
-        qstatFileHandler = open(qstatFile, "r")
-        lLines = qstatFileHandler.readlines()
-        for line in lLines:
-            tokens = line.split()
-            if len(tokens) > 3 and tokens[0] == str(jobid) and tokens[2] == jobname[0:len(tokens[2])]:
-                isJobInQstat = True
-                break
-        qstatFileHandler.close()
-        os.remove(qstatFile)
-        return isJobInQstat
-    
-    def _getQsubCommand(self, job):    
-        cmd = "echo '%s' | " % job.launcher
-        prg = "qsub"
-        cmd += prg
-        cmd += " -V"
-        cmd += " -N %s" % job.jobname
-        if job.queue != "":
-            cmd += " -q %s" % job.queue
-        cmd += " -cwd"
-        if job.lResources != []:
-            cmd += " -l \""
-            cmd += " ".join(job.lResources)
-            cmd += "\""
-        if job.parallelEnvironment != "":
-            cmd += " -pe " + job.parallelEnvironment
-        cmd += " > jobid.stdout"
-        return cmd
-    
-    def _getJobidFromJobManager(self, jobidFileHandler):
-        return int(jobidFileHandler.readline().split(" ")[2])
-    
-
-class TableJobAdaptatorTorque(TableJobAdaptator):  
-                        
-    def _checkIfJobsTableAndJobsManagerInfoAreConsistent(self, nbTimeOuts, timeOutPerJob, groupid):
-        return nbTimeOuts
-        
-    def _getQsubCommand(self, job):    
-        cmd = "echo '%s' | " % job.launcher
-        prg = "qsub"
-        cmd += prg
-        cmd += " -V"
-        cmd += " -d %s" % os.getcwd()
-        cmd += " -N %s" % job.jobname
-        if job.queue != "":
-            cmd += " -q %s" % job.queue
-        if job.lResources != []:
-            cmd += " -l \""
-            cmd += " ".join(job.lResources).replace("mem_free","mem")
-            cmd += "\""
-        cmd += " > jobid.stdout"
-        return cmd
-
-    def _getJobidFromJobManager(self, jobidFileHandler):
-        return int(jobidFileHandler.readline().split(".")[0])
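Taken together, the methods above define a simple submit-and-poll workflow: record each job, run qsub, then poll the job table until every job of the group has finished. A minimal usage sketch under the following assumptions: iTJA is an already-constructed TableJobAdaptatorSGE or TableJobAdaptatorTorque (e.g. obtained through TableJobAdaptatorFactory, next file), the launcher files already exist, and the resource string is only an example value:

    from commons.core.sql.Job import Job

    def runJobGroup(iTJA, groupid, lLauncherFiles):
        # create the job table if needed (or recreate it if its schema is wrong)
        iTJA.checkJobTable()
        for count, launcherFile in enumerate(lLauncherFiles):
            # keyword arguments as used by waitJobGroup() above when re-submitting a job
            iJob = Job(jobname="%s_%i" % (groupid, count), groupid=groupid,
                       launcherFile=launcherFile, queue="",
                       lResources=["mem_free=1G"])   # example resource only
            iTJA.submitJob(iJob)
        # poll every 5 s; jobs in error are re-submitted up to 3 times
        iTJA.waitJobGroup(groupid)
        iTJA.cleanJobGroup(groupid)
        iTJA.close()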
--- a/commons/core/sql/TableJobAdaptatorFactory.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,66 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-import os
-import sys
-from commons.core.sql.TableJobAdaptator import TableJobAdaptatorSGE
-from commons.core.sql.TableJobAdaptator import TableJobAdaptatorTorque
-from commons.core.sql.JobAdaptator import JobAdaptatorSGE
-from commons.core.sql.JobAdaptator import JobAdaptatorTorque
-
-class TableJobAdaptatorFactory(object):
-
-    def createInstance(iDb, jobTableName):
-        if os.environ["REPET_JOB_MANAGER"].lower() == "sge":
-            iTJA = TableJobAdaptatorSGE(iDb, jobTableName)
-        elif os.environ["REPET_JOB_MANAGER"].lower() == "torque":
-            iTJA = TableJobAdaptatorTorque(iDb, jobTableName)
-        else:
-            print "ERROR: unknown job manager: $REPET_JOB_MANAGER = %s" % os.environ["REPET_JOB_MANAGER"]
-            sys.exit(1)
-            
-        return iTJA
-
-    createInstance = staticmethod(createInstance)
-       
-    def createJobInstance():
-        if os.environ["REPET_JOB_MANAGER"].lower() == "sge":
-            iJA = JobAdaptatorSGE()
-        elif os.environ["REPET_JOB_MANAGER"].lower() == "torque":
-            iJA = JobAdaptatorTorque()
-        else:
-            print "ERROR: unknown job manager: $REPET_JOB_MANAGER = %s" % os.environ["REPET_JOB_MANAGER"]
-            sys.exit(1)
-            
-        return iJA   
-    
-
-    createJobInstance = staticmethod(createJobInstance)
-    
\ No newline at end of file
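The factory above switches on the REPET_JOB_MANAGER environment variable (compared case-insensitively). A minimal sketch of how it would typically be called; iDb stands for an already-connected DbMySql instance obtained elsewhere, and "jobs" is just an example table name, so both are assumptions rather than part of the deleted code:

    import os
    from commons.core.sql.TableJobAdaptatorFactory import TableJobAdaptatorFactory

    def getJobAdaptators(iDb, jobTableName="jobs"):
        # select the batch system before calling the factory
        os.environ.setdefault("REPET_JOB_MANAGER", "SGE")   # or "Torque"
        iTJA = TableJobAdaptatorFactory.createInstance(iDb, jobTableName)   # TableJobAdaptatorSGE or ...Torque
        iJA = TableJobAdaptatorFactory.createJobInstance()                  # JobAdaptatorSGE or JobAdaptatorTorque
        return iTJA, iJA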
--- a/commons/core/sql/TableMapAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,193 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-import sys
-from commons.core.sql.TableAdaptator import TableAdaptator
-from commons.core.sql.ITableMapAdaptator import ITableMapAdaptator
-from commons.core.coord.Map import Map
-from commons.core.coord.MapUtils import MapUtils
-
-
-## Adaptator for Map table
-#
-class TableMapAdaptator( TableAdaptator, ITableMapAdaptator ):
-            
-    ## Give a list of Map instances having a given seq name
-    #
-    # @param seqName string seq name
-    # @return lMap list of Map instances
-    #
-    def getListFromSeqName( self, seqName ):
-        sqlCmd = "SELECT * FROM %s" % (self._table)
-        colum2Get, type2Get, attr2Get = self._getTypeColumAttr2Get(seqName)
-        sqlCmd += " WHERE " + colum2Get
-        sqlCmd += " = "
-        sqlCmd = sqlCmd + type2Get
-        sqlCmd = sqlCmd % "'" + attr2Get + "'"
-        return self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        
-    ## Give a list of Map instances overlapping a given region
-    #
-    # @param query string query name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return list of Map instances
-    #
-    def getListOverlappingCoord(self, query, start, end):
-        sqlCmd = 'select * from %s where chr="%s" and ((start between least(%d,%d) and greatest(%d,%d) or end between least(%d,%d) and greatest(%d,%d)) or (least(start,end)<=least(%d,%d) and greatest(start,end)>=greatest(%d,%d)))  ;' % (self._table, query, start, end, start, end, start, end, start, end, start, end, start, end)
-        return self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-    
-    ## Give a list of Map instances having a given sequence name
-    #
-    # @param seqName string sequence name
-    # @return lMap list of instances
-    #
-    def getMapListFromSeqName(self, seqName):
-        lMap = self.getListFromSeqName( seqName )
-        return lMap
-    
-#TODO: Check getListFromSeqName method: uses name instead of seqname
-#    ## Give a list of Map instances having a given sequence name from list
-#    #
-#    # @param lSeqName string sequence name list
-#    # @return lMap list of instances
-#    #
-#    def getMapListFromSeqNameList(self, lSeqName):
-#        lMap = []
-#        [lMap.extend(self.getListFromSeqName(seqName)) for seqName in lSeqName]
-#        return lMap
-    
-    ## Give a list of Map instances having a given chromosome
-    #
-    # @param chr string chromosome
-    # @return lMap list of instances
-    #
-    def getMapListFromChr(self, chr):
-        sqlCmd = "SELECT * FROM %s WHERE chr='%s'" % (self._table, chr)
-        lMap = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lMap
-
-    ## Give a list of the distinct seqName/chr present in the table
-    #
-    # @return lDistinctContigNames string list
-    #
-    def getSeqNameList(self):
-        sqlCmd = "SELECT DISTINCT chr FROM %s" % ( self._table )
-        lDistinctContigNames = self._iDb.getStringListWithSQLCmd(sqlCmd)
-        return lDistinctContigNames
-    
-    ## Return a list of Set instances from a given sequence name
-    #
-    # @param seqName string sequence name
-    # @return lSets list of Set instances
-    # 
-    def getSetListFromSeqName( self, seqName ):
-        lMaps = self.getListFromSeqName( seqName )
-        lSets = MapUtils.mapList2SetList( lMaps )
-        return lSets
-    
-    ## Give a list of Map instances overlapping a given region
-    #
-    # @param seqName string seq name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lMap list of map instances
-    #
-    def getMapListOverlappingCoord(self, seqName, start, end):
-        lMap = self.getListOverlappingCoord(seqName, start, end)
-        return lMap
-    
-    ## Return a list of Set instances overlapping a given region of a sequence
-    #   
-    # @param seqName string sequence name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lSet list of Set instances
-    #
-    def getSetListOverlappingCoord( self, seqName, start, end ):
-        lMaps = self.getListOverlappingCoord( seqName, start, end )
-        lSets = MapUtils.mapList2SetList( lMaps )
-        return lSets
-    
-    ## Give a dictionary whose keys are Map names and whose values are the corresponding Map instances
-    #
-    # @return dName2Maps dict whose keys are Map names and whose values are the corresponding Map instances
-    #
-    def getDictPerName( self ):
-        dName2Maps = {}
-        lMaps = self.getListOfAllMaps()
-        for iMap in lMaps:
-            if dName2Maps.has_key( iMap.name ):
-                if iMap == dName2Maps[ iMap.name ]:
-                    continue
-                else:
-                    msg = "ERROR: in table '%s' two different Map instances have the same name '%s'" % ( self._table, iMap.name )
-                    sys.stderr.write( "%s\n" % ( msg ) )
-                    sys.exit(1)
-            dName2Maps[ iMap.name ] = iMap
-        return dName2Maps
-    
-    ## Return a list of Map instances with all the data contained in the table
-    #
-    # @return lMaps list of Map instances
-    #
-    def getListOfAllMaps( self ):
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        lMaps = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lMaps
-    
-    ## Give the end coordinate of the Map for a given sequence name
-    #
-    # @param seqName string sequence name
-    # @return end integer end coordinate of the Map
-    #
-    def getEndFromSeqName(self, seqName):
-        sqlCmd = "SELECT end FROM %s WHERE chr = '%s'" % (self._table, seqName)
-        end = self._iDb.getIntegerWithSQLCmd(sqlCmd)
-        return end
-    
-    def _getInstanceToAdapt(self):
-        iMap = Map()
-        return iMap
-
-    def _getTypeColumAttr2Get(self, name):
-        colum2Get = 'name'
-        type2Get = '%s'
-        attr2Get = name
-        return colum2Get, type2Get, attr2Get
-    
-    def _getTypeAndAttr2Insert(self, map):
-        type2Insert = ("'%s'","'%s'","'%d'","'%d'")
-        attr2Insert = (map.name, map.seqname, map.start, map.end)
-        return type2Insert, attr2Insert
-
-    def _escapeAntislash(self, obj):
-        obj.name = obj.name.replace("\\", "\\\\")
-        obj.seqname = obj.seqname.replace("\\", "\\\\")
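The long WHERE clause built by getListOverlappingCoord() above amounts to the classic closed-interval overlap test, written so that it also works when coordinates are given in reverse orientation. A standalone sketch of the equivalent predicate (illustration only, not part of the deleted module):

    def overlaps(start1, end1, start2, end2):
        # two closed intervals overlap exactly when each one, in its ordered
        # form, starts before the other one ends
        return min(start1, end1) <= max(start2, end2) and \
               min(start2, end2) <= max(start1, end1)

    assert overlaps(100, 200, 150, 400)      # partial overlap
    assert overlaps(100, 200, 400, 150)      # reversed coordinates
    assert overlaps(150, 160, 100, 400)      # containment
    assert not overlaps(100, 200, 201, 300)  # adjacent but disjoint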
--- a/commons/core/sql/TableMatchAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,100 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-from commons.core.sql.TableAdaptator import TableAdaptator
-from commons.core.sql.ITableMatchAdaptator import ITableMatchAdaptator
-from commons.core.coord.Match import Match
-
-## Adaptator for Match table
-#
-class TableMatchAdaptator( TableAdaptator, ITableMatchAdaptator ):
-        
-    ## Give a list of Match instances given a query name
-    #
-    # @param query string sequence name
-    # @return lMatches list of Match instances
-    #
-    def getMatchListFromQuery( self, query ):
-        sqlCmd = "SELECT * FROM %s WHERE query_name='%s';" % ( self._table, query )
-        return self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-    
-    ## Give a list of Match instances having the same identifier
-    #
-    # @param id integer identifier number
-    # @return lMatch a list of Match instances
-    #
-    def getMatchListFromId( self, id ):
-        sqlCmd = "SELECT * FROM %s WHERE path='%d';" % ( self._table, id )
-        lMatch = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lMatch
-    
-    ## Give a list of Match instances according to the given list of identifier numbers
-    #
-    # @param lId integer list 
-    # @return lMatch a list of Match instances
-    # 
-    def getMatchListFromIdList( self, lId ):
-        lMatch=[]
-        if lId == []:
-            return lMatch
-        sqlCmd = "select * from %s where path=%d" % (self._table, lId[0])
-        for i in lId[1:]:
-            sqlCmd += " or path=%d" % (i)
-        sqlCmd += ";"
-        lMatch = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lMatch
-    
-    ## Give the data contained in the table as a list of Match instances
-    #
-    # @return lMatches list of Match instances
-    #
-    def getListOfAllMatches( self ):
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        lMatches = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lMatches    
-    
-    def _getInstanceToAdapt(self):
-        iMatch = Match()
-        return iMatch
-    
-    def _getTypeAndAttr2Insert(self, match):
-        type2Insert = ("'%s'","'%d'","'%d'","'%d'","'%f'","'%f'","'%s'","'%d'","'%d'","'%d'","'%f'","'%g'","'%d'","'%f'","'%d'")
-        attr2Insert = ( match.range_query.seqname, match.range_query.start, \
-                        match.range_query.end, match.query_length, match.query_length_perc, \
-                        match.match_length_perc, match.range_subject.seqname, match.range_subject.start,\
-                        match.range_subject.end, match.subject_length, match.subject_length_perc, \
-                        match.e_value, match.score, match.identity, \
-                        match.id)
-        return type2Insert, attr2Insert
-    
-    def _escapeAntislash(self, obj):
-        obj.range_query.seqname = obj.range_query.seqname.replace("\\", "\\\\")
-        obj.range_subject.seqname = obj.range_subject.seqname.replace("\\", "\\\\")
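A small usage sketch of the accessors defined above; tableMatchAdaptator stands for a TableMatchAdaptator already bound to an existing match table, so the wrapper below is only an illustration:

    def getMatchesOfInterest(tableMatchAdaptator, queryName, lId):
        # matches recorded for one query sequence
        lMatchesForQuery = tableMatchAdaptator.getMatchListFromQuery(queryName)
        # matches belonging to a given list of 'path' identifiers
        lMatchesForIds = tableMatchAdaptator.getMatchListFromIdList(lId)
        return lMatchesForQuery, lMatchesForIds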
--- a/commons/core/sql/TablePathAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,673 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-from commons.core.coord.Path import Path
-from commons.core.coord.PathUtils import PathUtils
-from commons.core.sql.TableAdaptator import TableAdaptator
-from commons.core.sql.ITablePathAdaptator import ITablePathAdaptator
-
-
-## Adaptator for a Path table
-#
-class TablePathAdaptator( TableAdaptator, ITablePathAdaptator ):
-
-    ## Give a list of Path instances having the same identifier
-    #
-    # @param id integer identifier number
-    # @return lPath a list of Path instances
-    #
-    def getPathListFromId( self, id ):
-        sqlCmd = "SELECT * FROM %s WHERE path='%d';" % ( self._table, id )
-        lPath = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lPath
-    
-    ## Give a list of Path instances according to the given list of identifier numbers
-    #
-    # @param lId integer list 
-    # @return lPath a list of Path instances
-    #
-    def getPathListFromIdList( self, lId ):
-        lPath=[]
-        if lId == []:
-            return lPath
-        sqlCmd = "select * from %s where path=%d" % (self._table, lId[0])
-        for i in lId[1:]:
-            sqlCmd += " or path=%d" % (i)
-        sqlCmd += ";"
-        lPath = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lPath
-    
-    ## Give a list of Path instances having the same given query name
-    #
-    # @param query string name of the query 
-    # @return lPath a list of Path instances
-    #
-    def getPathListFromQuery( self, query ):
-        lPath = self._getPathListFromTypeName("query", query)
-        return lPath
-    
-    ## Give a list of Path instances having the same given subject name
-    #
-    # @param subject string name of the subject 
-    # @return lPath a list of Path instances
-    #
-    def getPathListFromSubject( self, subject ):
-        lPath = self._getPathListFromTypeName("subject", subject)
-        return lPath
-    
-    ## Give a list of the distinct subject names present in the table
-    #
-    # @return lDistinctSubjectNames string list
-    #
-    def getSubjectList(self):
-        lDistinctSubjectNames = self._getDistinctTypeNamesList("subject")
-        return lDistinctSubjectNames
-    
-    ## Give a list of the distinct query names present in the table
-    #
-    # @return lDistinctQueryNames string list
-    #
-    def getQueryList(self):
-        lDistinctQueryNames = self._getDistinctTypeNamesList("query")
-        return lDistinctQueryNames
-    
-    ## Give a list of the distinct query names present in the table
-    # @note provided for correspondence with getSeqNameList() in TableSetAdaptator (used by srptAutoPromote.py)
-    #
-    # @return lDistinctContigNames string list
-    #
-    def getSeqNameList(self):
-        return self.getQueryList()
-    
-    ## Give a list with all the distinct identifiers corresponding to the query
-    #
-    # @param query string name of the query
-    # @return lId a list of integer
-    #
-    def getIdListFromQuery( self, query ):
-        lId = self._getIdListFromTypeName("query", query)
-        return lId
-    
-    ## Give a list with all the distinct identifiers corresponding to the subject
-    #
-    # @param subject string name of the subject 
-    # @return lId a list of integer
-    #
-    def getIdListFromSubject( self, subject ):
-        lId = self._getIdListFromTypeName("subject", subject)
-        return lId
-    
-    ## Give a list of identifiers contained in the table
-    #
-    # @return lId integer list
-    #
-    def getIdList(self):
-        sqlCmd = "SELECT DISTINCT path from %s;" % (self._table)
-        lId = self._iDb.getIntegerListWithSQLCmd( sqlCmd )
-        return lId
-        
-    ## Give a list of the distinct subject names present in the table given a query name
-    #
-    # @param queryName string 
-    # @return lDistinctSubjectNamesPerQuery string list
-    #
-    def getSubjectListFromQuery( self, queryName ):
-        sqlCmd = "SELECT DISTINCT subject_name FROM %s WHERE query_name='%s'" % ( self._table, queryName )
-        lDistinctSubjectNamesPerQuery = self._iDb.getStringListWithSQLCmd(sqlCmd)
-        return lDistinctSubjectNamesPerQuery
-    
-    ## Give the data contained in the table as a list of Path instances
-    #
-    # @return lPaths list of Path instances
-    #
-    def getListOfAllPaths( self ):
-        return self.getListOfAllCoordObject()
-    
-    ## Give a list of Path instances with the given query and subject, both on direct strand
-    #
-    # @param query string query name
-    # @param subject string subject name
-    # @return lPaths list of path instances
-    #
-    def getPathListWithDirectQueryDirectSubjectFromQuerySubject( self, query, subject ):
-        sqlCmd = "SELECT * FROM %s WHERE query_name='%s' AND subject_name='%s' AND query_start<query_end AND subject_start<subject_end ORDER BY query_name, subject_name, query_start;" % ( self._table, query, subject )
-        lPaths = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lPaths
-    
-    ## Give a list of Path instances with the given query on direct strand and the given subject on reverse strand
-    #
-    # @param query string query name
-    # @param subject string subject name
-    # @return lPaths list of path instances
-    #
-    def getPathListWithDirectQueryReverseSubjectFromQuerySubject( self, query, subject ):
-        sqlCmd = "SELECT * FROM %s WHERE query_name='%s' AND subject_name='%s' AND query_start<query_end AND subject_start>subject_end ORDER BY query_name, subject_name, query_start;" % ( self._table, query, subject )
-        lPaths = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lPaths
-
-    ## Give the number of Path instances with the given query name
-    #
-    # @param query string query name
-    # @return pathNb integer the number of Path instances
-    #
-    def getNbPathsFromQuery( self, query ):
-        pathNb = self._getPathsNbFromTypeName("query", query)
-        return pathNb
-    
-    ## Give the number of Path instances with the given subject name
-    #
-    # @param subject string subject name
-    # @return pathNb integer the number of Path instances
-    #
-    def getNbPathsFromSubject( self, subject ):
-        pathNb = self._getPathsNbFromTypeName("subject", subject)
-        return pathNb
-    
-    ## Give the number of distinct path identifiers
-    #
-    # @return idNb integer the number of Path instances
-    #
-    def getNbIds( self ):
-        sqlCmd = "SELECT COUNT( DISTINCT path ) FROM %s" % ( self._table )
-        idNb = self._iDb.getIntegerWithSQLCmd( sqlCmd )
-        return idNb
-    
-    ## Give the number of distinct path identifiers for a given subject
-    #
-    # @param subjectName string subject name
-    # @return idNb integer the number of Path instances
-    #
-    def getNbIdsFromSubject( self, subjectName ):
-        idNb = self._getIdNbFromTypeName("subject", subjectName)
-        return idNb
-    
-    ## Give the number of distinct path identifiers for a given query
-    #
-    # @param queryName string query name
-    # @return idNb integer the number of Path instances
-    #
-    def getNbIdsFromQuery( self, queryName ):
-        idNb = self._getIdNbFromTypeName("query", queryName)
-        return idNb
-    
-    ## Give a list of Path instances included in a given query region
-    #
-    # @param query string query name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lPaths list of Path instances
-    #
-    def getPathListIncludedInQueryCoord( self, query, start, end ):
-        if( start > end ):
-            tmp = start
-            start = end
-            end = tmp
-        sqlCmd = "SELECT * FROM %s WHERE query_name='%s' AND query_start>=%i AND query_end<=%i" % ( self._table, query, start, end )
-        lPaths = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lPaths
-    
-    ## Give a list of Path instances overlapping a given region
-    #
-    # @param query string query name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lPath list of Path instances
-    #
-    def getPathListOverlappingQueryCoord( self, query, start, end ):
-        if( start > end ):
-            tmp = start
-            start = end
-            end = tmp
-        sqlCmd = "SELECT * FROM %s WHERE query_name='%s'" % ( self._table, query )
-        sqlCmd += " AND ( ( query_start < %i AND query_end >= %i AND query_end <= %i )" % ( start, start, end )
-        sqlCmd += " OR ( query_start >= %i AND query_end <= %i )" % ( start, end )
-        sqlCmd += " OR ( query_start >= %i AND query_start <= %i AND query_end > %i )" % ( start, end, end )
-        sqlCmd += " OR ( query_start < %i AND query_end > %i ) )" % ( start, end )
-        lPaths = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lPaths
-    
-    ## Give a list of Path instances overlapping a given region
-    #
-    # @note whole chains are returned, even if only a fragment overlaps with the given region
-    # @param query string query name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lPath list of Path instances
-    #
-    def getChainListOverlappingQueryCoord( self, query, start, end ):
-        if( start > end ):
-            tmp = start
-            start = end
-            end = tmp
-        sqlCmd = "SELECT DISTINCT path FROM %s WHERE query_name='%s'" % ( self._table, query )
-        sqlCmd += " AND ( ( query_start <= %i AND query_end >= %i AND query_end <= %i )" % ( start, start, end )
-        sqlCmd += " OR ( query_start >= %i AND query_end <= %i )" % ( start, end )
-        sqlCmd += " OR ( query_start >= %i AND query_start <= %i AND query_end >= %i )" % ( start, end, end )
-        sqlCmd += " OR ( query_start <= %i AND query_end >= %i ) )" % ( start, end )
-        lIdentifiers = self._iDb.getIntegerListWithSQLCmd( sqlCmd )
-        lPaths = self.getPathListFromIdList( lIdentifiers )
-        return lPaths
-    
-    ## Give a list of Set instances overlapping a given region
-    #
-    # @param query string query name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lSet list of Set instances
-    #
-    def getSetListOverlappingQueryCoord(self, query, start, end):
-        lPath = self.getPathListOverlappingQueryCoord(query, start, end)
-        lSet = PathUtils.getSetListFromQueries(lPath)
-        return lSet
-    
-    ## Give a list of Set instances included in a given region
-    #
-    # @param query string query name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lSet list of Set instances
-    #
-    def getSetListIncludedInQueryCoord(self, query, start, end):
-        lPath=self.getPathListIncludedInQueryCoord(query, start, end)
-        lSet = PathUtils.getSetListFromQueries(lPath) 
-        return lSet
-    
-    ## Give a list of Path instances sorted by query coordinates
-    #
-    # @return lPaths list of Path instances
-    #
-    def getPathListSortedByQueryCoord( self ):
-        sqlCmd = "SELECT * FROM %s ORDER BY query_name, LEAST(query_start,query_end)" % ( self._table )
-        lPaths = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lPaths
-    
-    ## Give a list of Path instances sorted by query coordinates for a given query
-    #
-    # @param queryName string query name
-    # @return lPaths list of Path instances
-    #
-    def getPathListSortedByQueryCoordFromQuery( self, queryName ):
-        sqlCmd = "SELECT * FROM %s WHERE query_name='%s' ORDER BY LEAST(query_start,query_end)" % ( self._table, queryName )
-        lPaths = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lPaths
-    
-    ## Give a list of Path instances sorted by query coordinates and score for a given query
-    #
-    # @return lPaths list of Path instances
-    #
-    def getPathListSortedByQueryCoordAndScoreFromQuery(self, queryName):
-        sqlCmd = "SELECT * FROM %s WHERE query_name='%s' ORDER BY query_start, query_end, score" % (self._table, queryName)
-        lPaths = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lPaths
-    
-    ## Give the cumulative length of all paths (fragments) for a given subject name
-    #
-    # @param subjectName string subject name
-    # @return nb integer cumulative length of all paths
-    #
-    # @warning does not take overlaps into account
-    #
-    def getCumulLengthFromSubject( self, subjectName ):
-        sqlCmd = "SELECT SUM(ABS(query_end-query_start)+1) FROM %s WHERE subject_name='%s'" % ( self._table, subjectName )
-        nb = self._iDb.getIntegerWithSQLCmd(sqlCmd)
-        return nb
-    
-    ## Give a list of the lengths of all chains of paths for a given subject name
-    #
-    # @param subjectName string name of the subject
-    # @return lChainLengths list of lengths per chain of paths
-    #
-    # @warning does not take overlaps into account
-    #
-    def getChainLengthListFromSubject( self, subjectName ):
-        sqlCmd = "SELECT SUM(ABS(query_end-query_start)+1) FROM %s WHERE subject_name='%s' GROUP BY PATH" % ( self._table, subjectName )
-        lChainLengths = self._iDb.getIntegerListWithSQLCmd(sqlCmd)
-        return lChainLengths
-    
-    ## Give a list of the identities of all chains of paths for a given subject name
-    #
-    # @param subjectName string name of the subject
-    # @return lChainIdentities list of identities per chain of paths
-    #
-    # @warning does not take overlaps into account
-    #
-    def getChainIdentityListFromSubject( self, subjectName ):
-        lChainIdentities = []
-        sqlCmd = "SELECT SUM(identity*(ABS(query_start-query_end)+1)) / SUM(ABS(query_end-query_start)+1) FROM %s WHERE subject_name='%s' GROUP BY PATH" % ( self._table, subjectName )
-        self._iDb.execute( sqlCmd )
-        res = self._iDb.fetchall()
-        for i in res:
-            if i[0] != None:
-                lChainIdentities.append( round( float( i[0] ), 2 ) )
-        return lChainIdentities
-    
-    ## Give a list of the lengths of all paths for a given subject name
-    #
-    # @param subjectName string name of the subject
-    # @return lPathLengths list of lengths per path
-    #
-    # @warning does not take overlaps into account
-    #
-    def getPathLengthListFromSubject( self, subjectName ):
-        sqlCmd = "SELECT ABS(query_end-query_start)+1 FROM %s WHERE subject_name='%s'" % ( self._table, subjectName )
-        lPathLengths = self._iDb.getIntegerListWithSQLCmd(sqlCmd)
-        return lPathLengths
-
-    ## Give a list of all distinct identifiers for a given subject, sorted in decreasing order of chain length
-    #
-    # @param subjectName string subject name
-    # @return lPathNums list of path identifiers
-    #
-    def getIdListSortedByDecreasingChainLengthFromSubject( self, subjectName ):
-        sqlCmd = "SELECT DISTINCT path, SUM( ABS(query_end - query_start) + 1 ) AS length"
-        sqlCmd += " FROM %s" % ( self._table )
-        sqlCmd += " WHERE subject_name='%s'" % ( subjectName )
-        sqlCmd += " GROUP BY path"
-        sqlCmd += " ORDER BY length DESC";
-        lPathNums = self._iDb.getIntegerListWithSQLCmd(sqlCmd)
-        return lPathNums
-
-    ## Give a list of all distinct identifiers for a given subject whose chain length is above a given threshold
-    #
-    # @param subjectName string subject name
-    # @param lengthThreshold integer length threshold below which chains are filtered out
-    # @return lPathNums list of path identifiers
-    #
-    def getIdListFromSubjectWhereChainsLongerThanThreshold( self, subjectName, lengthThreshold ):
-        lPathNums = []
-        sqlCmd = "SELECT DISTINCT path, SUM( ABS(query_end - query_start) + 1 ) AS length"
-        sqlCmd += " FROM %s" % ( self._table )
-        sqlCmd += " WHERE subject_name='%s'" % ( subjectName )
-        sqlCmd += " GROUP BY path"
-        sqlCmd += " ORDER BY length DESC";
-        self._iDb.execute( sqlCmd )
-        res = self._iDb.fetchall()
-        for i in res:
-            if int(i[1]) >= int(lengthThreshold):
-                lPathNums.append( i[0] )
-        return lPathNums
-    
-    ## Give a list of Set instances for a query annotation
-    #
-    # @param query string query name
-    # @return lSets list of Set instances
-    #
-    def getSetListFromQuery(self, query):
-        lpath = self.getPathListFromQuery(query)
-        lSets = PathUtils.getSetListFromQueries(lpath)
-        return lSets
-    
-    ## Give a list of Set instances for a query annotation
-    # @note method to have correspondence with getSetListFromSeqName() in TableSetAdaptator (for srptAutoPromote.py)
-    #
-    # @param query string query name
-    # @return lSets list of Set instances
-    #
-    def getSetListFromSeqName(self, query):
-        return self.getSetListFromQuery(query)
-    
-    ## Delete path corresponding to a given identifier number
-    #
-    # @param id integer identifier number
-    #
-    def deleteFromId(self,id):
-        sqlCmd = "delete from %s where path=%d;" % (self._table, id)
-        self._iDb.execute(sqlCmd)
-
-    ## Delete the path corresponding to a given Path instance
-    #
-    # @param path Path instance
-    #
-    def deleteFromPath(self,path):
-        sqlCmd = "delete from %s where path=%d and query_name='%s' and query_start=%s and query_end=%s and subject_name='%s' and subject_start=%s and subject_end=%s and E_value=%s and score=%s" % (self._table, path.getIdentifier(), path.getQueryName(), path.getQueryStart(), path.getQueryEnd(), path.getSubjectName(), path.getSubjectStart(), path.getSubjectEnd(), path.getEvalue(), int(path.getScore()))
-        self._iDb.execute(sqlCmd)
-
-    ## Delete the paths corresponding to a given list of identifier numbers
-    #
-    # @param lId list of identifier numbers
-    #
-    def deleteFromIdList(self,lId):
-        if lId == []:
-            return        
-        sqlCmd = "delete from %s where path=%d" % (self._table, lId[0])
-        for id in lId[1:]:
-            sqlCmd += " or path=%d" %(id)
-        sqlCmd += ";"
-        self._iDb.execute(sqlCmd)
-
-    ## Get a new id number
-    #
-    # @return newId integer new id
-    #
-    def getNewId(self):
-        sqlCmd = 'select max(path) from %s;' % (self._table)
-        maxId = self._iDb.getIntegerWithSQLCmd(sqlCmd)
-        newId = int(maxId)+1
-        return newId
-    
-    ## Join two paths by changing the identifier of both to the lesser of id1 and id2
-    #
-    # @param id1 integer path identifier
-    # @param id2 integer path identifier
-    # @return newId integer minimum of id1 and id2
-    # @note this method updates the identifier even if it does not exist in the path table
-    #     
-    def joinTwoPaths(self, id1, id2):
-        if id1 < id2:
-            newId = id1
-            oldId = id2
-        else:
-            newId = id2
-            oldId = id1
-        sqlCmd = "UPDATE %s SET path=%d WHERE path=%d"\
-                % (self._table, newId, oldId)
-        self._iDb.execute(sqlCmd)
-        return newId
-    
-    ## Create a 'pathRange' table from a 'path' table.
-    # The output table summarizes the information per identifier.
-    # The min and max values are taken.
-    # The identity is averaged over the fragments.
-    # It may overwrite an existing table.
-    #
-    # @param outTable string name of the output table
-    # @return outTable string Table which summarizes the information per identifier
-    #
-    def path2PathRange( self, outTable="" ):
-        return self._path2PathRangeOrPath2PathRangeQuery(outTable)
-  
-    ## Create a 'pathrange' table from a 'path' table for the given query name
-    #  The output table summarizes the information per identifier
-    #  The min and max values are taken
-    #  The identity is averaged over the fragments, weighted by the length of the fragment on the query
-    #  It may overwrite an existing table
-    #
-    # @param queryName string query name
-    # @param outTable string name of the output table
-    # @return outTable string table which summarizes the information per identifier
-    #
-    def _path2PathRangeFromQuery( self, queryName, outTable="" ):
-        return self._path2PathRangeOrPath2PathRangeQuery(outTable, queryName)
-    
-    def _path2PathRangeOrPath2PathRangeQuery(self, outTable, queryName=""):
-        self._iDb.createIndex( self._table, "path" )
-        if outTable == "":
-            outTable = "%s_range" % ( self._table )
-        self._iDb.dropTable( outTable )
-        
-        tmpTable = "%s_tmp" % ( self._table )
-        self._iDb.dropTable( tmpTable )
-        
-        sqlCmd = self._genSqlCmdForTmpTableAccordingToQueryName(queryName, tmpTable)
-        self._iDb.execute(sqlCmd)
-            
-        sqlCmd = "CREATE TABLE %s SELECT path, query_name, MIN(query_start) AS query_start, MAX(query_end) AS query_end, subject_name, MIN(subject_start) AS subject_start, MAX(subject_end) AS subject_end, MIN(e_value) AS e_value, SUM(score) AS score, TRUNCATE(SUM(identity)/SUM(ABS(query_end-query_start)+1),2) AS identity FROM %s WHERE query_start<query_end AND subject_start<subject_end GROUP BY path;" % ( outTable, tmpTable )
-        self._iDb.execute( sqlCmd )
-        
-        sqlCmd = "INSERT into %s SELECT path, query_name, MIN(query_start) AS query_start, MAX(query_end) AS query_end, subject_name, MAX(subject_start) AS subject_start, MIN(subject_end) AS subject_end, MIN(e_value) AS e_value, SUM(score) AS score, TRUNCATE(SUM(identity)/SUM(ABS(query_end-query_start)+1),2) AS identity FROM %s WHERE query_start<query_end AND subject_start>subject_end GROUP BY path;" % ( outTable, tmpTable )
-        self._iDb.execute( sqlCmd )
-        
-        self._iDb.createIndex( outTable, "path" )
-        self._iDb.dropTable( tmpTable )
-        return outTable
-            
-    ## Give a list of Path lists sorted by weighted identity.
-    #
-    # @return lChains list of chains
-    #
-    def getListOfChainsSortedByAscIdentityFromQuery( self, qry ):
-        lChains = []
-        tmpTable = self._path2PathRangeFromQuery( qry )
-        sqlCmd = "SELECT path FROM %s ORDER BY identity" % ( tmpTable )
-        self._iDb.execute( sqlCmd )
-        lPathnums = self._iDb.fetchall()
-        self._iDb.dropTable( tmpTable )
-        for pathnum in lPathnums:
-            lChains.append( self.getPathListFromId( int(pathnum[0]) ) )
-        return lChains
-    
-    ## Give a list of path instances sorted by increasing E-value
-    #
-    # @return lPaths list of path instances
-    #
-    def getPathListSortedByIncreasingEvalueFromQuery( self, queryName ):
-        sqlCmd = "SELECT * FROM %s WHERE query_name='%s' ORDER BY E_value ASC" % ( self._table, queryName )
-        lPaths = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lPaths
-    
-    
-    ## Return the number of times a given instance is present in the table
-    # The identifier is not considered,
-    # only coordinates, score, E-value and identity.
-    #
-    # @return nbOcc integer
-    #
-    def getNbOccurrences( self, iPath ):
-        sqlCmd = "SELECT COUNT(*) FROM %s WHERE" % ( self._table )
-        sqlCmd += " query_name='%s'" % ( iPath.range_query.seqname )
-        sqlCmd += " AND query_start='%s'" % ( iPath.range_query.start )
-        sqlCmd += " AND query_end='%s'" % ( iPath.range_query.end )
-        sqlCmd += " AND subject_name='%s'" % ( iPath.range_subject.seqname )
-        sqlCmd += " AND subject_start='%s'" % ( iPath.range_subject.start )
-        sqlCmd += " AND subject_end='%s'" % ( iPath.range_subject.end )
-        sqlCmd += " AND score='%s'" % ( iPath.score )
-        sqlCmd += " AND e_value='%s'" % ( iPath.e_value )
-        sqlCmd += " AND identity='%s'" % ( iPath.identity )
-        nbOcc = self._iDb.getIntegerWithSQLCmd( sqlCmd )
-        return nbOcc
-    
-    
-    def _getPathListFromTypeName( self, type, typeName ):
-        sqlCmd = "SELECT * FROM %s WHERE %s_name='%s';" % ( self._table, type, typeName )
-        lPath = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lPath
-    
-    def _getDistinctTypeNamesList( self, type ):
-        sqlCmd = "SELECT DISTINCT %s_name FROM %s" % ( type, self._table )
-        lDistinctTypeNames = self._iDb.getStringListWithSQLCmd(sqlCmd)
-        return lDistinctTypeNames
-    
-    def _getPathsNbFromTypeName( self, type, typeName ):
-        sqlCmd = "SELECT COUNT(*) FROM %s WHERE %s_name='%s'" % ( self._table, type, typeName )
-        pathNb = self._iDb.getIntegerWithSQLCmd( sqlCmd )
-        return pathNb
-    
-    def _getIdListFromTypeName( self, type, typeName ):
-        sqlCmd = "SELECT DISTINCT path FROM %s WHERE %s_name='%s'" % ( self._table, type, typeName )
-        lId = self._iDb.getIntegerListWithSQLCmd( sqlCmd )
-        return lId
-    
-    def _getIdNbFromTypeName( self, type, typeName ):
-        sqlCmd = "SELECT COUNT( DISTINCT path ) FROM %s WHERE %s_name='%s'" % ( self._table, type, typeName )
-        idNb = self._iDb.getIntegerWithSQLCmd( sqlCmd )
-        return idNb
-    
-    def _getTypeAndAttr2Insert(self, path):
-        type2Insert = ("'%d'", "'%s'", "'%d'", "'%d'", "'%s'", "'%d'", "'%d'", "'%g'", "'%d'", "'%f'")
-        if path.range_query.isOnDirectStrand():
-            queryStart = path.range_query.start
-            queryEnd = path.range_query.end
-            subjectStart = path.range_subject.start
-            subjectEnd = path.range_subject.end
-        else:
-            queryStart = path.range_query.end
-            queryEnd = path.range_query.start
-            subjectStart = path.range_subject.end
-            subjectEnd = path.range_subject.start
-        attr2Insert = ( path.id,\
-                     path.range_query.seqname,\
-                     queryStart,\
-                     queryEnd,\
-                     path.range_subject.seqname,\
-                     subjectStart,\
-                     subjectEnd,\
-                     path.e_value,\
-                     path.score,\
-                     path.identity\
-                     )
-        return type2Insert, attr2Insert
-    
-    def _getInstanceToAdapt(self):
-        iPath = Path()
-        return iPath
-    
-    def _escapeAntislash(self, obj):
-        obj.range_query.seqname = obj.range_query.seqname.replace("\\", "\\\\")
-        obj.range_subject.seqname = obj.range_subject.seqname.replace("\\", "\\\\")
-    
-    def _genSqlCmdForTmpTableAccordingToQueryName(self, queryName, tmpTable):
-        sqlCmd = ""
-        if queryName == "":
-            sqlCmd = "CREATE TABLE %s SELECT path, query_name, query_start, query_end, subject_name, subject_start, subject_end, e_value, score, (ABS(query_end-query_start)+1)*identity AS identity FROM %s" % (tmpTable, self._table)
-        else:
-            sqlCmd = "CREATE TABLE %s SELECT path, query_name, query_start, query_end, subject_name, subject_start, subject_end, e_value, score, (ABS(query_end-query_start)+1)*identity AS identity FROM %s WHERE query_name='%s'" % (tmpTable, self._table, queryName)
-        return sqlCmd
-        
-    ## Return a filtered list containing a single occurrence of each Path instance from a given list
-    #
-    # @param lPath a list of Path instances
-    # @return lUniquePath a list of Path instances
-    #
-    def getListOfUniqueOccPath(self, lPath):
-        if len(lPath) < 2 :
-            return lPath
-        
-        sortedListPath = sorted(lPath, key=lambda iPath: ( iPath.range_query.getSeqname(), iPath.range_query.getStart(), iPath.range_query.getEnd(), iPath.range_subject.getSeqname(), iPath.range_subject.getStart(), iPath.range_subject.getEnd()))
-        lUniquePath = []    
-        for i in xrange(1, len(sortedListPath)):
-            previousPath = sortedListPath[i - 1]
-            currentPath = sortedListPath[i]
-            if previousPath != currentPath:
-                lUniquePath.append(previousPath)
-
-        # the last path of the sorted list is always kept: any duplicate of it
-        # has already been skipped in the loop above
-        lUniquePath.append(currentPath)
-        return lUniquePath
\ No newline at end of file
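The subject-oriented methods above (getCumulLengthFromSubject, getChainLengthListFromSubject, getIdListFromSubjectWhereChainsLongerThanThreshold) push the per-chain aggregation into SQL (SUM(ABS(query_end-query_start)+1) ... GROUP BY path). As a reading aid only, here is a minimal pure-Python sketch of that aggregation and threshold filter; it is not part of the deleted file, and the (pathId, queryStart, queryEnd) tuple layout is assumed purely for illustration.

# Sketch only, not part of the changeset: the chain-length aggregation expressed
# in SQL by getIdListFromSubjectWhereChainsLongerThanThreshold(), in plain Python.
# The (pathId, queryStart, queryEnd) tuple layout is an assumption for illustration.
def chainIdsLongerThanThreshold(lFragments, lengthThreshold):
    dPathToLength = {}
    for pathId, queryStart, queryEnd in lFragments:
        # same length formula as the SQL: ABS(query_end - query_start) + 1
        dPathToLength[pathId] = dPathToLength.get(pathId, 0) + abs(queryEnd - queryStart) + 1
    # decreasing total chain length, as in ORDER BY length DESC
    lSorted = sorted(dPathToLength.items(), key=lambda item: item[1], reverse=True)
    return [pathId for pathId, length in lSorted if length >= lengthThreshold]

if __name__ == "__main__":
    print(chainIdsLongerThanThreshold([(1, 1, 100), (1, 201, 300), (2, 10, 20)], 150))  # [1]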
--- a/commons/core/sql/TableSeqAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,185 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-import sys
-from commons.core.sql.TableAdaptator import TableAdaptator
-from commons.core.sql.ITableSeqAdaptator import ITableSeqAdaptator
-from commons.core.coord.SetUtils import SetUtils
-from commons.core.seq.Bioseq import Bioseq
-
-
-## Adaptator for a Seq table
-#
-class TableSeqAdaptator( TableAdaptator, ITableSeqAdaptator ):
-    
-    ## Retrieve all the distinct accession names in a list.
-    #
-    # @return lAccessions list of accessions
-    #
-    def getAccessionsList( self ):
-        sqlCmd = "SELECT DISTINCT accession FROM %s;" % ( self._table )
-        lAccessions = self._getStringListWithSQLCmd(sqlCmd)
-        return lAccessions
-    
-    ## Save sequences in a fasta file from a list of accession names.
-    # 
-    # @param lAccessions list of accessions
-    # @param outFileName string Fasta file
-    #
-    def saveAccessionsListInFastaFile( self, lAccessions, outFileName ):
-        outFile = open( outFileName, "w" )
-        for ac in lAccessions:
-            bs = self.getBioseqFromHeader( ac )
-            bs.write(outFile)
-        outFile.close()
-    
-    ## Get a bioseq instance given its header
-    #
-    # @param header string name of the sequence ('accession' field in the 'seq' table) 
-    # @return bioseq instance
-    #
-    def getBioseqFromHeader( self, header ):
-        sqlCmd = "SELECT * FROM %s WHERE accession='%s';" % ( self._table, header )
-        self._iDb.execute( sqlCmd )
-        res = self._iDb.fetchall()
-        return Bioseq( res[0][0], res[0][1] )
-        
-    ## Retrieve the length of a sequence given its name.
-    #
-    # @param accession name of the sequence
-    # @return seqLength integer length of the sequence
-    # 
-    def getSeqLengthFromAccession( self, accession ):
-        sqlCmd = 'SELECT length FROM %s WHERE accession="%s"' % ( self._table, accession )
-        seqLength = self._iDb.getIntegerWithSQLCmd(sqlCmd)
-        return seqLength
-    
-    ## Retrieve the length of a sequence given its description.
-    #
-    # @param description of the sequence
-    # @return seqLength integer length of the sequence
-    # 
-    def getSeqLengthFromDescription( self, description ):
-        sqlCmd = 'SELECT length FROM %s WHERE description="%s"' % ( self._table, description )
-        seqLength = self._iDb.getIntegerWithSQLCmd(sqlCmd)
-        return seqLength
-        
-    ## Retrieve all the accessions and their lengths as a list of tuples
-    #
-    # @return lAccessionLengthTuples list of tuples
-    # 
-    def getAccessionAndLengthList(self):
-        sqlCmd = 'SELECT accession, length FROM %s' % self._table
-        self._iDb.execute(sqlCmd)
-        res = self._iDb.fetchall()
-        lAccessionLengthTuples = []
-        for i in res:
-            lAccessionLengthTuples.append(i)
-        return lAccessionLengthTuples
-    
-    ## Get a subsequence according to the given coordinates
-    #
-    # @param accession string accession name
-    # @param start integer 
-    # @param end integer
-    # @return bioseq.sequence string
-    #
-    def getSubSequence( self, accession, start, end ):
-        bs = Bioseq()
-        if start <= 0 or end <= 0:
-            print "ERROR with coordinates start=%i or end=%i" % ( start, end )
-            sys.exit(1)
-            
-        if accession not in self.getAccessionsList():
-            print "ERROR: accession '%s' absent from table '%s'" % ( accession, self._table )
-            sys.exit(1)
-            
-        lengthAccession = self.getSeqLengthFromAccession( accession )
-        if start > lengthAccession or end > lengthAccession:
-            print "ERROR: coordinates start=%i end=%i out of sequence '%s' range (%i bp)" % ( start, end, accession, lengthAccession )
-            sys.exit(1)
-            
-        sqlCmd = "SELECT SUBSTRING(sequence,%i,%i) FROM %s WHERE accession='%s'" % ( min(start,end), abs(end-start)+ 1, self._table, accession )
-        self._iDb.execute( sqlCmd )
-        res = self._iDb.fetchall()
-        bs.setSequence( res[0][0] )
-        if start > end:
-            bs.reverseComplement()
-        return bs.sequence
-    
-    ## Get a Bioseq instance built from a given list of Set instances
-    #
-    # @param lSets list of Set instances
-    # @return Bioseq instance
-    #
-    def getBioseqFromSetList( self, lSets ):
-        header = "%s::%i %s " % ( lSets[0].name, lSets[0].id, lSets[0].seqname )
-        sequence = ""
-        lSortedSets = SetUtils.getSetListSortedByIncreasingMinThenMax( lSets )
-        if not lSets[0].isOnDirectStrand():
-            lSortedSets.reverse()
-        for iSet in lSortedSets:
-            header += "%i..%i," % ( iSet.getStart(), iSet.getEnd() )
-            sequence += self.getSubSequence( iSet.seqname, iSet.getStart(), iSet.getEnd() )
-        return Bioseq( header[:-1], sequence )
-    
-    ## Return True if the given accession is present in the table
-    #
-    def isAccessionInTable( self, name ):
-        sqlCmd = "SELECT accession FROM %s WHERE accession='%s'" % ( self._table, name )
-        self._iDb.execute( sqlCmd )
-        res = self._iDb.fetchall()
-        return bool(res)
-    
-    ## Export all the sequences of the table into a fasta file.
-    #
-    # @param outFileName string name of the output fasta file
-    # 
-    def exportInFastaFile(self, outFileName ):
-        lAccessions = self.getAccessionsList()
-        self.saveAccessionsListInFastaFile( lAccessions, outFileName )
-        
-    def _getStringListWithSQLCmd( self, sqlCmd ):
-        self._iDb.execute(sqlCmd)
-        res = self._iDb.fetchall()
-        lString = []
-        for i in res:
-            lString.append(i[0])
-        return lString
-   
-    def _getTypeAndAttr2Insert(self, bs):
-        type2Insert =  ( "'%s'", "'%s'", "'%s'", "'%i'" ) 
-        attr2Insert =  (bs.header.split()[0], bs.sequence, bs.header, bs.getLength())
-        return type2Insert, attr2Insert
-    
-    def _escapeAntislash(self, obj):
-        pass
-
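A note on TableSeqAdaptator.getSubSequence() above: coordinates are 1-based and inclusive, and start > end is interpreted as a request on the reverse strand, so the extracted slice is reverse-complemented (via Bioseq in the deleted code). The sketch below is not part of the changeset; it mirrors that convention on a plain string, with an uppercase-only complement table assumed for illustration.

# Sketch only, not part of the changeset: the coordinate convention used by
# getSubSequence(), applied to a plain string instead of the 'seq' table.
_COMPLEMENT = {"A": "T", "T": "A", "G": "C", "C": "G", "N": "N"}  # uppercase only (assumption)

def subSequence(sequence, start, end):
    left, right = min(start, end), max(start, end)
    fragment = sequence[left - 1:right]  # SUBSTRING(sequence, left, right - left + 1)
    if start > end:
        # reverse strand requested: reverse-complement the slice
        fragment = "".join(_COMPLEMENT.get(base, "N") for base in reversed(fragment))
    return fragment

if __name__ == "__main__":
    print(subSequence("AACGTTTG", 2, 4))  # ACG
    print(subSequence("AACGTTTG", 4, 2))  # CGT, the reverse complement of ACG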
--- a/commons/core/sql/TableSetAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,215 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-from commons.core.sql.ITableSetAdaptator import ITableSetAdaptator
-from commons.core.sql.TableAdaptator import TableAdaptator
-from commons.core.coord.Set import Set
-
-
-## Adaptator for a Set table
-#
-class TableSetAdaptator( TableAdaptator, ITableSetAdaptator ):
-            
-    ## Give a list of Set instances having a given seq name
-    #
-    # @param seqName string seq name
-    # @return lSet list of instances
-    #
-    def getListFromSeqName( self, seqName ):
-        sqlCmd = "SELECT * FROM %s" % (self._table)
-        colum2Get, type2Get, attr2Get = self._getTypeColumAttr2Get(seqName)
-        sqlCmd += " WHERE " + colum2Get
-        sqlCmd += " = "
-        sqlCmd = sqlCmd + type2Get
-        sqlCmd = sqlCmd % "'" + attr2Get + "'"
-        lSet = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lSet
-        
-    ## Give a list of set instances overlapping a given region
-    #
-    # @param query string query name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lSet list of set instances
-    #
-    def getListOverlappingCoord(self, query, start, end):
-        sqlCmd = 'select * from %s where chr="%s" and ((start between least(%d,%d) and greatest(%d,%d) or end between least(%d,%d) and greatest(%d,%d)) or (least(start,end)<=least(%d,%d) and greatest(start,end)>=greatest(%d,%d)))  ;' % (self._table, query, start, end, start, end, start, end, start, end, start, end, start, end)
-        lSet = self._iDb.getObjectListWithSQLCmd( sqlCmd, self._getInstanceToAdapt )
-        return lSet
-
-    #TODO: to test !!!
-    ## Give a list of Set instances overlapping a given region
-    #
-    # @note whole chains are returned, even if only a fragment overlaps with the given region
-    # @param query string query name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lSets list of Set instances
-    #
-    def getChainListOverlappingCoord(self, query, start, end):
-        sqlCmd = "select distinct path from %s where chr='%s' and ((start between least(%d,%d) and greatest(%d,%d) or end between least(%d,%d) and greatest(%d,%d)) or (least(start,end)<=least(%d,%d) and greatest(start,end)>=greatest(%d,%d)));" % (self._table, query,start,end,start,end,start,end,start,end,start,end,start,end)
-        lIdentifiers = self._iDb.getIntegerListWithSQLCmd(sqlCmd)
-        lSets = self.getSetListFromIdList(lIdentifiers)
-        return lSets
-
-    ## Give a list of identifier numbers contained in the table
-    #
-    # @return lId integer list
-    #
-    def getIdList(self):
-        sqlCmd = "select distinct path from %s;" % (self._table)
-        lId = self._iDb.getIntegerListWithSQLCmd( sqlCmd )
-        return lId
-    
-    ## Give a list of the distinct seqName/chr present in the table
-    #
-    # @return lDistinctContigNames string list
-    #
-    def getSeqNameList(self):
-        sqlCmd = "SELECT DISTINCT chr FROM %s" % ( self._table )
-        lDistinctContigNames = self._iDb.getStringListWithSQLCmd(sqlCmd)
-        return lDistinctContigNames
-    
-    ## Give a list of Set instances having a given seq name
-    #
-    # @param seqName string seq name
-    # @return lSet list of instances
-    #
-    def getSetListFromSeqName( self, seqName):
-        lSets = self.getListFromSeqName(seqName)
-        return lSets
-    
-    ## Give a list of Set instances with a given identifier number
-    #
-    # @param id integer identifier number
-    # @return lSet list of Set instances
-    #
-    def getSetListFromId(self, id):
-        SQLCmd = "select * from %s where path=%d;" % (self._table, id)
-        return self._iDb.getObjectListWithSQLCmd( SQLCmd, self._getInstanceToAdapt )
-   
-    ## Give a list of Set instances from a list of identifier numbers
-    #
-    # @param lId list of integer identifier numbers
-    # @return lSet list of Set instances
-    #   
-    def getSetListFromIdList(self,lId):
-        lSet = []
-        if lId == []:
-            return lSet
-        SQLCmd = "select * from %s where path=%d" % (self._table, lId[0])
-        for i in lId[1:]:
-            SQLCmd += " or path=%d" % (i)
-        SQLCmd += ";"
-        return self._iDb.getObjectListWithSQLCmd( SQLCmd, self._getInstanceToAdapt )
-    
-    ## Return a list of Set instances overlapping a given sequence
-    #   
-    # @param seqName string sequence name
-    # @param start integer start coordinate
-    # @param end integer end coordinate
-    # @return lSet list of Set instances
-    #
-    def getSetListOverlappingCoord( self, seqName, start, end ):
-        lSet = self.getListOverlappingCoord( seqName, start, end )
-        return lSet
-    
-    ## Delete set corresponding to a given identifier number
-    #
-    # @param id integer identifier number
-    #  
-    def deleteFromId(self, id):
-        sqlCmd = "delete from %s where path=%d;" % (self._table, id)
-        self._iDb.execute(sqlCmd)
-        
-    ## Delete the sets corresponding to a given list of identifier numbers
-    #
-    # @param lId list of integer identifier numbers
-    #  
-    def deleteFromIdList(self, lId):
-        if lId == []:
-            return
-        sqlCmd = "delete from %s where path=%d" % ( self._table, lId[0] )
-        for i in lId[1:]:
-            sqlCmd += " or path=%d"%(i)
-        sqlCmd += ";"
-        self._iDb.execute(sqlCmd)
-        
-    ## Join two sets by changing the identifier of both to the lesser of id1 and id2
-    #
-    # @param id1 integer id path number
-    # @param id2 integer id path number
-    #    
-    def joinTwoSets(self, id1, id2):
-        if id1 < id2:
-            newId = id1
-            oldId = id2
-        else:
-            newId = id2
-            oldId = id1
-        sqlCmd = "UPDATE %s SET path=%d WHERE path=%d" % (self._table, newId, oldId)
-        self._iDb.execute(sqlCmd)
-    
-    ## Get a new id number
-    #
-    # @return new_id integer max_id + 1 
-    #
-    def getNewId(self):
-        sqlCmd = "select max(path) from %s;" % (self._table)
-        maxId = self._iDb.getIntegerWithSQLCmd(sqlCmd)
-        newId = int(maxId) + 1
-        return newId
-    
-    ## Give the data contained in the table as a list of Set instances
-    #
-    # @return lSets list of set instances
-    #
-    def getListOfAllSets( self ):
-        return self.getListOfAllCoordObject()
-   
-    def _getInstanceToAdapt(self):
-        iSet = Set()
-        return iSet
-    
-    def _getTypeColumAttr2Get(self, contig):
-        colum2Get = 'chr'
-        type2Get = '%s'
-        attr2Get = contig
-        return colum2Get, type2Get, attr2Get
-    
-    def _getTypeAndAttr2Insert(self, set):
-        type2Insert = ("'%d'","'%s'","'%s'","'%d'","'%d'")
-        attr2Insert = (set.id, set.name, set.seqname, set.start, set.end)
-        return type2Insert, attr2Insert
-
-    def _escapeAntislash(self, obj):
-        obj.name = obj.name.replace("\\", "\\\\")
-        obj.seqname = obj.seqname.replace("\\", "\\\\")
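TableSetAdaptator.getListOverlappingCoord() and getChainListOverlappingCoord() encode a strand-agnostic overlap test with LEAST()/GREATEST() in SQL: a stored set matches if either of its boundaries falls inside the requested region, or if it spans the region entirely. The sketch below is not part of the changeset; it restates that test in plain Python with 1-based, inclusive coordinates.

# Sketch only, not part of the changeset: the overlap test used by
# getListOverlappingCoord(), written in plain Python.
def overlaps(setStart, setEnd, regionStart, regionEnd):
    regionMin, regionMax = min(regionStart, regionEnd), max(regionStart, regionEnd)
    setMin, setMax = min(setStart, setEnd), max(setStart, setEnd)
    return (regionMin <= setStart <= regionMax        # start falls inside the region
            or regionMin <= setEnd <= regionMax       # end falls inside the region
            or (setMin <= regionMin and setMax >= regionMax))  # set spans the whole region

if __name__ == "__main__":
    print(overlaps(80, 120, 100, 200))   # True: one boundary inside the region
    print(overlaps(50, 300, 100, 200))   # True: the set spans the whole region
    print(overlaps(10, 20, 100, 200))    # False: disjoint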
--- a/commons/core/sql/test/TestSuite_sql.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-import unittest
-import sys
-import Test_DbMySql
-import Test_TableBinPathAdaptator
-import Test_TableMapAdaptator
-import Test_TableMatchAdaptator
-import Test_TablePathAdaptator
-import Test_TableSeqAdaptator
-import Test_TableSetAdaptator
-import Test_F_RepetJob
-import Test_RepetJob
-import Test_TableBinSetAdaptator
-
-def main():
-
-        TestSuite_sql = unittest.TestSuite()
-        
-        TestSuite_sql.addTest( unittest.makeSuite( Test_DbMySql.Test_DbMySql, "test" ) )
-        TestSuite_sql.addTest( unittest.makeSuite( Test_TableBinPathAdaptator.Test_TableBinPathAdaptator, "test" ) )
-        TestSuite_sql.addTest( unittest.makeSuite( Test_TableMapAdaptator.Test_TableMapAdaptator, "test" ) )
-        TestSuite_sql.addTest( unittest.makeSuite( Test_TableMatchAdaptator.Test_TableMatchAdaptator, "test" ) )
-        TestSuite_sql.addTest( unittest.makeSuite( Test_TableSetAdaptator.Test_TableSetAdaptator, "test" ) )
-        TestSuite_sql.addTest( unittest.makeSuite( Test_TableSeqAdaptator.Test_TableSeqAdaptator, "test" ) )
-        TestSuite_sql.addTest( unittest.makeSuite( Test_TablePathAdaptator.Test_TablePathAdaptator, "test" ) )
-        TestSuite_sql.addTest( unittest.makeSuite( Test_F_RepetJob.Test_F_RepetJob, "test" ) )
-        TestSuite_sql.addTest( unittest.makeSuite( Test_RepetJob.Test_RepetJob, "test" ) )
-        TestSuite_sql.addTest( unittest.makeSuite( Test_TableBinSetAdaptator.Test_TableBinSetAdaptator, "test" ) )
-        
-        runner = unittest.TextTestRunner( sys.stderr, 2, 2 )
-        runner.run( TestSuite_sql )
-        
-        
-if __name__ == "__main__":
-    main()
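TestSuite_sql.main() above bundles the SQL test modules into a single TextTestRunner run. For reference, a single module from that list can also be run on its own with the same Python 2-era unittest primitives the suite uses; the sketch below is not part of the changeset and assumes the commons package and the MySQL test environment used by these tests are available.

# Sketch only, not part of the changeset: running one bundled module on its own.
import unittest
import Test_TableSetAdaptator

suite = unittest.makeSuite(Test_TableSetAdaptator.Test_TableSetAdaptator, "test")
unittest.TextTestRunner(verbosity=2).run(suite)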
--- a/commons/core/sql/test/Test_DbFactory.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,63 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-import os
-import unittest
-from commons.core.sql.DbFactory import DbFactory
-
-class Test_DbFactory( unittest.TestCase ):
-
-    def test_createInstance (self):
-        dbInstance = DbFactory.createInstance()
-        expValue = None
-        obsValue = dbInstance
-        self.assertNotEquals(expValue, obsValue)
-        
-    def test_createInstance_with_config (self):
-        configFileName = "dummyConfigFileName.cfg"
-        configF = open(configFileName,"w")
-        configF.write("[repet_env]\n")
-        configF.write( "repet_host: %s\n" % ( os.environ["REPET_HOST"] ) )
-        configF.write( "repet_user: %s\n" % ( os.environ["REPET_USER"] ) )
-        configF.write( "repet_pw: %s\n" % ( os.environ["REPET_PW"] ) )
-        configF.write( "repet_db: %s\n" % ( os.environ["REPET_DB"] ) )
-        configF.write( "repet_port: %s\n" % ( os.environ["REPET_PORT"] ) )
-        configF.close()
-        
-        dbInstance = DbFactory.createInstance(configFileName)
-        expValue = None
-        obsValue = dbInstance
-        self.assertNotEquals(expValue, obsValue)
-        os.remove(configFileName)
-        
-test_suite = unittest.TestSuite()
-test_suite.addTest( unittest.makeSuite( Test_DbFactory ) )
-if __name__ == "__main__":
-    unittest.TextTestRunner(verbosity=2).run( test_suite )
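Test_DbFactory above (and Test_DbMySql.test_setAttributesFromConfigFile further down) builds its configuration file by hand from the REPET_HOST, REPET_USER, REPET_PW, REPET_DB and REPET_PORT environment variables. The sketch below is not part of the changeset; it writes the same [repet_env] layout with the standard ConfigParser module, using placeholder values, and assumes the loader accepts standard ConfigParser syntax (the hand-written "key: value" lines in the tests suggest it does).

# Sketch only, not part of the changeset: writing the [repet_env] layout used by
# these tests with ConfigParser instead of hand-formatted lines. Values are placeholders.
try:
    import configparser                   # Python 3
except ImportError:
    import ConfigParser as configparser   # Python 2

config = configparser.RawConfigParser()
config.add_section("repet_env")
for option, value in [("repet_host", "localhost"),
                      ("repet_user", "repet"),
                      ("repet_pw", "secret"),
                      ("repet_db", "repet_test"),
                      ("repet_port", "3306")]:
    config.set("repet_env", option, value)

with open("dummyConfigFileName.cfg", "w") as configFile:
    config.write(configFile)

The resulting file can then be handed to DbFactory.createInstance("dummyConfigFileName.cfg"), as test_createInstance_with_config does.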
--- a/commons/core/sql/test/Test_DbMySql.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1554 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-import unittest
-import time
-import os
-from MySQLdb import ProgrammingError
-from commons.core.sql.DbMySql import DbMySql
-from commons.core.sql.DbMySql import TABLE_SCHEMA_DESCRIPTOR
-from commons.core.sql.DbMySql import TABLE_TYPE_SYNONYMS
-from commons.core.utils.FileUtils import FileUtils
-from commons.core.coord.Path import Path
-
-class Test_DbMySql( unittest.TestCase ):
-    
-    def setUp( self ):
-        self._iDb = DbMySql( )
-        self._uniqId = "%s" % time.strftime("%Y%m%d%H%M%S")
-
-    def tearDown( self ):
-        if self._iDb.db.open:
-            self._iDb.close()
-        self._iDb = None
-        
-    def test_execute_syntax_error(self):
-        expErrorMsg = "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'CHAUD TABLES' at line 1"
-        obsErrorMsg = ""
-        sqlCmd = "CHAUD TABLES"
-        try:
-            self._iDb.execute(sqlCmd)
-        except ProgrammingError as excep:
-            obsErrorMsg = excep.args[1]
-        
-        self.assertEquals(expErrorMsg, obsErrorMsg)
-
-    def test_execute_with_1_retry(self):
-        tableName = "dummyTable%s" % self._uniqId
-        sqlCmd = "CREATE TABLE %s (dummyColumn varchar(255))" % tableName
-        self._iDb.close()
-        self._iDb.execute(sqlCmd)
-        self.assertTrue(self._iDb.doesTableExist(tableName))
-        self._iDb.dropTable(tableName)
-
-    def test_setAttributesFromConfigFile(self):
-        expHost = "dummyHost"
-        expUser = "dummyUser"
-        expPw = "dummyPw"
-        expDb = "dummyDb"
-        expPort = 1000
-        
-        configFileName = "dummyConfigFileName.cfg"
-        f = open( configFileName, "w" )
-        f.write("[repet_env]\n")
-        f.write("repet_host: " + expHost + "\n")
-        f.write("repet_user: " + expUser + "\n")
-        f.write("repet_pw: " + expPw + "\n")
-        f.write("repet_db: " + expDb + "\n")
-        f.write("repet_port: " + str(expPort) + "\n")
-        f.close()
-        
-        self._iDb.setAttributesFromConfigFile(configFileName)
-        
-        obsHost = self._iDb.host
-        obsUser = self._iDb.user
-        obsPw = self._iDb.passwd
-        obsDb = self._iDb.dbname
-        obsPort = self._iDb.port
-        
-        os.remove(configFileName)
-        
-        self.assertEquals( expHost, obsHost )
-        self.assertEquals( expUser, obsUser )
-        self.assertEquals( expPw, obsPw )
-        self.assertEquals( expDb, obsDb )
-        self.assertEquals( expPort, obsPort )
-
-    def test_open_True(self):
-        self._iDb.close()
-        self.assertTrue(self._iDb.open())
-        self.assertEquals(1, self._iDb.db.open)
-        self._iDb.close()
-        self.assertEquals(0, self._iDb.db.open)
-        
-    def test_open_False(self):
-        self._iDb.close()
-        self._iDb.user = "dummyUser"
-        self.assertFalse( self._iDb.open() )
-        
-    def test_doesTableExist_True(self):
-        tableName = "dummyTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( dummyColumn varchar(255) )" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        self.assertTrue( self._iDb.doesTableExist(tableName) )
-        self._iDb.dropTable(tableName)
-        
-    def test_doesTableExist_False(self):
-        tableName = "dummyTable" + self._uniqId
-        self.assertFalse( self._iDb.doesTableExist(tableName) )
-        
-    def test_dropTable(self):
-        tableName = "dummyTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( dummyColumn varchar(255) )" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        self._iDb.dropTable(tableName)
-        self.assertFalse( self._iDb.doesTableExist(tableName) )
-        
-    def test_renameTable(self):
-        tableName = "dummyTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( dummyColumn varchar(255) )" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        self._iDb.updateInfoTable( tableName, "" )
-        newTableName = "newDummyTable"
-        
-        self._iDb.renameTable(tableName, newTableName)
-        
-        self.assertFalse( self._iDb.doesTableExist(tableName) )
-        self.assertTrue( self._iDb.doesTableExist(newTableName) )
-        
-        expTuple = (('newDummyTable', ''),)
-        sqlCmd = 'SELECT * FROM info_tables WHERE name = "%s"' % ( newTableName )
-        self._iDb.execute( sqlCmd )
-        obsTuple = self._iDb.cursor.fetchall()
-        self.assertEquals( expTuple, obsTuple)
-        
-        expTuple = ()
-        sqlCmd = 'SELECT * FROM info_tables WHERE name = "%s"' % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTuple = self._iDb.cursor.fetchall()
-        self.assertEquals( expTuple, obsTuple)
-        
-        self._iDb.dropTable(newTableName)
-        
-    def test_copyTable(self):
-        tableName = "dummyTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( dummyColumn varchar(255) );" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE INDEX idummyColumn ON %s ( dummyColumn );" % (tableName)
-        self._iDb.execute( sqlCmd )
-        
-        newTableName = "newDummyTable"
-        
-        self._iDb.copyTable(tableName, newTableName)
-        
-        self.assertTrue( self._iDb.doesTableExist(tableName) )
-        self.assertTrue( self._iDb.doesTableExist(newTableName) )
-        
-        expTuple = (('newDummyTable', ''),)
-        sqlCmd = 'SELECT * FROM info_tables WHERE name = "%s";' % ( newTableName )
-        self._iDb.execute( sqlCmd )
-        obsTuple = self._iDb.cursor.fetchall()
-        
-        self.assertEquals( expTuple, obsTuple)
-            
-        expTuple = (('newDummyTable', 1L, 'idummyColumn', 1L, 'dummyColumn', 'A', None, None, None, 'YES', 'BTREE', ''),)
-        sqlCmd = "SHOW INDEX FROM %s;" % ( newTableName )
-        self._iDb.execute( sqlCmd )
-        obsTuple = self._iDb.cursor.fetchall()
-        self.assertEquals( expTuple, obsTuple)
-        
-        self._iDb.dropTable(tableName)
-        self._iDb.dropTable(newTableName)
-        
-    def test_getTableType(self):
-        lTypesToTest = TABLE_SCHEMA_DESCRIPTOR.keys()
-        for tableType in lTypesToTest:
-            tableName = "dummy%sTable%s" % (tableType, self._uniqId)
-            self._iDb.createTable(tableName, tableType)
-            
-            obsType = self._iDb.getTableType(tableName)
-            self.assertEquals(tableType, obsType)
-            
-            self._iDb.dropTable(tableName)
-    
-    def test_getSize_empty_table(self):
-        tableName = "dummyPathTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( path int unsigned, query_name varchar(255), query_start int , query_end int, subject_name varchar(255), subject_start int unsigned, subject_end int unsigned, E_value double, score int unsigned, identity float)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        
-        pathFileName = "dummyPathFile.txt"
-        pathF = open( pathFileName, "w" )
-        pathF.write( "")
-        pathF.close()
-        self._iDb.loadDataFromFile(tableName, pathFileName, False)
-        expSize = 0
-        obsSize = self._iDb.getSize(tableName)
-        
-        self._iDb.dropTable(tableName)
-        os.remove(pathFileName)
-        
-        self.assertEquals( expSize, obsSize )
-        
-    def test_getSize_two_rows(self):
-        tableName = "dummyPathTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( path int unsigned, query_name varchar(255), query_start int , query_end int, subject_name varchar(255), subject_start int unsigned, subject_end int unsigned, E_value double, score int unsigned, identity float)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        
-        pathFileName = "dummyPathFile.txt"
-        pathF = open( pathFileName, "w" )
-        pathF.write( "1\tqry\t1\t100\tsbj\t1\t100\t1e-123\t136\t98.4\n" )
-        pathF.write( "2\tqry\t500\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        pathF.close()
-        self._iDb.loadDataFromFile(tableName, pathFileName, False)
-        expSize = 2
-        obsSize = self._iDb.getSize(tableName)
-        
-        self._iDb.dropTable(tableName)
-        os.remove(pathFileName)
-        
-        self.assertEquals( expSize, obsSize )
-        
-    def test_isEmpty_True(self):
-        tableName = "dummyTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( dummyColumn varchar(255) )" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        
-        fileName = "dummyTableFile.txt"
-        f = open( fileName, "w" )
-        f.write( "" )
-        f.close()
-        self._iDb.loadDataFromFile(tableName, fileName, False)
-        
-        self.assertTrue( self._iDb.isEmpty(tableName) )
-        
-        self._iDb.dropTable(tableName)
-        os.remove(fileName)
-        
-    def test_isEmpty_False(self):
-        tableName = "dummyTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( dummyColumn varchar(255) )" % tableName
-        self._iDb.execute( sqlCmd )
-        
-        fileName = "dummyTableFile.txt"
-        f = open( fileName, "w" )
-        f.write( "test" )
-        f.close()
-        self._iDb.loadDataFromFile(tableName, fileName, False)
-        
-        self.assertFalse( self._iDb.isEmpty(tableName) )
-        
-        self._iDb.dropTable(tableName)
-        os.remove(fileName)
-
-    def test_updateInfoTable(self):
-        tableName = "dummyTable" + self._uniqId
-        info = "Table_for_test"
-        
-        self._iDb.updateInfoTable(tableName, info)
-        
-        sqlCmd = 'SELECT file FROM info_tables WHERE name = "%s"' % ( tableName )
-        self._iDb.execute( sqlCmd )
-        results = self._iDb.cursor.fetchall()
-        obsResult = False
-        if (info,) in results:
-            obsResult = True
-            sqlCmd = 'DELETE FROM info_tables WHERE name = "%s"' % ( tableName )
-            self._iDb.execute( sqlCmd )
-            
-        self.assertTrue( obsResult )
-        
-    def test_loadDataFromFile_with_empty_file(self):
-        tableName = "dummyPathTable1" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( path int unsigned, query_name varchar(255), query_start int , query_end int, subject_name varchar(255), subject_start int unsigned, subject_end int unsigned, E_value double, score int unsigned, identity float)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        
-        pathFileName = "dummyPathFile.txt"
-        pathF = open( pathFileName, "w" )
-        pathF.write( "" )
-        pathF.close()
-        expTPathTuples = ()
-        
-        self._iDb.loadDataFromFile(tableName, pathFileName, False)
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTPathTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(pathFileName)
-        
-        self.assertEquals( expTPathTuples, obsTPathTuples )
-        
-    def test_loadDataFromFile_with_first_line(self):
-        tableName = "dummyPathTable2" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( path int unsigned, query_name varchar(255), query_start int , query_end int, subject_name varchar(255), subject_start int unsigned, subject_end int unsigned, E_value double, score int unsigned, identity float)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        
-        pathFileName = "dummyPathFile.txt"
-        pathF = open( pathFileName, "w" )
-        pathF.write( "1\tqry\t1\t100\tsbj\t1\t100\t1e-123\t136\t98.4\n" )
-        pathF.write( "2\tqry\t500\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        pathF.close()
-        
-        expPathTuple1 = (1L, 'qry', 1L, 100L, 'sbj', 1L, 100L, 1e-123, 136L, 98.4)
-        expPathTuple2 = (2L, 'qry', 500L, 401L, 'sbj', 1L, 100L, 1e-152, 161L, 98.7)
-        expTPathTuples = (expPathTuple1, expPathTuple2)
-        
-        self._iDb.loadDataFromFile(tableName, pathFileName, False)
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTPathTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(pathFileName)
-        
-        self.assertEquals( expTPathTuples, obsTPathTuples )
-        
-    def test_loadDataFromFile_without_first_line(self):
-        tableName = "dummyPathTable3" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( path int unsigned, query_name varchar(255), query_start int , query_end int, subject_name varchar(255), subject_start int unsigned, subject_end int unsigned, E_value double, score int unsigned, identity float)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        
-        pathFileName = "dummyPathFile.txt"
-        pathF = open( pathFileName, "w" )
-        pathF.write( "1\tqry\t1\t100\tsbj\t1\t100\t1e-123\t136\t98.4\n" )
-        pathF.write( "2\tqry\t500\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        pathF.close()
-        
-        expPathTuple = (2L, 'qry', 500L, 401L, 'sbj', 1L, 100L, 1e-152, 161L, 98.7)
-        expTPathTuples = (expPathTuple,)
-        
-        self._iDb.loadDataFromFile(tableName, pathFileName, True)
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTPathTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(pathFileName)
-        
-        self.assertEquals( expTPathTuples, obsTPathTuples )
-        
-    def test_createIndex_Map(self):
-        tableName = "dummyMapTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( name varchar(255), chr varchar(255), start int, end int)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        expLIndex = ["iname", "ichr", "istart", "iend", "icoord", "icoord"]
-        
-        self._iDb.createIndex(tableName, "map")
-        
-        sqlCmd = "SHOW INDEX FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        results = self._iDb.cursor.fetchall()
-        
-        for index in expLIndex[:-1]:
-            sqlCmd = "DROP INDEX %s ON %s" % ( index, tableName )
-            self._iDb.execute( sqlCmd )
-        self._iDb.dropTable(tableName)
-        
-        obsLIndex = []
-        for tuple in results:
-            obsLIndex.append(tuple[2])
-            
-        self.assertEquals( expLIndex, obsLIndex)
-        
-    def test_createIndex_Map_coord_index_already_exist(self):
-        tableName = "dummyMapTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( name varchar(255), chr varchar(255), start int, end int)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE INDEX icoord ON %s ( start,end );" % (tableName)
-        self._iDb.execute( sqlCmd )
-        expLIndex = ["icoord", "icoord", "iname", "ichr", "istart", "iend"]
-        
-        self._iDb.createIndex(tableName, "map")
-        
-        sqlCmd = "SHOW INDEX FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        results = self._iDb.cursor.fetchall()
-        
-        for index in expLIndex[1:]:
-            sqlCmd = "DROP INDEX %s ON %s" % ( index, tableName )
-            self._iDb.execute( sqlCmd )
-        self._iDb.dropTable(tableName)
-        
-        obsLIndex = []
-        for tuple in results:
-            obsLIndex.append(tuple[2])
-            
-        self.assertEquals( expLIndex, obsLIndex)
-
-    def test_createTable_Map( self ):
-        tableName = "dummyMapTable" + self._uniqId
-        mapFileName = "dummyMapFile.txt"
-        mapF = open( mapFileName, "w" )
-        mapF.write( "map1\tseq1\t20\t50\n" )
-        mapF.write( "map2\tseq2\t700\t760\n" )
-        mapF.close()
-        
-        expMapTuple1 = ("map1", "seq1", 20L, 50L)
-        expMapTuple2 = ("map2", "seq2", 700L, 760L)
-        expTMapTuples = (expMapTuple1, expMapTuple2)
-        
-        self._iDb.createTable(tableName, 'map', mapFileName)
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTMapTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(mapFileName)
-        
-        self.assertEquals( expTMapTuples, obsTMapTuples )
-        
-    def test_createIndex_Match(self):
-        tableName = "dummyMatchTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( query_name varchar(255), query_start int, query_end int, query_length int unsigned, query_length_perc float, match_length_perc float, subject_name varchar(255), subject_start int unsigned, subject_end int unsigned, subject_length int unsigned, subject_length_perc float, E_value double, score int unsigned, identity float, path int unsigned)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        expLIndex = ["id", "qname", "qstart", "qend", "sname", "sstart", "send", "qcoord", "qcoord"]
-        
-        self._iDb.createIndex(tableName, "match")
-        
-        sqlCmd = "SHOW INDEX FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        results = self._iDb.cursor.fetchall()
-        
-        obsLIndex = []
-        for tuple in results:
-            obsLIndex.append(tuple[2])
-        
-        self._iDb.dropTable(tableName)
-        self.assertEquals( expLIndex, obsLIndex)
-
-    def test_createIndex_Match_all_index_already_exist(self):
-        tableName = "dummyMatchTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( query_name varchar(255), query_start int, query_end int, query_length int unsigned, query_length_perc float, match_length_perc float, subject_name varchar(255), subject_start int unsigned, subject_end int unsigned, subject_length int unsigned, subject_length_perc float, E_value double, score int unsigned, identity float, path int unsigned)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE UNIQUE INDEX id ON %s ( path );" % (tableName)
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE INDEX qname ON %s ( query_name(10) );" % (tableName)
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE INDEX qstart ON %s ( query_start );" % (tableName)
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE INDEX qend ON %s ( query_end );" % (tableName)
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE INDEX sname ON %s ( subject_name(10) );" % (tableName)
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE INDEX sstart ON %s ( subject_start );" % (tableName)
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE INDEX send ON %s ( subject_end );" % (tableName)
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE INDEX qcoord ON %s ( query_start,query_end );" % (tableName)
-        self._iDb.execute( sqlCmd )
-        expLIndex = ["id", "qname", "qstart", "qend", "sname", "sstart", "send", "qcoord", "qcoord"]
-        
-        self._iDb.createIndex(tableName, "match")
-        
-        sqlCmd = "SHOW INDEX FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        results = self._iDb.cursor.fetchall()
-        
-        for index in expLIndex[:-1]:
-            sqlCmd = "DROP INDEX %s ON %s" % ( index, tableName )
-            self._iDb.execute( sqlCmd )
-        self._iDb.dropTable(tableName)
-        
-        obsLIndex = []
-        for tuple in results:
-            obsLIndex.append(tuple[2])
-            
-        self.assertEquals( expLIndex, obsLIndex)
-        
-    def test_createTable_match( self ):
-        tableName = "dummyMatchTable" + self._uniqId
-        matchFileName = "dummyMatchFile.txt"
-        matchF = open( matchFileName, "w" )
-        matchF.write( "qry1\t700\t760\t60\t100\t100\tsbj2\t500\t560\t60\t100\t1e-123\t136\t98.4\t2\n" )
-        matchF.write( "qry2\t700\t760\t60\t100\t100\tsbj2\t500\t560\t60\t100\t1e-123\t136\t98.4\t2\n" )
-        matchF.close()
-        
-        expMatchTuple = ("qry2", 700L, 760L, 60L, 100.0, 100.0, "sbj2", 500L, 560L, 60L, 100.0, 1e-123, 136L, 98.4, 2L)
-        expTMatchTuples = (expMatchTuple,)
-        
-        self._iDb.createTable(tableName, "match", matchFileName)
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTMatchTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(matchFileName)
-        
-        self.assertEquals( expTMatchTuples, obsTMatchTuples )
-        
-    def test_createIndex_Path(self):
-        tableName = "dummyPathTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( path int unsigned, query_name varchar(255), query_start int , query_end int, subject_name varchar(255), subject_start int unsigned, subject_end int unsigned, E_value double, score int unsigned, identity float)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        expLIndex = ["id", "qname", "qstart", "qend", "sname", "sstart", "send", "qcoord", "qcoord"]
-        
-        self._iDb.createIndex(tableName, "path")
-        
-        sqlCmd = "SHOW INDEX FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        results = self._iDb.cursor.fetchall()
-        
-        for index in expLIndex[:-1]:
-            sqlCmd = "DROP INDEX %s ON %s" % ( index, tableName )
-            self._iDb.execute( sqlCmd )
-        self._iDb.dropTable(tableName)
-        
-        obsLIndex = []
-        for tuple in results:
-            obsLIndex.append(tuple[2])
-            
-        self.assertEquals( expLIndex, obsLIndex)
-        
-    def test_createIndex_Path_id_and_send_index_already_exist(self):
-        tableName = "dummyPathTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( path int unsigned, query_name varchar(255), query_start int , query_end int, subject_name varchar(255), subject_start int unsigned, subject_end int unsigned, E_value double, score int unsigned, identity float)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE INDEX id ON %s ( path );" % (tableName)
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE INDEX send ON %s ( subject_end );" % (tableName)
-        self._iDb.execute( sqlCmd )
-        expLIndex = ["id", "send", "qname", "qstart", "qend", "sname", "sstart", "qcoord", "qcoord"]
-        
-        self._iDb.createIndex(tableName, "path")
-        
-        sqlCmd = "SHOW INDEX FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        results = self._iDb.cursor.fetchall()
-        
-        for index in expLIndex[:-1]:
-            sqlCmd = "DROP INDEX %s ON %s" % ( index, tableName )
-            self._iDb.execute( sqlCmd )
-        self._iDb.dropTable(tableName)
-        
-        obsLIndex = []
-        for tuple in results:
-            obsLIndex.append(tuple[2])
-            
-        self.assertEquals( expLIndex, obsLIndex)
-        
-    def test_createTable_path( self ):
-        tableName = "dummyPathTable" + self._uniqId
-        pathFileName = "dummyPathFile.txt"
-        pathF = open( pathFileName, "w" )
-        pathF.write( "1\tqry\t1\t100\tsbj\t1\t100\t1e-123\t136\t98.4\n" )
-        pathF.write( "2\tqry\t500\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        pathF.close()
-        
-        expPathTuple1 = (1L, "qry", 1L, 100L, "sbj", 1L, 100L, 1e-123, 136L, 98.4)
-        expPathTuple2 = (2L, "qry", 401L, 500L, "sbj", 100L, 1L, 1e-152, 161L, 98.7)  # change coordinates
-        expTPathTuples = (expPathTuple1, expPathTuple2)
-        
-        self._iDb.createTable( tableName, "path", pathFileName)
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTPathTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(pathFileName)
-        
-        self.assertEquals( expTPathTuples, obsTPathTuples )
-        
-    def test_createIndex_align(self):
-        tableName = "dummyAlignTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( query_name varchar(255), query_start int, query_end int,subject_name varchar(255), subject_start int unsigned, subject_end int unsigned,E_value double, score int unsigned, identity float)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        expLIndex = ["qname", "qstart", "qend", "sname", "sstart", "send", "qcoord", "qcoord"]
-        
-        self._iDb.createIndex(tableName, "align")
-        
-        sqlCmd = "SHOW INDEX FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        results = self._iDb.cursor.fetchall()
-        
-        for index in expLIndex[:-1]:
-            sqlCmd = "DROP INDEX %s ON %s" % ( index, tableName )
-            self._iDb.execute( sqlCmd )
-        self._iDb.dropTable(tableName)
-        
-        obsLIndex = []
-        for tuple in results:
-            obsLIndex.append(tuple[2])
-            
-        self.assertEquals( expLIndex, obsLIndex)
-        
-    def test_createIndex_align_qstart_index_already_exist(self):
-        tableName = "dummyAlignTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( query_name varchar(255), query_start int, query_end int,subject_name varchar(255), subject_start int unsigned, subject_end int unsigned,E_value double, score int unsigned, identity float)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE INDEX qstart ON %s ( query_start );" % (tableName)
-        self._iDb.execute( sqlCmd )
-        expLIndex = ["qstart", "qname", "qend", "sname", "sstart", "send", "qcoord", "qcoord"]
-        
-        self._iDb.createIndex(tableName, "align")
-        
-        sqlCmd = "SHOW INDEX FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        results = self._iDb.cursor.fetchall()
-        
-        for index in expLIndex[:-1]:
-            sqlCmd = "DROP INDEX %s ON %s" % ( index, tableName )
-            self._iDb.execute( sqlCmd )
-        self._iDb.dropTable(tableName)
-        
-        obsLIndex = []
-        for tuple in results:
-            obsLIndex.append(tuple[2])
-            
-        self.assertEquals( expLIndex, obsLIndex)
-        
-    def test_createTable_align( self ):
-        tableName = "dummyAlignTable" + self._uniqId
-        alignFileName = "dummyAlignFile.txt"
-        alignF = open( alignFileName, "w" )
-        alignF.write( "query1\t1\t100\tsubject1\t1\t150\t0.5\t15\t35\n" )
-        alignF.write( "query2\t1\t100\tsubject2\t1\t150\t0.5\t15\t35\n" )
-        alignF.close()
-        
-        expAlignTuple1 = ("query1", 1L, 100L, "subject1", 1L, 150L, 0.5, 15L, 35)
-        expAlignTuple2 = ("query2", 1L, 100L, "subject2", 1L, 150L, 0.5, 15L, 35)
-        expTAlignTuples = (expAlignTuple1, expAlignTuple2)
-        
-        self._iDb.createTable( tableName, "align", alignFileName )
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTAlignTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(alignFileName)
-        
-        self.assertEquals( expTAlignTuples, obsTAlignTuples )
-        
-    def test_createIndex_set(self):
-        tableName = "dummySetTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( path int unsigned, name varchar(255), chr varchar(255), start int, end int)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        expLIndex = ["id", "iname", "ichr", "istart", "iend", "icoord", "icoord"]
-        
-        self._iDb.createIndex(tableName, "set")
-        
-        sqlCmd = "SHOW INDEX FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        results = self._iDb.cursor.fetchall()
-        
-        for index in expLIndex[:-1]:
-            sqlCmd = "DROP INDEX %s ON %s" % ( index, tableName )
-            self._iDb.execute( sqlCmd )
-        self._iDb.dropTable(tableName)
-
-        obsLIndex = []
-        for tuple in results:
-            obsLIndex.append(tuple[2])
-            
-        self.assertEquals( expLIndex, obsLIndex)
-        
-    def test_createIndex_set_id_index_already_exist(self):
-        tableName = "dummySetTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( path int unsigned, name varchar(255), chr varchar(255), start int, end int)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE INDEX id ON %s ( path );" % (tableName)
-        self._iDb.execute( sqlCmd )
-        expLIndex = ["id", "iname", "ichr", "istart", "iend", "icoord", "icoord"]
-        
-        self._iDb.createIndex(tableName, 'set')
-        
-        sqlCmd = "SHOW INDEX FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        results = self._iDb.cursor.fetchall()
-        
-        for index in expLIndex[:-1]:
-            sqlCmd = "DROP INDEX %s ON %s" % ( index, tableName )
-            self._iDb.execute( sqlCmd )
-        self._iDb.dropTable(tableName)
-        
-        obsLIndex = []
-        for tuple in results:
-            obsLIndex.append(tuple[2])
-            
-        self.assertEquals( expLIndex, obsLIndex)
-        
-    def test_createTable_set( self ):
-        tableName = "dummySetTable" + self._uniqId
-        setFileName = "dummySetFile.txt"
-        setF = open( setFileName, "w" )
-        setF.write( "15\tset1\tchr1\t1\t100\n" )
-        setF.write( "15\tset2\tchr2\t1\t100\n" )
-        setF.close()
-        
-        expSetTuple1 = (15L, "set1", "chr1", 1L, 100L)
-        expSetTuple2 = (15L, "set2", "chr2", 1L, 100L)
-        expTSetTuples = (expSetTuple1, expSetTuple2)
-        
-        self._iDb.createTable( tableName, 'set', setFileName )
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTSetTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(setFileName)
-        
-        self.assertEquals( expTSetTuples, obsTSetTuples )
-     
-    def test_convertMapTableIntoSetTable( self ):
-        mapTableName = "dummyMapTable" + self._uniqId
-        mapFileName = "dummyMapFile.txt"
-        with open(mapFileName, "w") as mapFH:
-            mapFH.write("map1\tchr1\t1\t100\n")
-            mapFH.write("map2\tchr2\t1\t100\n")
-            
-        self._iDb.createTable(mapTableName, 'map', mapFileName)
-        
-        expSetTuple1 = (1, "map1", "chr1", 1, 100)
-        expSetTuple2 = (2, "map2", "chr2", 1, 100)
-        expTSetTuples = (expSetTuple1, expSetTuple2)
-        
-        setTableName = "dummySetTable" + self._uniqId
-        self._iDb.convertMapTableIntoSetTable(mapTableName, setTableName)
-        
-        sqlCmd = "SELECT * FROM %s" % setTableName
-        self._iDb.execute(sqlCmd)
-        obsTSetTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(mapTableName)
-        self._iDb.dropTable(setTableName)
-        os.remove(mapFileName)
-        
-        self.assertEquals( expTSetTuples, obsTSetTuples )
-       
-    def test_createIndex_seq(self):
-        tableName = "dummySeqTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( accession varchar(255), sequence longtext, description varchar(255), length int unsigned)" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        expLIndex = ["iacc", "idescr"]
-        
-        self._iDb.createIndex(tableName,'seq')
-        
-        sqlCmd = "SHOW INDEX FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        results = self._iDb.cursor.fetchall()
-        
-        for index in expLIndex:
-            sqlCmd = "DROP INDEX %s ON %s" % ( index, tableName )
-            self._iDb.execute( sqlCmd )
-        self._iDb.dropTable(tableName)
-        
-        obsLIndex = []
-        for tuple in results:
-            obsLIndex.append(tuple[2])
-            
-        self.assertEquals(expLIndex, obsLIndex)
-   
-    def test_createIndex_seq_idescr_index_already_exist(self):
-        tableName = "dummySeqTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( accession varchar(255), sequence longtext, description varchar(255), length int unsigned);" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE INDEX idescr ON %s ( description(10) );" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        expLIndex = ["idescr", "iacc"]
-        
-        self._iDb.createIndex(tableName,'seq')
-        
-        sqlCmd = "SHOW INDEX FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        results = self._iDb.cursor.fetchall()
-        
-        for index in expLIndex:
-            sqlCmd = "DROP INDEX %s ON %s" % ( index, tableName )
-            self._iDb.execute( sqlCmd )
-        self._iDb.dropTable(tableName)
-        
-        obsLIndex = []
-        for tuple in results:
-            obsLIndex.append(tuple[2])
-            
-        self.assertEquals(expLIndex, obsLIndex)
-        
-    def test_createTable_seq( self ):
-        tableName = "dummySeqTable" + self._uniqId
-        seqFileName = "dummySeqFile.txt"
-        seqF = open( seqFileName, "w" )
-        seqF.write( ">acc1 seq1\n" )
-        seqF.write( "ATACTTCGCTAGCTCGC\n" )
-        seqF.write( ">acc2 seq2\n" )
-        seqF.write( "ATACTTCGCTAGCTCGCATACTTCGCTAGCTCGCATACTTCGCTAGCTCGCATACTTCGCTAGCTCGC\n" )
-        seqF.close()
-        
-        expSeqTuple1 = ("acc1", "ATACTTCGCTAGCTCGC", "acc1 seq1", 17L)
-        expSeqTuple2 = ("acc2", "ATACTTCGCTAGCTCGCATACTTCGCTAGCTCGCATACTTCGCTAGCTCGCATACTTCGCTAGCTCGC", "acc2 seq2", 68L)
-        expTSeqTuples = (expSeqTuple1, expSeqTuple2)
-        
-        self._iDb.createTable( tableName,'seq', seqFileName )
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTSeqTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(seqFileName)
-        
-        self.assertEquals( expTSeqTuples, obsTSeqTuples )
-    
-    def test_createIndex_job(self):
-        tableName = "dummyTable%s" % self._uniqId
-        sqlCmd = "CREATE TABLE %s" % tableName
-        sqlCmd += " ( jobid INT UNSIGNED"
-        sqlCmd += ", jobname VARCHAR(255)"
-        sqlCmd += ", groupid VARCHAR(255)"
-        sqlCmd += ", command TEXT"
-        sqlCmd += ", launcher VARCHAR(1024)"
-        sqlCmd += ", queue VARCHAR(255)"
-        sqlCmd += ", status VARCHAR(255)"
-        sqlCmd += ", time DATETIME"
-        sqlCmd += ", node VARCHAR(255) )"
-        self._iDb.execute(sqlCmd)
-        expLIndex = ["ijobid", "ijobname", "igroupid", "istatus"]
-        
-        self._iDb.createIndex(tableName, 'jobs')
-        
-        sqlCmd = "SHOW INDEX FROM %s" % tableName
-        self._iDb.execute(sqlCmd)
-        results = self._iDb.cursor.fetchall()
-        
-        obsLIndex = []
-        for tuple in results:
-            obsLIndex.append(tuple[2])
-        
-        for index in obsLIndex:
-            sqlCmd = "DROP INDEX %s ON %s" % (index, tableName)
-            self._iDb.execute(sqlCmd)
-        self._iDb.dropTable(tableName)
-            
-        self.assertEquals(expLIndex, obsLIndex)
- 
-    def test_createTable_job( self ):
-        tableName = "dummyTable%s" % self._uniqId
-        expTuples = ()
-        
-        self._iDb.createTable(tableName,'jobs')
-        
-        sqlCmd = "SELECT * FROM %s" % tableName
-        self._iDb.execute(sqlCmd)
-        obsTuples = self._iDb.cursor.fetchall()
-        self._iDb.dropTable(tableName)
-        
-        self.assertEquals(expTuples, obsTuples)
-      
-    def test_createIndex_length(self):
-        tableName = "dummyTable%s" % self._uniqId
-        sqlCmd = "CREATE TABLE %s (accession varchar(255), length int unsigned)" % tableName
-        self._iDb.execute(sqlCmd)
-        expLIndex = ["iacc", "ilength"]
-        
-        self._iDb.createIndex(tableName,'length')
-        
-        sqlCmd = "SHOW INDEX FROM %s" % tableName
-        self._iDb.execute(sqlCmd)
-        results = self._iDb.cursor.fetchall()
-        
-        obsLIndex = []
-        for tuple in results:
-            obsLIndex.append(tuple[2])
-        
-        for index in obsLIndex:
-            sqlCmd = "DROP INDEX %s ON %s" % (index, tableName)
-            self._iDb.execute(sqlCmd)
-        self._iDb.dropTable(tableName)
-            
-        self.assertEquals(expLIndex, obsLIndex)
-
-    def test_createTable_length( self ):
-        tableName = "dummyLengthTable%s" % self._uniqId
-        seqFileName = "dummyFile.fa"
-        seqF = open( seqFileName, "w" )
-        seqF.write(">acc1 seq1\n")
-        seqF.write("ATACTTCGCTAGCTCGC\n")
-        seqF.write(">acc2 seq2\n")
-        seqF.write("ATACTTCGCTAGCTCGCATACTTCGCTAGCTCGCATACTTCGCTAGCTCGCATACTTCGCTAGCTCGC\n")
-        seqF.close()
-        
-        expTuple1 = ("acc1", 17)
-        expTuple2 = ("acc2", 68)
-        expTTuples = (expTuple1, expTuple2)
-        
-        self._iDb.createTable(tableName, "length", seqFileName)
-        
-        sqlCmd = "SELECT * FROM %s" % tableName
-        self._iDb.execute(sqlCmd)
-        obsTTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(seqFileName)
-        
-        self.assertEquals(expTTuples, obsTTuples)
-        
-    def test_createTable_with_overwrite_Map( self ):
-        tableName = "dummyMapTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( dummyColumn varchar(255) )" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        
-        fileName = "dummyMapFile.txt"
-        mapF = open( fileName, "w" )
-        mapF.write( "map1\tseq1\t20\t50\n" )
-        mapF.write( "map2\tseq2\t700\t760\n" )
-        mapF.close()
-        
-        expMapTuple1 = ("map1", "seq1", 20L, 50L)
-        expMapTuple2 = ("map2", "seq2", 700L, 760L)
-        expTMapTuples = (expMapTuple1, expMapTuple2)
-        
-        self._iDb.createTable(tableName, "Map", fileName, True)
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTMapTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(fileName)
-        
-        self.assertEquals( expTMapTuples, obsTMapTuples )
-        
-    def test_createTable_without_overwrite_Align( self ):
-        tableName = "dummyAlignTable" + self._uniqId
-        alignFileName = "dummyAlignFile.txt"
-        alignF = open( alignFileName, "w" )
-        alignF.write( "query1\t1\t100\tsubject1\t1\t150\t0.5\t15\t35\n" )
-        alignF.write( "query2\t1\t100\tsubject2\t1\t150\t0.5\t15\t35\n" )
-        alignF.close()
-        
-        expAlignTuple1 = ("query1", 1L, 100L, "subject1", 1L, 150L, 0.5, 15L, 35)
-        expAlignTuple2 = ("query2", 1L, 100L, "subject2", 1L, 150L, 0.5, 15L, 35)
-        expTAlignTuples = (expAlignTuple1, expAlignTuple2)
-        
-        self._iDb.createTable(tableName, "align", alignFileName, False)
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTAlignTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(alignFileName)
-        
-        self.assertEquals( expTAlignTuples, obsTAlignTuples )
-        
-    def test_createTable_without_overwrite_Match( self ):
-        tableName = "dummyMatchTable" + self._uniqId
-        matchFileName = "dummyMatchFile.txt"
-        matchF = open( matchFileName, "w" )
-        matchF.write( "qry1\t700\t760\t60\t100\t100\tsbj2\t500\t560\t60\t100\t1e-123\t136\t98.4\t2\n" )
-        matchF.write( "qry2\t700\t760\t60\t100\t100\tsbj2\t500\t560\t60\t100\t1e-123\t136\t98.4\t2\n" )
-        matchF.close()
-        
-        expMatchTuple = ("qry2", 700L, 760L, 60L, 100.0, 100.0, "sbj2", 500L, 560L, 60L, 100.0, 1e-123, 136L, 98.4, 2L)
-        expTMatchTuples = (expMatchTuple,)
-        
-        self._iDb.createTable(tableName, "tab", matchFileName, False)
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTMatchTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(matchFileName)
-        
-        self.assertEquals( expTMatchTuples, obsTMatchTuples )
-        
-    def test_createTable_without_overwrite_Path( self ):
-        tableName = "dummyPathTable" + self._uniqId
-        pathFileName = "dummyPathFile.txt"
-        pathF = open( pathFileName, "w" )
-        pathF.write( "1\tqry\t1\t100\tsbj\t1\t100\t1e-123\t136\t98.4\n" )
-        pathF.write( "2\tqry\t500\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        pathF.close()
-        
-        expPathTuple1 = (1L, "qry", 1L, 100L, "sbj", 1L, 100L, 1e-123, 136L, 98.4)
-        expPathTuple2 = (2L, "qry", 401L, 500L, "sbj", 100L, 1L, 1e-152, 161L, 98.7)  # change coordinates
-        expTPathTuples = (expPathTuple1, expPathTuple2)
-        
-        self._iDb.createTable(tableName, "Path", pathFileName, False)
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTPathTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(pathFileName)
-        
-        self.assertEquals( expTPathTuples, obsTPathTuples )
-        
-    def test_createTable_without_overwrite_Set( self ):
-        tableName = "dummySetTable" + self._uniqId
-        setFileName = "dummySetFile.txt"
-        setF = open( setFileName, "w" )
-        setF.write( "15\tset1\tchr1\t1\t100\n" )
-        setF.write( "15\tset2\tchr2\t1\t100\n" )
-        setF.close()
-        
-        expSetTuple1 = (15L, "set1", "chr1", 1L, 100L)
-        expSetTuple2 = (15L, "set2", "chr2", 1L, 100L)
-        expTSetTuples = (expSetTuple1, expSetTuple2)
-        
-        self._iDb.createTable(tableName, "Set", setFileName, False)
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTSetTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(setFileName)
-        
-        self.assertEquals( expTSetTuples, obsTSetTuples )  
-        
-    def test_createTable_without_overwrite_Seq( self ):
-        tableName = "dummySeqTable" + self._uniqId
-        seqFileName = "dummySeqFile.txt"
-        seqF = open( seqFileName, "w" )
-        seqF.write( ">acc1 seq1\n" )
-        seqF.write( "ATACTTCGCTAGCTCGC\n" )
-        seqF.write( ">acc2 seq2\n" )
-        seqF.write( "ATACTTCGCTAGCTCGCATACTTCGCTAGCTCGCATACTTCGCTAGCTCGCATACTTCGCTAGCTCGC\n" )
-        seqF.close()
-        
-        expSeqTuple1 = ("acc1", "ATACTTCGCTAGCTCGC", "acc1 seq1", 17L)
-        expSeqTuple2 = ("acc2", "ATACTTCGCTAGCTCGCATACTTCGCTAGCTCGCATACTTCGCTAGCTCGCATACTTCGCTAGCTCGC", "acc2 seq2", 68L)
-        expTSeqTuples = (expSeqTuple1, expSeqTuple2)
-        
-        self._iDb.createTable(tableName, "fasta", seqFileName, False)
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTSeqTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(seqFileName)
-        
-        self.assertEquals( expTSeqTuples, obsTSeqTuples )
-        
-    def test_createTable_with_overwrite_Classif( self ):
-        tableName = "dummyClassifTable" + self._uniqId
-        classifFileName = "dummyClassifFile.txt"
-        with open( classifFileName, "w" ) as f:
-            f.write("RIX-incomp-chim_DmelCaf1_2_0-B-G1000-Map3\t3508\t-\tPotentialChimeric\tI\tLINE\tincomplete\tCI=36; coding=(TE_BLRtx: DMCR1A:ClassI:LINE:Jockey: 14.16%); struct=(TElength: >700bps)\n")
-            f.write("RLX-incomp_DmelCaf1_2_0-B-G1019-Map3\t4131\t+\tok\tI\tLTR\tincomplete\tCI=28; coding=(TE_BLRtx: ROO_I:ClassI:LTR:Bel-Pao: 43.27%, ROO_LTR:ClassI:LTR:Bel-Pao: 100.00%; TE_BLRx: BEL-6_DWil-I_2p:ClassI:LTR:Bel-Pao: 69.84%); struct=(TElength: >4000bps); other=(HG_BLRn: FBtr0087866_Dmel_r4.3: 4.72%; SSRCoverage=0.15<0.75)\n")
-        
-        self._iDb.createTable(tableName, "Classif", classifFileName, True)
-        
-        self.assertTrue(self._iDb.getSize(tableName) == 2)
-        self._iDb.dropTable(tableName)
-        os.remove(classifFileName)
-        
-    def test_createTable_no_file( self ):
-        lTypesToTest = TABLE_SCHEMA_DESCRIPTOR.keys()
-        lTypesToTest.extend(TABLE_TYPE_SYNONYMS)
-        for tableType in lTypesToTest:
-            tableName = "dummy%sTable%s" % (tableType, self._uniqId)
-            self._iDb.createTable(tableName, tableType)
-            
-            self.assertTrue(self._iDb.doesTableExist(tableName))
-            self.assertTrue(self._iDb.isEmpty(tableName))
-            
-            self._iDb.dropTable(tableName)
-        
-    def test_changePathQueryCoordinatesToDirectStrand(self):
-        tableName = "dummyPathTable" + self._uniqId
-        pathFileName = "dummyPathFile.txt"
-        pathF = open( pathFileName, "w" )
-        pathF.write( "1\tqry\t100\t1\tsbj\t1\t100\t1e-123\t136\t98.4\n" )
-        pathF.write( "2\tqry\t500\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        pathF.write( "3\tqry\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        pathF.close()
-        
-        expPathTuple1 = (1L, "qry", 1L, 100L, "sbj", 100L, 1L, 1e-123, 136L, 98.4)
-        expPathTuple2 = (2L, "qry", 401L, 500L, "sbj", 100L, 1L, 1e-152, 161L, 98.7)  
-        expPathTuple3 = (3L, "qry", 5L, 401L, "sbj", 1L, 100L, 1e-152, 161L, 98.7)  
-        expTPathTuples = (expPathTuple1, expPathTuple2, expPathTuple3)
-
-        sqlCmd = "CREATE TABLE %s ( path int unsigned, query_name varchar(255), query_start int , query_end int, subject_name varchar(255), subject_start int unsigned, subject_end int unsigned, E_value double, score int unsigned, identity float)" % tableName
-        self._iDb.execute( sqlCmd )
-        
-        self._iDb.loadDataFromFile(tableName, pathFileName, False)
-        self._iDb.changePathQueryCoordinatesToDirectStrand(tableName)
-        
-        sqlCmd = "SELECT * FROM %s" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        obsTPathTuples = self._iDb.cursor.fetchall()
-        
-        self._iDb.dropTable(tableName)
-        os.remove(pathFileName)
-        
-        self.assertEquals( expTPathTuples, obsTPathTuples )
-        
-    def test_exportDataToFile(self):
-        tableName = "dummyPathTable" + self._uniqId
-        expFileName = "dummyPathFile.txt"
-        pathF = open( expFileName, "w" )
-        pathF.write( "1\tqry\t1\t100\tsbj\t100\t1\t1e-123\t136\t98.4\n" )
-        pathF.write( "2\tqry\t401\t500\tsbj\t100\t1\t1e-152\t161\t98.7\n" )
-        pathF.write( "3\tqry\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        pathF.close()
-        self._iDb.createTable(tableName, "Path", expFileName, False)
-        obsFileName = "DummyObsFileName"
-        
-        self._iDb.exportDataToFile(tableName, obsFileName)
-        
-        self.assertTrue(FileUtils.isRessourceExists(obsFileName))
-        self.assertTrue(FileUtils.are2FilesIdentical(expFileName, obsFileName))
-        
-        self._iDb.dropTable(tableName)
-        os.remove(expFileName)
-        os.remove(obsFileName)
-        
-    def test_exportDataToFile_keepFirstLineTrue(self):
-        tableName = "dummyPathTable" + self._uniqId
-        pathFileName = "dummyPathFile.txt"
-        pathF = open( pathFileName, "w" )
-        pathF.write( "1\tqry\t1\t100\tsbj\t100\t1\t1e-123\t136\t98.4\n" )
-        pathF.write( "2\tqry\t401\t500\tsbj\t100\t1\t1e-152\t161\t98.7\n" )
-        pathF.write( "3\tqry\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        pathF.close()
-        
-        expFileName = "expPathFile.txt"
-        pathF = open( expFileName, "w" )
-        pathF.write("path\tquery_name\tquery_start\tquery_end\tsubject_name\tsubject_start\tsubject_end\tE_value\tscore\tidentity\n")
-        pathF.write( "1\tqry\t1\t100\tsbj\t100\t1\t1e-123\t136\t98.4\n" )
-        pathF.write( "2\tqry\t401\t500\tsbj\t100\t1\t1e-152\t161\t98.7\n" )
-        pathF.write( "3\tqry\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        pathF.close()
-        
-        self._iDb.createTable(tableName, "Path", pathFileName, False)
-        obsFileName = "DummyObsFileName"
-        
-        self._iDb.exportDataToFile(tableName, obsFileName, True)
-        
-        self.assertTrue(FileUtils.isRessourceExists(obsFileName))
-        self.assertTrue(FileUtils.are2FilesIdentical(expFileName, obsFileName))
-        
-        self._iDb.dropTable(tableName)
-        os.remove(expFileName)
-        os.remove(obsFileName)
-        os.remove(pathFileName)
-        
-    def test_exportDataToFile_with_keepFirstLineTrue_and_param(self):
-        tableName = "dummyPathTable" + self._uniqId
-        pathFileName = "dummyPathFile.txt"
-        pathF = open( pathFileName, "w" )
-        pathF.write( "1\tqry\t1\t100\tsbj\t100\t1\t1e-123\t136\t98.4\n" )
-        pathF.write( "2\tqry2\t401\t500\tsbj\t100\t1\t1e-152\t161\t98.7\n" )
-        pathF.write( "3\tqry\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        pathF.close()
-        
-        expFileName = "expPathFile.txt"
-        pathF = open( expFileName, "w" )
-        pathF.write("path\tquery_name\tquery_start\tquery_end\tsubject_name\tsubject_start\tsubject_end\tE_value\tscore\tidentity\n")
-        pathF.write( "2\tqry2\t401\t500\tsbj\t100\t1\t1e-152\t161\t98.7\n" )
-        pathF.close()
-        
-        self._iDb.createTable(tableName, "Path", pathFileName, False)
-        obsFileName = "DummyObsFileName"
-        
-        self._iDb.exportDataToFile(tableName, obsFileName, True, "where query_name = 'qry2'")
-        
-        self.assertTrue(FileUtils.isRessourceExists(obsFileName))
-        self.assertTrue(FileUtils.are2FilesIdentical(expFileName, obsFileName))
-        
-        self._iDb.dropTable(tableName)
-        os.remove(expFileName)
-        os.remove(obsFileName)
-        os.remove(pathFileName)
-        
-        
-    def test_convertPathTableIntoAlignTable( self ):
-        inPathTable = "dummyInPathTable_%s" % ( self._uniqId )
-        inPathFile = "dummyInPathFile_%s" % ( self._uniqId )
-        inPathFileHandler = open( inPathFile, "w" )
-        inPathFileHandler.write( "1\tqry\t1\t100\tsbj\t100\t1\t1e-123\t136\t98.4\n" )
-        inPathFileHandler.write( "2\tqry2\t401\t500\tsbj\t100\t1\t1e-152\t161\t98.7\n" )
-        inPathFileHandler.write( "3\tqry\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        inPathFileHandler.close()
-        self._iDb.createTable( inPathTable, "path", inPathFile, True )
-        
-        expAlignFile = "dummyExpAlignFile_%s" % ( self._uniqId )
-        expAlignFileHandler = open( expAlignFile, "w" )
-        expAlignFileHandler.write( "qry\t1\t100\tsbj\t100\t1\t1e-123\t136\t98.4\n" )
-        expAlignFileHandler.write( "qry2\t401\t500\tsbj\t100\t1\t1e-152\t161\t98.7\n" )
-        expAlignFileHandler.write( "qry\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        expAlignFileHandler.close()
-        obsAlignTable = "dummyObsAlignTable_%s" % ( self._uniqId )
-        
-        self._iDb.convertPathTableIntoAlignTable( inPathTable, obsAlignTable )
-        
-        obsAlignFile = "dummyObsAlignFile_%s" % ( self._uniqId )
-        self._iDb.exportDataToFile( obsAlignTable, obsAlignFile, False )
-        self.assertTrue( FileUtils.are2FilesIdentical( expAlignFile, obsAlignFile ) )
-        
-        for f in [ inPathFile, expAlignFile, obsAlignFile ]:
-            os.remove( f )
-        for t in [ inPathTable, obsAlignTable ]:
-            self._iDb.dropTable( t )
-            
-    def test_convertAlignTableIntoPathTable( self ):
-        inAlignTable = "dummyInPathTable_%s" % ( self._uniqId )
-        inAlignFile = "dummyInPathFile_%s" % ( self._uniqId )
-        inAlignFileHandler = open( inAlignFile, "w" )
-        inAlignFileHandler.write( "qry\t1\t100\tsbj\t100\t1\t1e-123\t136\t98.4\n" )
-        inAlignFileHandler.write( "qry2\t401\t500\tsbj\t100\t1\t1e-152\t161\t98.7\n" )
-        inAlignFileHandler.write( "qry3\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        inAlignFileHandler.close()
-        self._iDb.createTable( inAlignTable, "align", inAlignFile, True )
-        
-        expPathFile = "dummyExpPathFile_%s" % ( self._uniqId )
-        expPathFileHandler = open( expPathFile, "w" )
-        expPathFileHandler.write( "1\tqry\t1\t100\tsbj\t100\t1\t1e-123\t136\t98.4\n" )
-        expPathFileHandler.write( "2\tqry2\t401\t500\tsbj\t100\t1\t1e-152\t161\t98.7\n" )
-        expPathFileHandler.write( "3\tqry3\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        expPathFileHandler.close()
-        obsPathTable = "dummyObsPathTable_%s" % ( self._uniqId )
-        
-        self._iDb.convertAlignTableIntoPathTable( inAlignTable, obsPathTable )
-        
-        obsPathFile = "dummyObsAlignFile_%s" % ( self._uniqId )
-        self._iDb.exportDataToFile( obsPathTable, obsPathFile, False )
-        self.assertTrue( FileUtils.are2FilesIdentical( expPathFile, obsPathFile ) )
-        
-        for f in [ inAlignFile, expPathFile, obsPathFile ]:
-            os.remove( f )
-        for t in [ inAlignTable, obsPathTable ]:
-            self._iDb.dropTable( t )
-            
-    def test_convertAlignTableIntoPathTable_with_single_quote( self ):
-        inAlignTable = "dummyInPathTable_%s" % ( self._uniqId )
-        inAlignFile = "dummyInPathFile_%s" % ( self._uniqId )
-        inAlignFileHandler = open( inAlignFile, "w" )
-        inAlignFileHandler.write( "qry\t1\t100\t'sbj\t100\t1\t1e-123\t136\t98.4\n" )
-        inAlignFileHandler.write( "qry2\t401\t500\tsbj'\t100\t1\t1e-152\t161\t98.7\n" )
-        inAlignFileHandler.write( "qry3\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        inAlignFileHandler.close()
-        self._iDb.createTable( inAlignTable, "align", inAlignFile, True )
-        
-        expPathFile = "dummyExpPathFile_%s" % ( self._uniqId )
-        expPathFileHandler = open( expPathFile, "w" )
-        expPathFileHandler.write( "1\tqry\t1\t100\t'sbj\t100\t1\t1e-123\t136\t98.4\n" )
-        expPathFileHandler.write( "2\tqry2\t401\t500\tsbj'\t100\t1\t1e-152\t161\t98.7\n" )
-        expPathFileHandler.write( "3\tqry3\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        expPathFileHandler.close()
-        obsPathTable = "dummyObsPathTable_%s" % ( self._uniqId )
-        
-        self._iDb.convertAlignTableIntoPathTable( inAlignTable, obsPathTable )
-        
-        obsPathFile = "dummyObsAlignFile_%s" % ( self._uniqId )
-        self._iDb.exportDataToFile( obsPathTable, obsPathFile, False )
-        self.assertTrue( FileUtils.are2FilesIdentical( expPathFile, obsPathFile ) )
-        
-        for f in [ inAlignFile, expPathFile, obsPathFile ]:
-            os.remove( f )
-        for t in [ inAlignTable, obsPathTable ]:
-            self._iDb.dropTable( t )
-          
-    def test_getObjectListWithSQLCmd(self):
-        inPathTable = "dummyInPathTable_%s" % ( self._uniqId )
-        inPathFile = "dummyInPathFile_%s" % ( self._uniqId )
-        inPathFileHandler = open( inPathFile, "w" )
-        inPathFileHandler.write( "1\tqry\t100\t1\tsbj\t1\t100\t1e-123\t136\t98.4\n" )
-        inPathFileHandler.write( "2\tqry\t500\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        inPathFileHandler.write( "3\tqry\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        inPathFileHandler.close()
-        self._iDb.createTable( inPathTable, "path", inPathFile, True )
-        
-        path1 = Path()
-        path1.setFromTuple((1, "qry", 1, 100, "sbj", 100, 1, 1e-123, 136, 98.4))
-        path2 = Path()
-        path2.setFromTuple((2, "qry", 401, 500, "sbj", 100, 1, 1e-152, 161, 98.7))
-        path3 = Path()  
-        path3.setFromTuple((3, "qry", 5, 401, "sbj", 1, 100, 1e-152, 161, 98.7))
-        expLPath = [path1, path2, path3]
-        sqlCmd = "SELECT * FROM %s;" % (inPathTable)
-        obsLPath = self._iDb.getObjectListWithSQLCmd(sqlCmd, self._getInstanceToAdapt)
-        
-        os.remove( inPathFile )
-        self._iDb.dropTable( inPathTable )
-        
-        self.assertEquals(expLPath, obsLPath)
-    
-    def test_getIntegerListWithSQLCmd(self):
-        inPathTable = "dummyInPathTable_%s" % ( self._uniqId )
-        inPathFile = "dummyInPathFile_%s" % ( self._uniqId )
-        inPathFileHandler = open( inPathFile, "w" )
-        inPathFileHandler.write( "1\tqry\t100\t1\tsbj\t1\t100\t1e-123\t136\t98.4\n" )
-        inPathFileHandler.write( "2\tqry\t500\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        inPathFileHandler.write( "3\tqry\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        inPathFileHandler.close()
-        self._iDb.createTable( inPathTable, "path", inPathFile, True )
-        
-        expLPath = [1, 2, 3]
-        sqlCmd = "SELECT * FROM %s;" % (inPathTable)
-        obsLPath = self._iDb.getIntegerListWithSQLCmd(sqlCmd)
-        
-        os.remove( inPathFile )
-        self._iDb.dropTable( inPathTable )
-        
-        self.assertEquals(expLPath, obsLPath)
-    
-    def test_getIntegerWithSQLCmd(self):
-        inPathTable = "dummyInPathTable_%s" % ( self._uniqId )
-        inPathFile = "dummyInPathFile_%s" % ( self._uniqId )
-        inPathFileHandler = open( inPathFile, "w" )
-        inPathFileHandler.write( "1\tqry\t100\t1\tsbj\t1\t100\t1e-123\t136\t98.4\n" )
-        inPathFileHandler.write( "2\tqry\t500\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        inPathFileHandler.write( "3\tqry\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        inPathFileHandler.close()
-        self._iDb.createTable( inPathTable, "path", inPathFile, True )
-        
-        expId = 1
-        sqlCmd = "SELECT path FROM %s where path='%d';" % (inPathTable, 1)
-        obsId = self._iDb.getIntegerWithSQLCmd(sqlCmd)
-        
-        os.remove( inPathFile )
-        self._iDb.dropTable( inPathTable )
-        
-        self.assertEquals(expId, obsId)
-    
-    def test_getStringListWithSQLCmd(self):
-        inPathTable = "dummyInPathTable_%s" % ( self._uniqId )
-        inPathFile = "dummyInPathFile_%s" % ( self._uniqId )
-        inPathFileHandler = open( inPathFile, "w" )
-        inPathFileHandler.write( "1\tqry\t100\t1\tsbj\t1\t100\t1e-123\t136\t98.4\n" )
-        inPathFileHandler.write( "2\tqry\t500\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        inPathFileHandler.write( "3\tqry\t5\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        inPathFileHandler.close()
-        self._iDb.createTable( inPathTable, "path", inPathFile, True )
-        
-        expLString = ["qry","qry","qry"]
-        sqlCmd = "SELECT query_name FROM %s;" % (inPathTable)
-        obsLString = self._iDb.getStringListWithSQLCmd(sqlCmd)
-        
-        os.remove( inPathFile )
-        self._iDb.dropTable( inPathTable )
-        
-        self.assertEquals(expLString, obsLString)
-        
-    def test_removeDoublons( self ):
-        inPathTable = "dummyInPathTable_%s" % ( self._uniqId )
-        inPathFile = "dummyInPathFile_%s" % ( self._uniqId )
-        inPathFileHandler = open( inPathFile, "w" )
-        inPathFileHandler.write( "1\tqry\t1\t100\tsbj\t1\t100\t1e-123\t136\t98.4\n" )
-        inPathFileHandler.write( "2\tqry\t401\t500\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        inPathFileHandler.write( "2\tqry\t401\t500\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        inPathFileHandler.close()
-        self._iDb.createTable( inPathTable, "path", inPathFile, True )
-        
-        expFile = "dummyExpFile_%s" % ( self._uniqId )
-        expFileHandler = open( expFile, "w" )
-        expFileHandler.write( "1\tqry\t1\t100\tsbj\t1\t100\t1e-123\t136\t98.4\n" )
-        expFileHandler.write( "2\tqry\t401\t500\tsbj\t1\t100\t1e-152\t161\t98.7\n" )
-        expFileHandler.close()
-        
-        self._iDb.removeDoublons( inPathTable )
-        
-        obsFile = "dummyObsFile_%s" % ( self._uniqId )
-        self._iDb.exportDataToFile(inPathTable, obsFile)
-        
-        self.assertTrue( FileUtils.are2FilesIdentical( expFile, obsFile ) )
-        
-        self._iDb.dropTable( inPathTable )
-        for f in [ inPathFile, expFile, obsFile ]:
-            os.remove( f )
-            
-    def test_getTableListFromPattern_oneTable( self ):
-        inTable = "dummyInTable_%s" % ( self._uniqId )
-        self._iDb.createTable( inTable, "path", "", True )
-        exp = [ inTable ]
-        obs = self._iDb.getTableListFromPattern( "%s%%" % inTable )
-        self.assertEqual( exp, obs )
-        self._iDb.dropTable( inTable )
-        
-    def test_getTableListFromPattern_twoTables( self ):
-        inTable1 = "dummyInTable1_%s" % ( self._uniqId )
-        inTable2 = "dummyInTable2_%s" % ( self._uniqId )
-        inTable3 = "dummyTotoTable3_%s" % ( self._uniqId )
-        for table in [ inTable1, inTable2, inTable3 ]:
-            self._iDb.createTable( table, "path", "", True )
-        exp = [ inTable1, inTable2 ]
-        obs = self._iDb.getTableListFromPattern( "dummyInTable%%_%s" % self._uniqId )
-        self.assertEqual( exp, obs )
-        for table in [ inTable1, inTable2, inTable3 ]:
-            self._iDb.dropTable( table )
-            
-    def test_createPathStatTable(self):
-        statsFileName = "DmelCaf1_statsPerClassif.txt"
-        f = open (statsFileName, "w")
-        f.write("family\tmaxLength\tmeanLength\tcovg\tfrags\tfullLgthFrags\tcopies\tfullLgthCopies\tmeanId\tsdId\tminId\tq25Id\tmedId\tq75Id\tmaxId\tmeanLgth\tsdLgth\tminLgth\tq25Lgth\tmedLgth\tq75Lgth\tmaxLgth\tmeanLgthPerc\tsdLgthPerc\tminLgthPerc\tq25LgthPerc\tmedLgthPerc\tq75LgthPerc\tmaxLgthPerc\n")
-        f.write("Helitron\t2367\t2367\t138367\t852\t0\t803\t0\t81.20\t4.24\t68.55\t78.32\t81.03\t83.49\t100.00\t172.46\t184.92\t21\t70.00\t129.00\t216.00\t2202\t7.29\t7.81\t0.89\t2.96\t5.45\t9.13\t93.03\n")
-        f.write("LINE\t7688\t7688\t3769377\t8358\t10\t6329\t10\t85.52\t8.02\t62.80\t79.27\t83.33\t92.88\t100.00\t597.97\t980.29\t21\t117.00\t256.00\t537.00\t7726\t7.78\t12.75\t0.27\t1.52\t3.33\t6.98\t100.49\n")
-        f.write("LTR\t13754\t13754\t9146587\t20749\t0\t17868\t1\t82.69\t7.39\t58.76\t77.81\t80.82\t85.67\t100.00\t519.75\t1217.12\t20\t105.00\t183.50\t336.00\t13738\t3.78\t8.85\t0.15\t0.76\t1.33\t2.44\t99.88\n")
-        f.write("MITE\t378\t378\t2890\t10\t3\t9\t3\t98.78\t1.20\t95.80\t98.64\t99.18\t99.46\t99.73\t325.33\t47.86\t253\t290.00\t333.00\t362.00\t390\t86.07\t12.66\t66.93\t76.72\t88.10\t95.77\t103.17\n")
-        f.write("NoCat\t9999\t9999\t384076\t1297\t1\t1219\t1\t82.60\t6.73\t61.20\t78.37\t81.41\t85.29\t100.00\t323.01\t686.85\t21\t64.00\t139.00\t280.00\t10000\t3.23\t6.87\t0.21\t0.64\t1.39\t2.80\t100.01\n")
-        f.write("SSR\t680\t680\t325152\t2340\t24\t2290\t28\t79.07\t3.60\t69.19\t76.64\t79.02\t81.10\t97.83\t221.64\t139.84\t21\t121.00\t183.00\t285.00\t799\t32.59\t20.57\t3.09\t17.79\t26.91\t41.91\t117.50\n")
-        f.write("TIR\t2532\t2532\t700173\t2503\t5\t2160\t5\t84.70\t7.43\t64.03\t79.46\t82.77\t90.09\t100.00\t326.54\t405.94\t21\t90.00\t187.00\t342.00\t2758\t12.90\t16.03\t0.83\t3.55\t7.39\t13.51\t108.93\n")
-        f.write("confused\t19419\t19419\t1299224\t3903\t0\t3311\t0\t82.30\t6.34\t63.20\t78.17\t80.81\t84.58\t100.00\t408.22\t989.57\t21\t113.00\t207.00\t339.00\t17966\t2.10\t5.10\t0.11\t0.58\t1.07\t1.75\t92.52\n")
-        f.close()
-        tableName = "dummyDmelCaf1_chr_allTEs_nr_noSSR_join_path_statsPerClassif"
-        self._iDb.createTable(tableName, "pathstat", statsFileName)
-        
-        self.assertTrue(self._iDb.doesTableExist(tableName))
-        
-        expSize = 8
-        obsSize = self._iDb.getSize(tableName)
-        self.assertEquals(expSize, obsSize)
-        
-        expColumnNb = 29
-        sqlCmd = "DESC %s;" % tableName
-        self._iDb.execute(sqlCmd)
-        res = self._iDb.fetchall()
-        obsColumnNb = len(res)
-        self.assertEquals(expColumnNb, obsColumnNb)
-        
-        self._iDb.dropTable(tableName)
-        os.remove(statsFileName)
-        
-    def test_createJobTable_is_table_created(self):
-        tableName = "dummyJobTable" + self._uniqId
-        self._iDb.createTable(tableName, "jobs")
-        self.assertTrue(self._iDb.doesTableExist(tableName))
-        self._iDb.dropTable(tableName)
-        
-    def test_createClassifTable(self):
-        tableName = "dummyClassifTable"
-        self._iDb.dropTable(tableName)        
-        fileName = "test.classif"
-        
-        with open(fileName, "w") as f:
-            f.write("RIX-incomp-chim_DmelCaf1_2_0-B-G1000-Map3\t3508\t-\tPotentialChimeric\tI\tLINE\tincomplete\tCI=36; coding=(TE_BLRtx: DMCR1A:ClassI:LINE:Jockey: 14.16%, FW3_DM:ClassI:LINE:Jockey: 15.07%; TE_BLRx: CR1-1_DWil_2p:ClassI:LINE:Jockey: 18.98%, FW2_DM-ORF1p:ClassI:LINE:Jockey: 22.36%, Jockey-1_DYa_1p:ClassI:LINE:Jockey: 11.86%); struct=(TElength: >700bps); other=(TE_BLRx: Gypsy7-I_Dmoj_1p:ClassI:LTR:Gypsy: 12.58%; HG_BLRn: FBtr0089196_Dmel_r4.3: 11.74%; SSRCoverage=0.12<0.75)\n")
-            f.write("RLX-incomp_DmelCaf1_2_0-B-G1019-Map3\t4131\t+\tok\tI\tLTR\tincomplete\tCI=28; coding=(TE_BLRtx: ROO_I:ClassI:LTR:Bel-Pao: 43.27%, ROO_LTR:ClassI:LTR:Bel-Pao: 100.00%; TE_BLRx: BEL-6_DWil-I_2p:ClassI:LTR:Bel-Pao: 69.84%); struct=(TElength: >4000bps); other=(HG_BLRn: FBtr0087866_Dmel_r4.3: 4.72%; SSRCoverage=0.15<0.75)\n")
-            f.write("RLX-incomp_DmelCaf1_2_0-B-G1025-Map3\t6534\t-\tok\tI\tLTR\tincomplete\tCI=28; coding=(TE_BLRtx: Gypsy2-I_Dmoj:ClassI:LTR:Gypsy: 11.82%, MDG3_DM:ClassI:LTR:Gypsy: 17.43%, STALKER2_LTR:ClassI:LTR:Gypsy: 14.62%, STALKER4_LTR:ClassI:LTR:Gypsy: 57.21%; TE_BLRx: Gypsy-16_DWil-I_1p:ClassI:LTR:Gypsy: 32.19%; profiles: PF00665.18_rve_INT_32.0: 68.64%); struct=(TElength: >4000bps); other=(HG_BLRn: FBtr0070036_Dmel_r4.3: 3.73%; TermRepeats: non-termLTR: 1701; SSRCoverage=0.14<0.75)\n")
-      
-        self._iDb.createTable(tableName, "classif", fileName)
-        self.assertTrue(self._iDb.doesTableExist(tableName))
-        
-        expColumnNb = 8
-        sqlCmd = "DESC %s;" % tableName
-        self._iDb.execute(sqlCmd)
-        res = self._iDb.fetchall()
-        obsColumnNb = len(res)
-        self.assertEquals(expColumnNb, obsColumnNb)
-        
-        expSize = 3
-        obsSize = self._iDb.getSize(tableName)
-        self.assertEquals(expSize, obsSize)
-        
-        expLIndex = ["iseq_name", "istatus", "iclass", "iorder", "icomp"]
-        sqlCmd = "SHOW INDEX FROM %s" % tableName
-        self._iDb.execute(sqlCmd)
-        res = self._iDb.cursor.fetchall()
-        obsLIndex = []
-        for tuple in res:
-            obsLIndex.append(tuple[2])
-        self.assertEquals(expLIndex, obsLIndex)
-  
-        self._iDb.dropTable(tableName)
-        os.remove(fileName)
-        
-    def test_createClassifIndex(self):
-        tableName = "dummyclassifTable%s" % self._uniqId
-        sqlCmd = "CREATE TABLE %s (seq_name varchar(255), length int unsigned, strand char, status varchar(255), class_classif varchar(255), order_classif varchar(255), completeness varchar(255), evidences text);" % tableName
-        self._iDb.execute(sqlCmd)
-        expLIndex = ["iseq_name", "istatus", "iclass", "iorder", "icomp"]
-        
-        self._iDb.createIndex(tableName, "classif")
-        
-        sqlCmd = "SHOW INDEX FROM %s" % tableName
-        self._iDb.execute(sqlCmd)
-        res = self._iDb.cursor.fetchall()
-        
-        obsLIndex = []
-        for tuple in res:
-            obsLIndex.append(tuple[2])
-        self.assertEquals(expLIndex, obsLIndex)
-        self._iDb.dropTable(tableName)
-
-    def test_createBinPathTable(self):
-        pathFileName = "dummy.path"
-        with open(pathFileName, "w") as pathF:
-            pathF.write("1\tqry\t1\t100\tsbj\t1\t100\t1e-123\t136\t98.4\n")
-            pathF.write("2\tqry\t500\t401\tsbj\t1\t100\t1e-152\t161\t98.7\n")
-        
-        expPathTuple1 = (1, 1000000, "qry", 1, 100, 1)
-        expPathTuple2 = (2, 1000000, "qry", 401, 500, 1)  # query coordinates stored on the direct strand in the bin index
-        expTPathTuples = (expPathTuple1, expPathTuple2)
-        
-        pathTableName = "dummy_path"
-        idxTableName = "dummy_path_idx"
-        self._iDb.createTable(pathTableName, "path", pathFileName)
-        self._iDb.createBinPathTable(pathTableName, True)
-        
-        sqlCmd = "SELECT * FROM %s" % idxTableName
-        self._iDb.execute(sqlCmd)
-        obsTPathTuples = self._iDb.fetchall()
-        
-        self._iDb.dropTable(pathTableName)
-        self._iDb.dropTable(idxTableName)
-        os.remove(pathFileName)
-        
-        self.assertEquals(expTPathTuples, obsTPathTuples)
-
-    def test_createBinSetTable(self):
-        setFileName = "dummy.set"
-        with open(setFileName, "w") as setF:
-            setF.write("1\tseq1\tchr1\t1900\t3900\n")
-            setF.write("2\tseq2\tchr1\t2\t9\n")
-            setF.write("3\tseq3\tchr1\t8\t13\n")
-            
-        expTuple = ((1L, 10000.0, 'chr1', 1900L, 3900L, 1L), (2L, 1000.0, 'chr1', 2L, 9L, 1L), (3L, 1000.0, 'chr1', 8L, 13L, 1L))
-        
-        setTableName = "dummy_set"
-        idxTableName = "dummy_set_idx"
-        self._iDb.createTable(setTableName, "set", setFileName)
-        self._iDb.createBinSetTable(setTableName, True)
-        
-        sqlCmd = "SELECT * FROM %s" % idxTableName
-        self._iDb.execute(sqlCmd)
-        obsTuple = self._iDb.fetchall()
-        
-        self._iDb.dropTable(setTableName)
-        self._iDb.dropTable(idxTableName)
-        os.remove(setFileName)
-        
-        self.assertEquals(expTuple, obsTuple)
-
-    def _getInstanceToAdapt(self):
-        iPath = Path()
-        return iPath
-            
-if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
--- a/commons/core/sql/test/Test_DbSQLite.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,162 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-import unittest
-import time
-from commons.core.sql.DbSQLite import DbSQLite
-
-class Test_DbSQLite(unittest.TestCase):
-
-    def setUp( self ):
-        self._iDb = DbSQLite("test.db")
-        self._uniqId = "%s" % time.strftime("%Y%m%d%H%M%S")
-        
-    def tearDown( self ):
-        if self._iDb.open():
-            self._iDb.close()
-        self._iDb.delete()
-        self._iDb = None
-        
-    def test_open_True(self):
-        self._iDb.close()
-        self.assertTrue( self._iDb.open(1) )
-
-    def test_open_False(self):
-        self._iDb.close()
-        self._iDb.host = "/toto/toto.db"
-        self.assertFalse( self._iDb.open(1) )
-        self._iDb.host = "test.db"
-
-    def test_updateInfoTable(self):
-        tableName = "dummyTable" + self._uniqId
-        info = "Table_for_test"
-        
-        self._iDb.updateInfoTable(tableName, info)
-        
-        sqlCmd = 'SELECT file FROM info_tables WHERE name = "%s"' % ( tableName )
-        self._iDb.execute( sqlCmd )
-        results = self._iDb.fetchall()
-        obsResult = False
-        if (info,) in results:
-            obsResult = True
-            sqlCmd = 'DELETE FROM info_tables WHERE name = "%s"' % ( tableName )
-            self._iDb.execute( sqlCmd )
-            
-        self.assertTrue( obsResult )
-        
-    def test_doesTableExist_True(self):
-        tableName = "dummyTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( dummyColumn varchar(255) )" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        self.assertTrue( self._iDb.doesTableExist(tableName) )
-
-    def test_dropTable(self):
-        tableName = "dummyTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( dummyColumn varchar(255) )" % tableName
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE TABLE info_tables ( name varchar(255), file varchar(255) )"
-        self._iDb.execute( sqlCmd )
-        sqlCmd = 'INSERT INTO info_tables VALUES ("%s","")' % tableName
-        self._iDb.execute( sqlCmd )
-        
-        self._iDb.dropTable(tableName)
-        self.assertFalse( self._iDb.doesTableExist(tableName) )
-        
-    def test_doesTableExist_False(self):
-        tableName = "dummyTable" + self._uniqId
-        self.assertFalse( self._iDb.doesTableExist(tableName) )
-        
-    def test_createJobTable_is_table_created(self):
-        self._iDb.createTable("dummyJobTable", "jobs")
-        isTableCreated = self._iDb.doesTableExist("dummyJobTable")
-        self.assertTrue(isTableCreated)
-    
-    def test_createJobTable_field_list(self):
-        self._iDb.createTable("dummyJobTable", "jobs")
-        obsLFiled = self._iDb.getFieldList("dummyJobTable")
-        expLField = ["jobid", "jobname", "groupid", "command", "launcher", "queue", "status", "time", "node"]
-        self.assertEquals(expLField, obsLFiled)
-        
-    def test_createTable(self):
-        tableName = "dummyJobTable" + self._uniqId
-        self._iDb.createTable(tableName, "job")
-        obsLField = self._iDb.getFieldList(tableName)
-        expLField = ["jobid", "jobname", "groupid", "command", "launcher", "queue", "status", "time", "node"]
-        self.assertEquals(expLField, obsLField)
-        
-    def test_createTable_with_overwrite_Job(self):
-        tableName = "dummyJobTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( dummyColumn varchar(255) )" % tableName
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "CREATE TABLE info_tables ( name varchar(255), file varchar(255) )"
-        self._iDb.execute( sqlCmd )
-        sqlCmd = 'INSERT INTO info_tables VALUES ("%s","")' % tableName
-        self._iDb.execute( sqlCmd )
-        
-        self._iDb.createTable(tableName, "job", True)
-        obsLField = self._iDb.getFieldList(tableName)
-        expLField = ["jobid", "jobname", "groupid", "command", "launcher", "queue", "status", "time", "node"]
-        self.assertEquals(expLField, obsLField)
-        
-    def test_getSize_empty_table(self):
-        tableName = "dummyJobTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( dummyColumn varchar(255) )" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        expSize = 0
-        obsSize = self._iDb.getSize(tableName)
-        self.assertEquals( expSize, obsSize )
-        
-    def test_getSize_one_row(self):
-        tableName = "dummyJobTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( dummyColumn varchar(255) )" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        sqlCmd = "INSERT INTO %s (dummyColumn) VALUES ('toto')" % tableName
-        self._iDb.execute( sqlCmd )
-        expSize = 1
-        obsSize = self._iDb.getSize(tableName)
-        self.assertEquals( expSize, obsSize )
-        
-    def test_isEmpty_True(self):
-        tableName = "dummyTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( dummyColumn varchar(255) )" % ( tableName )
-        self._iDb.execute( sqlCmd )
-        self.assertTrue(self._iDb.isEmpty(tableName))
-        
-    def test_isEmpty_False(self):
-        tableName = "dummyTable" + self._uniqId
-        sqlCmd = "CREATE TABLE %s ( dummyColumn varchar(255) )" % (tableName)
-        self._iDb.execute(sqlCmd)
-        sqlCmd = "INSERT INTO %s (dummyColumn) VALUES ('toto')" % tableName
-        self._iDb.execute(sqlCmd)
-        self.assertFalse(self._iDb.isEmpty(tableName))
-        
-if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
--- a/commons/core/sql/test/Test_F_JobAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,91 +0,0 @@
-from commons.core.launcher.WriteScript import WriteScript
-from commons.core.sql.Job import Job
-from commons.core.sql.DbFactory import DbFactory
-from commons.core.sql.TableJobAdaptatorFactory import TableJobAdaptatorFactory
-import sys
-import stat
-import os
-import time
-import unittest
-import glob
-
-class Test_F_JobAdaptator(unittest.TestCase):
-
-    def setUp(self):
-        self._jobTableName = "dummyJobTable"
-        self._iJA = TableJobAdaptatorFactory.createJobInstance()
-
-    def tearDown(self):
-        pass
-    
-    def test_submitJob(self):
-        job1 = self._createJobInstance("job1")
-        self._createLauncherFile(job1, self._iJA)
-        job2 = self._createJobInstance("job2")
-        self._createLauncherFile(job2, self._iJA)
-        job3 = self._createJobInstance("job3")
-        self._createLauncherFile(job3, self._iJA)
-        
-        self._iJA.submitJob( job1, maxNbWaitingJobs=3, checkInterval=5, verbose=0 )
-        self._iJA.submitJob( job2, maxNbWaitingJobs=3, checkInterval=5, verbose=0 )
-        self._iJA.submitJob( job3, maxNbWaitingJobs=3, checkInterval=5, verbose=0 )
-
-        time.sleep(120)
-        
-        expErrorFilePrefix1 = job1.jobname + ".e" 
-        expOutputFilePrefix1 = job1.jobname + ".o"
-        expErrorFilePrefix2 = job2.jobname + ".e" 
-        expOutputFilePrefix2 = job2.jobname + ".o"
-        expErrorFilePrefix3 = job3.jobname + ".e" 
-        expOutputFilePrefix3 = job3.jobname + ".o"
-        
-        lErrorFiles1 = glob.glob(expErrorFilePrefix1 + "*")
-        lOutputFiles1 = glob.glob(expOutputFilePrefix1 + "*")
-        lErrorFiles2 = glob.glob(expErrorFilePrefix2 + "*")
-        lOutputFiles2 = glob.glob(expOutputFilePrefix2 + "*")
-        lErrorFiles3 = glob.glob(expErrorFilePrefix3 + "*")
-        lOutputFiles3 = glob.glob(expOutputFilePrefix3 + "*")
-        
-        isLErrorFileNotEmpty1 = (len(lErrorFiles1) != 0) 
-        isLOutputFileNotEmpty1 = (len(lOutputFiles1) != 0)
-        isLErrorFileNotEmpty2 = (len(lErrorFiles2) != 0) 
-        isLOutputFileNotEmpty2 = (len(lOutputFiles2) != 0)
-        isLErrorFileNotEmpty3 = (len(lErrorFiles3) != 0) 
-        isLOutputFileNotEmpty3 = (len(lOutputFiles3) != 0)
-        
-        os.system("rm launcherFileTest*.py *.e* *.o*")
-        self.assertTrue(isLErrorFileNotEmpty1 and isLOutputFileNotEmpty1)
-        self.assertTrue(isLErrorFileNotEmpty2 and isLOutputFileNotEmpty2)
-        self.assertTrue(isLErrorFileNotEmpty3 and isLOutputFileNotEmpty3)
-    
-    def test_submit_and_waitJobGroup(self):
-        iJob = self._createJobInstance("test")
-        self._createLauncherFile(iJob, self._iJA)
-        
-        self._iJA.submitJob( iJob, maxNbWaitingJobs=3, checkInterval=5, verbose=0 )
-        self._iJA.waitJobGroup(iJob.groupid, 0, 2)
-        
-        expErrorFilePrefix1 = iJob.jobname + ".e" 
-        expOutputFilePrefix1 = iJob.jobname + ".o"
-        
-        lErrorFiles1 = glob.glob(expErrorFilePrefix1 + "*")
-        lOutputFiles1 = glob.glob(expOutputFilePrefix1 + "*")
-        
-        isLErrorFileExist = (len(lErrorFiles1) != 0) 
-        isLOutputFileExist = (len(lOutputFiles1) != 0)
-        os.system("rm launcherFileTest*.py *.e* *.o*")
-        self.assertTrue(isLErrorFileExist and isLOutputFileExist)
-
-    def _createJobInstance(self, name):
-        lResources = []
-        if os.environ.get("HOSTNAME") == "compute-2-46.local":
-            lResources.append("test=TRUE")
-        return Job(0, name, "test", "", "log = os.system(\"date;sleep 5;date\")", "%s/launcherFileTest_%s.py" % (os.getcwd(), name), lResources=lResources)
-
-    def _createLauncherFile(self, iJob, iJA):
-        iWriteScript = WriteScript(iJob, iJA, os.getcwd(), os.getcwd(), False, True)
-        iWriteScript.run(iJob.command, "", iJob.launcher)
-        os.chmod(iJob.launcher, stat.S_IRWXU+stat.S_IRWXG+stat.S_IRWXO)
-        
-if __name__ == "__main__":
-    unittest.main()
--- a/commons/core/sql/test/Test_F_TableJobAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,185 +0,0 @@
-from commons.core.launcher.WriteScript import WriteScript
-from commons.core.sql.Job import Job
-from commons.core.sql.DbFactory import DbFactory
-from commons.core.sql.TableJobAdaptatorFactory import TableJobAdaptatorFactory
-import sys
-import stat
-import os
-import time
-import unittest
-import glob
-
-class Test_F_TableJobAdaptator(unittest.TestCase):
-
-    def setUp(self):
-        self._jobTableName = "dummyJobTable"
-        self._db = DbFactory.createInstance()
-        self._iTJA = TableJobAdaptatorFactory.createInstance(self._db, self._jobTableName)
-
-    def tearDown(self):
-        self._db.dropTable(self._jobTableName)
-        self._db.close()
-    
-    def test_submitJob_with_multiple_jobs(self):
-        self._db.createTable(self._jobTableName, "jobs", overwrite = True)
-        job1 = _createJobInstance("job1")
-        _createLauncherFile(job1, self._iTJA)
-        job2 = _createJobInstance("job2")
-        _createLauncherFile(job2, self._iTJA)
-        job3 = _createJobInstance("job3")
-        _createLauncherFile(job3, self._iTJA)
-        
-        self._iTJA.submitJob( job1, maxNbWaitingJobs=3, checkInterval=5, verbose=0 )
-        self._iTJA.submitJob( job2, maxNbWaitingJobs=3, checkInterval=5, verbose=0 )
-        self._iTJA.submitJob( job3, maxNbWaitingJobs=3, checkInterval=5, verbose=0 )
-
-        time.sleep(120)
-        
-        expJobStatus = "finished"
-        obsJobStatus1 = self._iTJA.getJobStatus(job1)
-        obsJobStatus2 = self._iTJA.getJobStatus(job2)
-        obsJobStatus3 = self._iTJA.getJobStatus(job3)
-        
-        self.assertEquals(expJobStatus, obsJobStatus1)
-        self.assertEquals(expJobStatus, obsJobStatus2)
-        self.assertEquals(expJobStatus, obsJobStatus3)
-        
-        expErrorFilePrefix1 = job1.jobname + ".e" 
-        expOutputFilePrefix1 = job1.jobname + ".o"
-        expErrorFilePrefix2 = job2.jobname + ".e" 
-        expOutputFilePrefix2 = job2.jobname + ".o"
-        expErrorFilePrefix3 = job3.jobname + ".e" 
-        expOutputFilePrefix3 = job3.jobname + ".o"
-        
-        lErrorFiles1 = glob.glob(expErrorFilePrefix1 + "*")
-        lOutputFiles1 = glob.glob(expOutputFilePrefix1 + "*")
-        lErrorFiles2 = glob.glob(expErrorFilePrefix2 + "*")
-        lOutputFiles2 = glob.glob(expOutputFilePrefix2 + "*")
-        lErrorFiles3 = glob.glob(expErrorFilePrefix3 + "*")
-        lOutputFiles3 = glob.glob(expOutputFilePrefix3 + "*")
-        
-        isLErrorFileNotEmpty1 = (len(lErrorFiles1) != 0) 
-        isLOutputFileNotEmpty1 = (len(lOutputFiles1) != 0)
-        isLErrorFileNotEmpty2 = (len(lErrorFiles2) != 0) 
-        isLOutputFileNotEmpty2 = (len(lOutputFiles2) != 0)
-        isLErrorFileNotEmpty3 = (len(lErrorFiles3) != 0) 
-        isLOutputFileNotEmpty3 = (len(lOutputFiles3) != 0)
-        
-        os.system("rm launcherFileTest*.py *.e* *.o*")
-        self.assertTrue(isLErrorFileNotEmpty1 and isLOutputFileNotEmpty1)
-        self.assertTrue(isLErrorFileNotEmpty2 and isLOutputFileNotEmpty2)
-        self.assertTrue(isLErrorFileNotEmpty3 and isLOutputFileNotEmpty3)
-
-    def test_submitJob_job_already_submitted(self):
-        self._db.createTable(self._jobTableName, "jobs", overwrite = True)
-        iJob = _createJobInstance("job")
-        self._iTJA.recordJob(iJob)
-        
-        isSysExitRaised = False
-        try:
-            self._iTJA.submitJob(iJob)
-        except SystemExit:
-            isSysExitRaised = True
-        self.assertTrue(isSysExitRaised)
-    
-    def test_waitJobGroup_with_error_job_maxRelaunch_two(self):
-        self._db.createTable(self._jobTableName, "jobs", overwrite = True)
-        iJob = _createJobInstance("job")
-        _createLauncherFile(iJob, self._iTJA)
-        
-        self._iTJA.recordJob(iJob)
-        self._iTJA.changeJobStatus(iJob, "error")
-        
-        self._iTJA.waitJobGroup(iJob.groupid, 0, 2)
-        
-        time.sleep(120)
-        
-        expJobStatus = "finished"
-        obsJobStatus1 = self._iTJA.getJobStatus(iJob)
-        
-        self.assertEquals(expJobStatus, obsJobStatus1)
-        
-        expErrorFilePrefix1 = iJob.jobname + ".e" 
-        expOutputFilePrefix1 = iJob.jobname + ".o"
-        
-        lErrorFiles1 = glob.glob(expErrorFilePrefix1 + "*")
-        lOutputFiles1 = glob.glob(expOutputFilePrefix1 + "*")
-        
-        isLErrorFileNotEmpty1 = (len(lErrorFiles1) != 0) 
-        isLOutputFileNotEmpty1 = (len(lOutputFiles1) != 0)
-        
-        self._iTJA.removeJob(iJob) 
-        os.system("rm launcherFileTest*.py *.e* *.o*")
-        self.assertTrue(isLErrorFileNotEmpty1 and isLOutputFileNotEmpty1)
-
-class Test_F_TableJobAdaptator_SGE(unittest.TestCase):
-
-    def setUp(self):
-        if os.environ["REPET_JOB_MANAGER"].lower() != "sge":
-            print "ERROR: jobs manager is not SGE: REPET_JOB_MANAGER = %s." % os.environ["REPET_JOB_MANAGER"]
-            sys.exit(0)
-        self._jobTableName = "dummyJobTable"
-        self._db = DbFactory.createInstance()
-        self._db.createTable(self._jobTableName, "jobs", overwrite = True)
-        self._iTJA = TableJobAdaptatorFactory.createInstance(self._db, self._jobTableName)
-        self._iJob = _createJobInstance("job")
-        _createLauncherFile(self._iJob, self._iTJA)
-
-    def tearDown(self):
-        self._db.dropTable(self._jobTableName)
-        self._db.close()
-
-    def test_waitJobGroup_with_several_nbTimeOut_waiting(self):
-        self._iTJA.recordJob(self._iJob)
-        self._iTJA.changeJobStatus(self._iJob, "running")
-        
-        expMsg = "ERROR: job '%s', supposedly still running, is not handled by SGE anymore\n" % self._iJob.jobid
-        
-        obsError = "obsError.txt"
-        obsErrorHandler = open(obsError, "w")
-        stderrRef = sys.stderr
-        sys.stderr = obsErrorHandler
-        
-        isSysExitRaised = False
-        try:
-            self._iTJA.waitJobGroup(self._iJob.groupid, timeOutPerJob = 3)
-        except SystemExit:
-            isSysExitRaised = True
-           
-        obsErrorHandler.close()
-        
-        obsErrorHandler = open(obsError, "r")
-        obsMsg = obsErrorHandler.readline()
-        obsErrorHandler.close()
-       
-        sys.stderr = stderrRef
-        os.remove(obsError)
-        os.system("rm launcherFileTest*.py")
-        self.assertTrue(isSysExitRaised)
-        self.assertEquals(expMsg, obsMsg)
-         
-    def test_isJobStillHandledBySge_True(self):
-        self._iTJA.submitJob(self._iJob)
-        isJobHandledBySge = self._iTJA.isJobStillHandledBySge(self._iJob.jobid, self._iJob.jobname)
-        os.system("rm launcherFileTest*.py")
-        self.assertTrue(isJobHandledBySge)
-
-    def test_isJobStillHandledBySge_False(self):
-        self._iTJA.recordJob(self._iJob)
-        isJobHandledBySge = self._iTJA.isJobStillHandledBySge(self._iJob.jobid, self._iJob.jobname)
-        os.system("rm launcherFileTest*.py")
-        self.assertFalse(isJobHandledBySge)
-
-def _createJobInstance(name):
-    lResources = []
-    if os.environ.get("HOSTNAME") == "compute-2-46.local":
-        lResources.append("test=TRUE")
-    return Job(0, name, "test", "", "log = os.system(\"date;sleep 5;date\")", "%s/launcherFileTest_%s.py" % (os.getcwd(), name), lResources=lResources)
-
-def _createLauncherFile(iJob, iTJA):
-    iWriteScript = WriteScript(iJob, iTJA, os.getcwd(), os.getcwd())
-    iWriteScript.run(iJob.command, "", iJob.launcher)
-    os.chmod(iJob.launcher, stat.S_IRWXU+stat.S_IRWXG+stat.S_IRWXO)
-        
-if __name__ == "__main__":
-    unittest.main()
--- a/commons/core/sql/test/Test_Job.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-import unittest
-from commons.core.sql.Job import Job
-
-class Test_Job(unittest.TestCase):
-
-    def test__eq__(self):
-        self._job = Job(jobid=0, jobname="test", groupid="test", queue="test",command="test", launcherFile="test", node="test", lResources="mem_free=1G" )
-        o =  Job(jobid=0, jobname="test", groupid="test", queue="test",command="test", launcherFile="test", node="test", lResources="mem_free=1G" )
-        self.assertEqual( self._job, o ) # same data
-        o =  Job(jobid=1, jobname="test", groupid="test", queue="test",command="test", launcherFile="test", node="test", lResources="mem_free=1G" )
-        self.assertNotEqual( self._job, o ) # different jobid        
-        o =  Job(jobid=0, jobname="test1", groupid="test", queue="test",command="test", launcherFile="test", node="test", lResources="mem_free=1G" )
-        self.assertNotEqual( self._job, o ) # different jobname
-        o =  Job(jobid=0, jobname="test", groupid="test1", queue="test",command="test", launcherFile="test", node="test", lResources="mem_free=1G" )
-        self.assertNotEqual( self._job, o ) # different groupid
-        o =  Job(jobid=0, jobname="test", groupid="test", queue="test1",command="test", launcherFile="test", node="test", lResources="mem_free=1G" )
-        self.assertNotEqual( self._job, o ) # different queue        
-        o =  Job(jobid=0, jobname="test", groupid="test", queue="test",command="test1", launcherFile="test", node="test", lResources="mem_free=1G" )
-        self.assertNotEqual( self._job, o ) # different command
-        o =  Job(jobid=0, jobname="test", groupid="test", queue="test",command="test", launcherFile="test1", node="test", lResources="mem_free=1G" )
-        self.assertNotEqual( self._job, o ) # different launcherFile
-        o =  Job(jobid=0, jobname="test", groupid="test", queue="test",command="test", launcherFile="test", node="test1", lResources="mem_free=1G" )
-        self.assertNotEqual( self._job, o ) # different node
-        o =  Job(jobid=0, jobname="test", groupid="test", queue="test",command="test", launcherFile="test", node="test", lResources="mem_free=2G" )
-        self.assertNotEqual( self._job, o ) # different lResources
-        o =  Job(jobid=0, jobname="test", groupid="test", queue="test",command="test", launcherFile="test", node="test", lResources="mem_free=1G", parallelEnvironment="multithread 6" )
-        self.assertNotEqual( self._job, o ) # different parallelEnvironment
-                
-if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
--- a/commons/core/sql/test/Test_TableBinPathAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1244 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-import unittest
-import os
-import time
-from commons.core.sql.TableBinPathAdaptator import TableBinPathAdaptator
-from commons.core.coord.Path import Path
-from commons.core.coord.Set import Set
-from commons.core.sql.DbFactory import DbFactory
-
-class Test_TableBinPathAdaptator( unittest.TestCase ):
-    
-    def setUp( self ):
-        self._uniqId = "%s_%s" % (time.strftime("%Y%m%d%H%M%S") , os.getpid())
-        self._db = DbFactory.createInstance()
-        self._table = "dummyPathTable_%s" % self._uniqId
-        self._table_idx = "dummyPathTable_%s_idx" % self._uniqId
-        
-    def tearDown( self ):
-        self._db.dropTable(self._table)
-        self._db.dropTable(self._table_idx)
-        self._db.close()
- 
-    # TODO: clarify how strand is handled on insert (see the reversed-coordinate expectations below)
-    def test_insert_QryRevSbjDir( self ):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("1", "chr1", "250", "100", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("2", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        tuple = ("4", "chr5", "140", "251", "TE5", "140", "251", "2e-14", "14", "73.1")
-        p4 = Path()
-        p4.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        self._tpA.insert(p4)
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, "chr1", 10, 25, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (1, "chr1", 100, 250, "TE1", 17, 11, 1e-18, 20, 87.4),
-                        (2, "chr1", 15, 30, "TE2", 10, 13, 5e-24, 34, 93.1),
-                        (4, "chr5", 140, 251, "TE5", 140, 251, 2e-14, 14, 73.1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-
-        sqlCmd = "SELECT * FROM %s_idx" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, 1000000, "chr1", 10, 25, 1),
-                        (1, 1000000, "chr1", 100, 250, 1),
-                        (2, 1000000, "chr1", 15, 30, 1),
-                        (4, 1000000, "chr5", 140, 251, 1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-    def test_getPathListOverlappingQueryCoord_one_included( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("6\tchr1\t950\t1010\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-
-        tuple = ("6", "chr1", "950", "1010", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        
-        lObs = self._tpA.getPathListOverlappingQueryCoord( "chr1", 900, 1010 )
-        self.assertEquals(1, len(lObs))
-        lExp = [p1]
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-        
-    def test_getPathListOverlappingQueryCoord_two_overlapped( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("6\tchr1\t950\t1500\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("7\tchr1\t750\t1000\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-
-        tuple = ("6", "chr1", "950", "1500", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("7", "chr1", "750", "1000", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        
-        lObs = self._tpA.getPathListOverlappingQueryCoord( "chr1", 900, 1010 )
-        self.assertEquals(2, len(lObs))
-        lExp = [p1, p2]
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-        
-    def test_getPathListOverlappingQueryCoord_two_not_overlapped_and_not_included( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("6\tchr1\t1050\t1500\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("7\tchr1\t750\t800\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-
-        tuple = ("6", "chr1", "1050", "1500", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("7", "chr1", "750", "800", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        
-        lObs = self._tpA.getPathListOverlappingQueryCoord( "chr1", 900, 1010 )
-        self.assertEquals(0, len(lObs))
-        lExp = []
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-        
-    def test_getPathListOverlappingQueryCoord_one_overlapping_and_others_chained( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("6\tchr1\t900\t1010\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("6\tchr1\t1020\t1030\tTE2\t10\t13\t1e-20\t30\t90.2\n")
-        pathF.write("7\tchr1\t950\t999\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("7\tchr1\t1020\t1030\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("7\tchr5\t8000\t15000\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-
-        tuple = ("6", "chr1", "900", "1010", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-        
-        tuple = ("6", "chr1", "1020", "1030", "TE2", "10", "13", "1e-20", "30", "90.2")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("7", "chr1", "850", "999", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        tuple = ("7", "chr1", "1020", "1030", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p4 = Path()
-        p4.setFromTuple(tuple)
-        
-        tuple = ("7", "chr5", "8000", "15000", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p5 = Path()
-        p5.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        
-        lObs = self._tpA.getPathListOverlappingQueryCoord( "chr1", 1000, 1010 )
-        self.assertEquals(1, len(lObs))
-        lExp = [p1]
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-        
-    def test_getChainListOverlappingQueryCoord_with_all_path_strictly_included_in_the_given_region(self):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t1\t10\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n")
-        pathF.write("3\tchr1\t8\t13\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("4\tchr1\t11\t15\tTE2\t15\t19\t1e-10\t25\t80.2\n")
-        pathF.write("5\tchr1\t14\t19\tTE1\t1\t6\t1e-15\t45\t98.4\n")
-        pathF.close()
-
-        tuple = ("1", "chr1", "1", "10", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-        
-        tuple = ("2", "chr1", "2", "9", "TE2", "10", "13", "1e-20", "30", "90.2")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-
-        tuple = ("3", "chr1", "8", "13", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        tuple = ("4", "chr1", "11", "15", "TE2", "15", "19", "1e-10", "25", "80.2")
-        p4 = Path()
-        p4.setFromTuple(tuple)
-        
-        tuple = ("5", "chr1", "14", "19", "TE1", "1", "6", "1e-15", "45", "98.4")
-        p5 = Path()
-        p5.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        lObs = self._tpA.getChainListOverlappingQueryCoord( "chr1", 1, 20 )
-        self.assertEquals(5, len(lObs))
-        
-        lExp = [p1, p2, p3, p4, p5]
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-        
-    def test_getChainListOverlappingQueryCoord_with_2_path_overlapping_the_given_region(self):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t1\t20\tTE1\t11\t17\t1e-18\t20\t87.4\n")
-        pathF.write("2\tchr1\t10\t30\tTE2\t10\t13\t5e-24\t34\t93.1\n")
-        pathF.close()
-
-        tuple = ("1", "chr1", "1", "20", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-        
-        tuple = ("2", "chr1", "10", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        lObs = self._tpA.getChainListOverlappingQueryCoord( "chr1", 12, 18 )
-        self.assertEquals(2, len(lObs))
-        
-        lExp = [p1, p2]
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-        
-    def test_getChainListOverlappingQueryCoord_without_path_overlapping_the_given_region(self):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t1\t20\tTE1\t11\t17\t1e-18\t20\t87.4\n")
-        pathF.write("2\tchr1\t10\t30\tTE2\t10\t13\t5e-24\t34\t93.1\n")
-        pathF.write("3\tchr5\t45\t50\tTE2\t10\t13\t5e-24\t34\t93.1\n")
-        pathF.close()
-
-        tuple = ("1", "chr1", "1", "20", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-        
-        tuple = ("2", "chr1", "10", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr5", "45", "50", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        lObs = self._tpA.getChainListOverlappingQueryCoord( "chr1", 40, 50 )
-        self.assertEquals(0, len(lObs))
-        
-        lExp = []
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-        
-    def test_getChainListOverlappingQueryCoord_with_inverse_coord(self):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t2000\t1010\tTE2\t17\t11\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t5000\t3030\tTE2\t13\t10\t1e-20\t30\t90.2\n")
-        pathF.close()
-        
-        tuple = ("1", "chr1", "2000", "1010", "TE2", "17", "11", "1e-20", "30", "90.2")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-        
-        tuple = ("2", "chr1", "5000", "3030", "TE2", "13", "10", "1e-20", "30", "90.2")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        
-        lObs = self._tpA.getChainListOverlappingQueryCoord( "chr1", 1000, 1500 )
-        self.assertEquals(1, len(lObs))
-        lExp = [p1]
-        self.assertEquals(lExp, lObs)
-        
-        lObs = self._tpA.getChainListOverlappingQueryCoord( "chr1", 4000, 4510 )
-        self.assertEquals(1, len(lObs))
-        lExp = [p2]
-        self.assertEquals(lExp, lObs)
-        
-        lObs = self._tpA.getChainListOverlappingQueryCoord( "chr1", 1000, 4510 )
-        self.assertEquals(2, len(lObs))
-        lExp = [p1, p2]
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-        
-    def test_getChainListOverlappingQueryCoord_with_chain_id_and_coord( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("6\tchr1\t900\t1010\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("6\tchr1\t1020\t1030\tTE2\t10\t13\t1e-20\t30\t90.2\n")
-        pathF.write("7\tchr1\t950\t999\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("7\tchr1\t1020\t1030\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("7\tchr5\t8000\t15000\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-
-        tuple = ("6", "chr1", "900", "1010", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-        
-        tuple = ("6", "chr1", "1020", "1030", "TE2", "10", "13", "1e-20", "30", "90.2")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("7", "chr1", "950", "999", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        tuple = ("7", "chr1", "1020", "1030", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p4 = Path()
-        p4.setFromTuple(tuple)
-        
-        tuple = ("7", "chr5", "8000", "15000", "TE2", "11", "17", "1e-20", "30", "90.2")
-        p5 = Path()
-        p5.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        lObs = self._tpA.getChainListOverlappingQueryCoord( "chr1", 1000, 1010 )
-        self.assertEquals(5, len(lObs))
-        lExp = [p1, p2, p3, p4, p5]
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-
-    def test_getPathListIncludedInQueryCoord_all_included(self):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t10\t20\tTE1\t11\t17\t1e-18\t20\t87.4\n")
-        pathF.write("2\tchr1\t20\t30\tTE2\t10\t13\t5e-24\t34\t93.1\n")
-        pathF.close()
-
-        tuple = ("1", "chr1", "10", "20", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-        
-        tuple = ("2", "chr1", "20", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        lObs = self._tpA.getPathListIncludedInQueryCoord( "chr1", 1, 40 )
-        self.assertEquals(2, len(lObs))
-        
-        lExp = [p1, p2]
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-
-    def test_getPathListIncludedInQueryCoord_all_not_included(self):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t10\t20\tTE1\t11\t17\t1e-18\t20\t87.4\n")
-        pathF.write("2\tchr1\t20\t30\tTE2\t10\t13\t5e-24\t34\t93.1\n")
-        pathF.write("3\tchr5\t55\t60\tTE2\t10\t13\t5e-24\t34\t93.1\n")
-        pathF.close()
-
-        tuple = ("1", "chr1", "10", "20", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-        
-        tuple = ("2", "chr1", "20", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr2", "55", "60", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        lObs = self._tpA.getPathListIncludedInQueryCoord( "chr1", 50, 70 )
-        self.assertEquals(0, len(lObs))
-        
-        lExp = []
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-
-    def test_getPathListIncludedInQueryCoord_all_overlapping(self):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t10\t25\tTE1\t11\t17\t1e-18\t20\t87.4\n")
-        pathF.write("2\tchr1\t15\t30\tTE2\t10\t13\t5e-24\t34\t93.1\n")
-        pathF.close()
-
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-        
-        tuple = ("2", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        lObs = self._tpA.getPathListIncludedInQueryCoord( "chr1", 13, 22 )
-        self.assertEquals(0, len(lObs))
-        
-        lExp = []
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-
-    def test_getPathListIncludedInQueryCoord_with_one_included_and_one_overlapping(self):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t10\t25\tTE1\t11\t17\t1e-18\t20\t87.4\n")
-        pathF.write("2\tchr1\t15\t30\tTE2\t10\t13\t5e-24\t34\t93.1\n")
-        pathF.close()
-
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-        
-        tuple = ("2", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        lObs = self._tpA.getPathListIncludedInQueryCoord( "chr1", 9, 27 )
-        self.assertEquals(1, len(lObs))
-        lExp = [p1]
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-
-    def test_getPathListIncludedInQueryCoord_with_one_included_and_two_chained(self):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t10\t25\tTE1\t11\t17\t1e-18\t20\t87.4\n")
-        pathF.write("1\tchr1\t100\t250\tTE1\t11\t17\t1e-18\t20\t87.4\n")
-        pathF.write("2\tchr1\t15\t30\tTE2\t10\t13\t5e-24\t34\t93.1\n")
-        pathF.close()
-
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("1", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("2", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        lObs = self._tpA.getPathListIncludedInQueryCoord( "chr1", 9, 27 )
-        self.assertEquals(1, len(lObs))
-        lExp = [p1]
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-        
-    def test_deleteFromId_with_correct_ID(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        self._tpA.deleteFromId(3)
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, "chr1", 10, 25, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (2, "chr1", 100, 250, "TE1", 11, 17, 1e-18, 20, 87.4),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-
-        sqlCmd = "SELECT * FROM %s_idx" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, 1000000, "chr1", 10, 25, 1),
-                        (2, 1000000, "chr1", 100, 250, 1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-    def test_deleteFromId_with_not_exist_ID(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        self._tpA.deleteFromId(4)
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, "chr1", 10, 25, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (2, "chr1", 100, 250, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (3, "chr1", 15, 30, "TE2", 10, 13, 5e-24, 34, 93.1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-
-        sqlCmd = "SELECT * FROM %s_idx" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, 1000000, "chr1", 10, 25, 1),
-                        (2, 1000000, "chr1", 100, 250, 1),
-                        (3, 1000000, "chr1", 15, 30, 1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-    def test_deleteFromId_with_multiple_ID(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("2", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        self._tpA.deleteFromId(2)
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, "chr1", 10, 25, "TE1", 11, 17, 1e-18, 20, 87.4),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-
-        sqlCmd = "SELECT * FROM %s_idx" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, 1000000, "chr1", 10, 25, 1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-    def test_deleteFromIdList(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        lNumToRemove = [2, 3]
-        self._tpA.deleteFromIdList(lNumToRemove)
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, "chr1", 10, 25, "TE1", 11, 17, 1e-18, 20, 87.4),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-
-        sqlCmd = "SELECT * FROM %s_idx" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, 1000000, "chr1", 10, 25, 1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-    def test_deleteFromIdList_with_empty_list(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        lNumToRemove = []
-        self._tpA.deleteFromIdList(lNumToRemove)
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, "chr1", 10, 25, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (2, "chr1", 100, 250, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (3, "chr1", 15, 30, "TE2", 10, 13, 5e-24, 34, 93.1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-
-        sqlCmd = "SELECT * FROM %s_idx" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, 1000000, "chr1", 10, 25, 1),
-                        (2, 1000000, "chr1", 100, 250, 1),
-                        (3, 1000000, "chr1", 15, 30, 1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-    def test_deleteFromIdList_with_list_of_existing_and_not_existing_ID(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        lNumToRemove = [3, 4]
-        self._tpA.deleteFromIdList(lNumToRemove)
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, "chr1", 10, 25, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (2, "chr1", 100, 250, "TE1", 11, 17, 1e-18, 20, 87.4),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-
-        sqlCmd = "SELECT * FROM %s_idx" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, 1000000, "chr1", 10, 25, 1),
-                        (2, 1000000, "chr1", 100, 250, 1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-    def test_deleteFromIdList_with_multiple_ID_on_BinPathTable(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("3", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        lNumToRemove = [3]
-        self._tpA.deleteFromIdList(lNumToRemove)
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, "chr1", 10, 25, "TE1", 11, 17, 1e-18, 20, 87.4),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-
-        sqlCmd = "SELECT * FROM %s_idx" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, 1000000, "chr1", 10, 25, 1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-    def test_joinTwoPaths_with_min_and_max_existing_IDs(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        expNewId = 1
-        obsNewId = self._tpA.joinTwoPaths(1, 2)
-        
-        self.assertEquals(expNewId, obsNewId)
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, "chr1", 10, 25, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (1, "chr1", 100, 250, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (3, "chr1", 15, 30, "TE2", 10, 13, 5e-24, 34, 93.1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-
-        sqlCmd = "SELECT * FROM %s_idx" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, 1000000, "chr1", 10, 25, 1),
-                        (1, 1000000, "chr1", 100, 250, 1),
-                        (3, 1000000, "chr1", 15, 30, 1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-    def test_joinTwoPaths_with_min_ID_not_existing_and_max_ID_existing(self):
-        tuple = ("4", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("5", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("6", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        expNewId = 1
-        obsNewId = self._tpA.joinTwoPaths(1, 5)
-        
-        self.assertEquals(expNewId, obsNewId)
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((4, "chr1", 10, 25, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (1, "chr1", 100, 250, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (6, "chr1", 15, 30, "TE2", 10, 13, 5e-24, 34, 93.1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-
-        sqlCmd = "SELECT * FROM %s_idx" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((4, 1000000, "chr1", 10, 25, 1),
-                        (1, 1000000, "chr1", 100, 250, 1),
-                        (6, 1000000, "chr1", 15, 30, 1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-    def test_joinTwoPaths_with_min_ID_existing_and_max_ID_not_existing(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        expNewId = 1
-        obsNewId = self._tpA.joinTwoPaths(1, 5)
-        
-        self.assertEquals(expNewId, obsNewId)
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, "chr1", 10, 25, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (2, "chr1", 100, 250, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (3, "chr1", 15, 30, "TE2", 10, 13, 5e-24, 34, 93.1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-
-        sqlCmd = "SELECT * FROM %s_idx" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, 1000000, "chr1", 10, 25, 1),
-                        (2, 1000000, "chr1", 100, 250, 1),
-                        (3, 1000000, "chr1", 15, 30, 1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-    def test_joinTwoPaths_with_min_and_max_not_existing_IDs(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        expNewId = 4
-        obsNewId = self._tpA.joinTwoPaths(4, 5)
-        
-        self.assertEquals(expNewId, obsNewId)
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, "chr1", 10, 25, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (2, "chr1", 100, 250, "TE1", 11, 17, 1e-18, 20, 87.4),
-                        (3, "chr1", 15, 30, "TE2", 10, 13, 5e-24, 34, 93.1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-
-        sqlCmd = "SELECT * FROM %s_idx" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        expPathTuple = ((1, 1000000, "chr1", 10, 25, 1),
-                        (2, 1000000, "chr1", 100, 250, 1),
-                        (3, 1000000, "chr1", 15, 30, 1),)
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-    def test_getNewId(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        expNewId = 4
-        obsNewId = self._tpA.getNewId()
-        
-        self.assertEquals(expNewId, obsNewId)
-        
-    def test_getNewId_with_empty_path_table(self):
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        
-        expNewId = 1
-        obsNewId = self._tpA.getNewId()
-        
-        self.assertEquals(expNewId, obsNewId)
-        
-    def test_getSetListIncludedInQueryCoord_one_included(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        
-        s2 = Set()
-        s2.setFromTuple(("2","TE1","chr1","100","250"))
-        expLSet = [s2]
-        obsLSet = self._tpA.getSetListIncludedInQueryCoord('chr1', 95, 300)
-        
-        self.assertEquals(expLSet, obsLSet)
-        
-    def test_getSetListIncludedInQueryCoord_one_overlapping(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        
-        expLSet = []
-        obsLSet = self._tpA.getSetListIncludedInQueryCoord('chr1', 150, 200)
-        
-        self.assertEquals(expLSet, obsLSet)
-        
-    def test_getSetListIncludedInQueryCoord_with_no_result(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        
-        expLSet = []
-        obsLSet = self._tpA.getSetListIncludedInQueryCoord('chr1', 5000, 6000)
-        
-        self.assertEquals(expLSet, obsLSet)
-        
-    def test_getSetListIncludedInQueryCoord_one_included_and_two_chain(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "1000", "2500", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-
-        tuple = ("3", "chr1", "50", "150", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p4 = Path()
-        p4.setFromTuple(tuple)
-        
-        tuple = ("4", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p5 = Path()
-        p5.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        self._tpA.insert(p4)
-        self._tpA.insert(p5)
-        
-        s2 = Set()
-        s2.setFromTuple(("2","TE1","chr1","100","250"))
-        expLSet = [s2]
-        obsLSet = self._tpA.getSetListIncludedInQueryCoord('chr1', 95, 300)
-        
-        self.assertEquals(expLSet, obsLSet)
-        
-    def test_getSetListOverlappingQueryCoord_one_included(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        
-        s2 = Set()
-        s2.setFromTuple(("2","TE1","chr1","100","250"))
-        expLSet = [s2]
-        obsLSet = self._tpA.getSetListOverlappingQueryCoord('chr1', 95, 300)
-        
-        self.assertEquals(expLSet, obsLSet)
-        
-    def test_getSetListOverlappingQueryCoord_one_overlapping(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        
-        s2 = Set()
-        s2.setFromTuple(("2","TE1","chr1","100","250"))
-        expLSet = [s2]
-        obsLSet = self._tpA.getSetListOverlappingQueryCoord('chr1', 150, 200)
-        
-        self.assertEquals(expLSet, obsLSet)
-        
-    def test_getSetListOverlappingQueryCoord_with_no_result(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        
-        expLSet = []
-        obsLSet = self._tpA.getSetListOverlappingQueryCoord('chr1', 5000, 6000)
-        
-        self.assertEquals(expLSet, obsLSet)
-        
-    def test_getSetListOverlappingQueryCoord_one_included_and_two_chain(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "1000", "2500", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-
-        tuple = ("3", "chr1", "50", "150", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p4 = Path()
-        p4.setFromTuple(tuple)
-        
-        tuple = ("4", "chr1", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p5 = Path()
-        p5.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        self._tpA.insert(p4)
-        self._tpA.insert(p5)
-        
-        s2 = Set()
-        s2.setFromTuple(("2","TE1","chr1","100","250"))
-        s4 = Set()
-        s4.setFromTuple(("3","TE1","chr1","50","150"))
-        expLSet = [s2, s4]
-        obsLSet = self._tpA.getSetListOverlappingQueryCoord('chr1', 95, 300)
-        
-        self.assertEquals(expLSet, obsLSet)
-        
-    def test_getIdList( self ):
-        p1 = Path()
-        p1.setFromString( "1\tchr1\t1\t10\tTE1\t11\t17\t1e-20\t30\t90.2\n" )
-        p2 = Path()
-        p2.setFromString( "2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n" )
-        p3 = Path()
-        p3.setFromString( "2\tchr1\t12\t19\tTE2\t15\t22\t1e-10\t40\t94.2\n" )
-        p4 = Path()
-        p4.setFromString( "3\tchr2\t8\t13\tTE1\t11\t17\t1e-20\t30\t90.2\n" )
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        
-        lPath = [ p1, p2, p3, p4]
-        self._tpA.insertList(lPath)
-        
-        expList = [ 1, 2, 3 ]
-        obsList = self._tpA.getIdList()
-        
-        self.assertEqual( expList, obsList )
-        
-    def test_getQueryList(self):
-        tuple = ("1", "chr1", "10", "25", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-
-        tuple = ("2", "chr1", "100", "250", "TE1", "11", "17", "1e-18", "20", "87.4")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-        
-        tuple = ("3", "chr2", "15", "30", "TE2", "10", "13", "5e-24", "34", "93.1")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path" )
-        self._db.createBinPathTable(self._table, True)
-        self._tpA = TableBinPathAdaptator( self._db, self._table )
-        self._tpA.insert(p1)
-        self._tpA.insert(p2)
-        self._tpA.insert(p3)
-        
-        expList = [ "chr1", "chr2" ]
-        obsList = self._tpA.getQueryList()
-        self.assertEqual( expList, obsList )
-
-test_suite = unittest.TestSuite()
-test_suite.addTest( unittest.makeSuite( Test_TableBinPathAdaptator ) )
-if __name__ == '__main__':
-    unittest.TextTestRunner(verbosity=2).run( test_suite )
--- a/commons/core/sql/test/Test_TableBinSetAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,290 +0,0 @@
-import unittest
-import os
-import time
-from commons.core.sql.TableBinSetAdaptator import TableBinSetAdaptator
-from commons.core.coord.Set import Set
-from commons.core.sql.DbFactory import DbFactory
-
-class Test_TableBinSetAdaptator(unittest.TestCase):
-
-    def setUp(self):
-        self._uniqId = "%s_%s" % (time.strftime("%Y%m%d%H%M%S") , os.getpid())
-        self._iDb = DbFactory.createInstance()
-        radicalTableName = "dummySetTable"
-        self._tableName = "%s_%s" % (radicalTableName, self._uniqId)
-        self._tableName_bin = "%s_idx" % self._tableName
-        self._setFileName = "dummySetFile_%s" % self._uniqId
-        setF = open( self._setFileName, "w" )
-        setF.write("1\tseq1\tchr1\t1900\t3900\n")
-        setF.write("2\tseq2\tchr1\t2\t9\n")
-        setF.write("3\tseq3\tchr1\t8\t13\n")
-        setF.close()
-        self._iDb.createTable(self._tableName, "set", self._setFileName)
-        self._iTableBinSetAdaptator = TableBinSetAdaptator(self._iDb, self._tableName)
-       
-    def tearDown(self):
-        self._iDb.dropTable( self._tableName )
-        self._iDb.dropTable( self._tableName_bin )
-        self._iDb.close()
-        if os.path.exists(self._setFileName):
-            os.remove(self._setFileName)
-        
-    def test_insASetInSetAndBinTable(self):
-        iSet = Set(1, "set1", "seq1", 2, 1)
-        self._iDb.createBinSetTable(self._tableName, True)
-        self._iTableBinSetAdaptator.insASetInSetAndBinTable(iSet)
-        expTupleInBinTable = ((1L, 10000.0, 'chr1', 1900L, 3900L, 1L), (2L, 1000.0, 'chr1', 2L, 9L, 1L), (3L, 1000.0, 'chr1', 8L, 13L, 1L), (1L, 1000.0, 'seq1', 1L, 2L, 0L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName_bin )
-        self._iDb.execute( sqlCmd )
-        obsTupleInBinTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInBinTable, obsTupleInBinTable)
-        expTupleInSetTable = ((1L, 'seq1', 'chr1', 1900L, 3900L), (2L, 'seq2', 'chr1', 2L, 9L), (3L, 'seq3', 'chr1', 8L, 13L), (1L, 'set1', 'seq1', 2L, 1L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName )
-        self._iDb.execute( sqlCmd )
-        obsTupleInSetTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInSetTable, obsTupleInSetTable)
-        
-    def test_insASetInSetAndBinTable_delayedCase(self):
-        iSet = Set(1, "set1", "seq1", 2, 1)
-        self._iDb.createBinSetTable(self._tableName, True)
-        self._iTableBinSetAdaptator.insASetInSetAndBinTable(iSet, True)
-        expTupleInBinTable = ((1L, 10000.0, 'chr1', 1900L, 3900L, 1L), (2L, 1000.0, 'chr1', 2L, 9L, 1L), (3L, 1000.0, 'chr1', 8L, 13L, 1L), (1L, 1000.0, 'seq1', 1L, 2L, 0L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName_bin )
-        self._iDb.execute( sqlCmd )
-        obsTupleInBinTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInBinTable, obsTupleInBinTable)
-        expTupleInSetTable = ((1L, 'seq1', 'chr1', 1900L, 3900L), (2L, 'seq2', 'chr1', 2L, 9L), (3L, 'seq3', 'chr1', 8L, 13L), (1L, 'set1', 'seq1', 2L, 1L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName )
-        self._iDb.execute( sqlCmd )
-        obsTupleInSetTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInSetTable, obsTupleInSetTable)
-        
-    def test_deleteFromIdFromSetAndBinTable(self):
-        self._iDb.createBinSetTable(self._tableName, True)
-        self._iTableBinSetAdaptator.deleteFromIdFromSetAndBinTable(2)
-        expTupleInBinTable = ((1L, 10000.0, 'chr1', 1900L, 3900L, 1L), (3L, 1000.0, 'chr1', 8L, 13L, 1L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName_bin )
-        self._iDb.execute( sqlCmd )
-        obsTupleInBinTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInBinTable, obsTupleInBinTable)
-        expTupleInSetTable = ((1L, 'seq1', 'chr1', 1900L, 3900L), (3L, 'seq3', 'chr1', 8L, 13L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName )
-        self._iDb.execute( sqlCmd )
-        obsTupleInSetTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInSetTable, obsTupleInSetTable)
-
-    def test_deleteFromListIdFromSetAndBinTable(self):
-        lSetToRemove = [1,2]
-        self._iDb.createBinSetTable(self._tableName, True)
-        self._iTableBinSetAdaptator.deleteFromListIdFromSetAndBinTable(lSetToRemove)
-        expTupleInBinTable = ((3L, 1000.0, 'chr1', 8L, 13L, 1L),)
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName_bin )
-        self._iDb.execute( sqlCmd )
-        obsTupleInBinTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInBinTable, obsTupleInBinTable)
-        expTupleInSetTable = ((3L, 'seq3', 'chr1', 8L, 13L),)
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName )
-        self._iDb.execute( sqlCmd )
-        obsTupleInSetTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInSetTable, obsTupleInSetTable)
-        os.remove(self._setFileName)
-
-    def test_joinTwoSetsFromSetAndBinTable(self):
-        id1 = 1
-        id2 = 2
-        self._iDb.createBinSetTable(self._tableName, True)
-        obsNewId = self._iTableBinSetAdaptator.joinTwoSetsFromSetAndBinTable(id1, id2)
-        expTupleInBinTable = ((1L, 10000.0, 'chr1', 1900L, 3900L, 1L), (1L, 1000.0, 'chr1', 2L, 9L, 1L), (3L, 1000.0, 'chr1', 8L, 13L, 1L))
-        expNewId = 1
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName_bin )
-        self._iDb.execute( sqlCmd )
-        obsTupleInBinTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInBinTable, obsTupleInBinTable)
-        expTupleInSetTable = ((1L, 'seq1', 'chr1', 1900L, 3900L), (1L, 'seq2', 'chr1', 2L, 9L), (3L, 'seq3', 'chr1', 8L, 13L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName )
-        self._iDb.execute( sqlCmd )
-        obsTupleInSetTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInSetTable, obsTupleInSetTable)
-        self.assertEquals(expNewId, obsNewId)
-        
-    def test_joinTwoSetsFromSetAndBinTable_with_reversed_id(self):
-        id1 = 2
-        id2 = 1
-        self._iDb.createBinSetTable(self._tableName, True)
-        obsNewId = self._iTableBinSetAdaptator.joinTwoSetsFromSetAndBinTable(id1, id2)
-        expTupleInBinTable = ((1L, 10000.0, 'chr1', 1900L, 3900L, 1L), (1L, 1000.0, 'chr1', 2L, 9L, 1L), (3L, 1000.0, 'chr1', 8L, 13L, 1L))
-        expNewId = 1
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName_bin )
-        self._iDb.execute( sqlCmd )
-        obsTupleInBinTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInBinTable, obsTupleInBinTable)
-        expTupleInSetTable = ((1L, 'seq1', 'chr1', 1900L, 3900L), (1L, 'seq2', 'chr1', 2L, 9L), (3L, 'seq3', 'chr1', 8L, 13L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName )
-        self._iDb.execute( sqlCmd )
-        obsTupleInSetTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInSetTable, obsTupleInSetTable)
-        self.assertEquals(expNewId, obsNewId)
-        
-    def test_getNewId(self):   
-        self._iDb.createBinSetTable(self._tableName, True)
-        obsNewId = self._iTableBinSetAdaptator.getNewId()
-        expNewId = 4
-        self.assertEquals(expNewId, obsNewId)
-        
-    def test_getNewId_empty_table(self):
-        self._iDb.dropTable( self._tableName )
-        self._iDb.dropTable( self._tableName_bin )
-        setF = open( self._setFileName, "w" )
-        setF.close()
-        self._iDb.createTable( self._tableName, "set", self._setFileName )
-        self._iDb.createBinSetTable(self._tableName, True)
-        obsNewId = self._iTableBinSetAdaptator.getNewId()
-        expNewId = 1
-        self.assertEquals(expNewId, obsNewId)
-        
-    def test_getSetListFromQueryCoord(self):
-        start = 10
-        end = 4000
-        seqName = 'chr1'
-        self._iDb.createBinSetTable(self._tableName, True)
-        obsLSet = self._iTableBinSetAdaptator.getSetListFromQueryCoord(seqName, start, end)
-        iSet1 = Set(1, "seq1", "chr1", 1900, 3900)
-        iSet2 = Set(3, "seq3", "chr1", 8, 13)
-        expLSet = [iSet1, iSet2]
-        self.assertEquals(expLSet, obsLSet)
-        
-    def test_getSetListFromQueryCoord_return_empty_list(self):
-        start = 4000
-        end = 40000
-        seqName = 'chr1'
-        self._iDb.createBinSetTable(self._tableName, True)
-        obsLSet = self._iTableBinSetAdaptator.getSetListFromQueryCoord(seqName, start, end)
-        expLSet = []
-        self.assertEquals(expLSet, obsLSet)
-        
-    def test_getSetListStrictlyIncludedInQueryCoord(self):
-        start = 10
-        end = 4000
-        seqName = 'chr1'
-        self._iDb.createBinSetTable(self._tableName, True)
-        obsLSet = self._iTableBinSetAdaptator.getSetListStrictlyIncludedInQueryCoord(seqName, start, end)
-        iSet1 = Set(1, "seq1", "chr1", 1900, 3900)
-        expLSet = [iSet1]
-        self.assertEquals(expLSet, obsLSet)
-        
-    def test_getSetListStrictlyIncludedInQueryCoord_return_empty_list(self):
-        start = 4000
-        end = 40000
-        seqName = 'chr1'
-        self._iDb.createBinSetTable(self._tableName, True)
-        obsLSet = self._iTableBinSetAdaptator.getSetListStrictlyIncludedInQueryCoord(seqName, start, end)
-        expLSet = []
-        self.assertEquals(expLSet, obsLSet)
-        
-    def test_getIdList(self):
-        expLId = [1,2,3]
-        self._iDb.createBinSetTable(self._tableName, True)
-        obsLId = self._iTableBinSetAdaptator.getIdList()
-        self.assertEquals(expLId, obsLId)
-        
-    def test_getSeqNameList(self):
-        self._iDb.dropTable( self._tableName )
-        self._iDb.dropTable( self._tableName_bin )
-        setF = open( self._setFileName, "w" )
-        setF.write("1\tseq1\tchr2\t1900\t3900\n")
-        setF.write("2\tseq2\tchr1\t2\t9\n")
-        setF.write("3\tseq3\tchr1\t8\t13\n")
-        setF.close()
-        self._iDb.createTable( self._tableName, "set", self._setFileName )
-        self._iDb.createBinSetTable(self._tableName, True)
-        expLSeqName = ["chr1", "chr2"]
-        obsLSeqName = self._iTableBinSetAdaptator.getSeqNameList()
-        self.assertEquals(expLSeqName, obsLSeqName)
-        
-    def test_insertListInSetAndBinTable(self):
-        iSet1 = Set(1, "seq4", "chr1", 100, 390)
-        iSet2 = Set(2, "seq5", "chr1", 1, 13)
-        lSet = [iSet1, iSet2]
-        self._iDb.createBinSetTable(self._tableName, True)
-        self._iTableBinSetAdaptator.insertListInSetAndBinTable(lSet)
-        expTupleInBinTable = ((1L, 10000.0, 'chr1', 1900L, 3900L, 1L), (2L, 1000.0, 'chr1', 2L, 9L, 1L), (3L, 1000.0, 'chr1', 8L, 13L, 1L), (4L, 1000.0, 'chr1', 100L, 390L, 1L), (4L, 1000.0, 'chr1', 1L, 13L, 1L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName_bin )
-        self._iDb.execute( sqlCmd )
-        obsTupleInBinTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInBinTable, obsTupleInBinTable)
-        expTupleInSetTable = ((1L, 'seq1', 'chr1', 1900L, 3900L), (2L, 'seq2', 'chr1', 2L, 9L), (3L, 'seq3', 'chr1', 8L, 13L), (4L, 'seq4', 'chr1', 100L, 390L), (4L, 'seq5', 'chr1', 1L, 13L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName )
-        self._iDb.execute( sqlCmd )
-        obsTupleInSetTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInSetTable, obsTupleInSetTable)
-        
-    def test_insertListInSetAndBinTableAndMergeAllSets(self):
-        iSet1 = Set(1, "seq4", "chr1", 100, 390)
-        iSet2 = Set(2, "seq5", "chr1", 1, 13)
-        lSet = [iSet1, iSet2]
-        self._iDb.createBinSetTable(self._tableName, True)
-        self._iTableBinSetAdaptator.insertListInSetAndBinTableAndMergeAllSets(lSet)
-        expTupleInBinTable = ((1L, 10000.0, 'chr1', 1900L, 3900L, 1L), (5L, 1000.0, 'chr1', 1L, 13L, 1L), (4L, 1000.0, 'chr1', 100L, 390L, 1L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName_bin )
-        self._iDb.execute( sqlCmd )
-        obsTupleInBinTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInBinTable, obsTupleInBinTable)
-        expTupleInSetTable = ((1L, 'seq1', 'chr1', 1900L, 3900L), (5L, 'seq5', 'chr1', 1L, 13L), (4L, 'seq4', 'chr1', 100L, 390L) )
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName )
-        self._iDb.execute( sqlCmd )
-        obsTupleInSetTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInSetTable, obsTupleInSetTable)
-
-    def test_insertListInSetAndBinTableAndRemoveOverlaps(self):
-        iSet1 = Set(1, "seq4", "chr1", 100, 390)
-        iSet2 = Set(2, "seq5", "chr1", 1, 13)
-        lSet = [iSet1, iSet2]
-        self._iDb.createBinSetTable(self._tableName, True)
-        self._iTableBinSetAdaptator.insertListInSetAndBinTableAndRemoveOverlaps(lSet)
-        expTupleInBinTable = ((1L, 10000.0, 'chr1', 1900L, 3900L, 1L), (2L, 1000.0, 'chr1', 2L, 9L, 1L), (3L, 1000.0, 'chr1', 8L, 13L, 1L), (4L, 1000.0, 'chr1', 100L, 390L, 1L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName_bin )
-        self._iDb.execute( sqlCmd )
-        obsTupleInBinTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInBinTable, obsTupleInBinTable)
-        expTupleInSetTable = ((1L, 'seq1', 'chr1', 1900L, 3900L), (2L, 'seq2', 'chr1', 2L, 9L), (3L, 'seq3', 'chr1', 8L, 13L), (4L, 'seq4', 'chr1', 100L, 390L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName )
-        self._iDb.execute( sqlCmd )
-        obsTupleInSetTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInSetTable, obsTupleInSetTable)
-
-    def test_insertListInSetAndBinTableAndRemoveOverlaps_Without_Overlaps(self):
-        iSet1 = Set(1, "seq4", "chr1", 100, 390)
-        iSet2 = Set(2, "seq5", "chr1", 50, 65)
-        lSet = [iSet1, iSet2]
-        self._iDb.createBinSetTable(self._tableName, True)
-        self._iTableBinSetAdaptator.insertListInSetAndBinTableAndRemoveOverlaps(lSet)
-        expTupleInBinTable = ((1L, 10000.0, 'chr1', 1900L, 3900L, 1L), (2L, 1000.0, 'chr1', 2L, 9L, 1L), (3L, 1000.0, 'chr1', 8L, 13L, 1L), (4L, 1000.0, 'chr1', 100L, 390L, 1L), (5L, 1000.0, 'chr1', 50L, 65L, 1L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName_bin )
-        self._iDb.execute( sqlCmd )
-        obsTupleInBinTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInBinTable, obsTupleInBinTable)
-        expTupleInSetTable = ((1L, 'seq1', 'chr1', 1900L, 3900L), (2L, 'seq2', 'chr1', 2L, 9L), (3L, 'seq3', 'chr1', 8L, 13L), (4L, 'seq4', 'chr1', 100L, 390L), (5L, 'seq5', 'chr1', 50L, 65L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName )
-        self._iDb.execute( sqlCmd )
-        obsTupleInSetTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInSetTable, obsTupleInSetTable)
-
-    def test_insertListInSetAndBinTableAndRemoveOverlaps_With_Only_Overlaps(self):
-        iSet1 = Set(1, "seq4", "chr1", 1, 5)
-        iSet2 = Set(2, "seq5", "chr1", 8, 13)
-        lSet = [iSet1, iSet2]
-        self._iDb.createBinSetTable(self._tableName, True)
-        self._iTableBinSetAdaptator.insertListInSetAndBinTableAndRemoveOverlaps(lSet)
-        expTupleInBinTable = ((1L, 10000.0, 'chr1', 1900L, 3900L, 1L), (2L, 1000.0, 'chr1', 2L, 9L, 1L), (3L, 1000.0, 'chr1', 8L, 13L, 1L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName_bin )
-        self._iDb.execute( sqlCmd )
-        obsTupleInBinTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInBinTable, obsTupleInBinTable)
-        expTupleInSetTable = ((1L, 'seq1', 'chr1', 1900L, 3900L), (2L, 'seq2', 'chr1', 2L, 9L), (3L, 'seq3', 'chr1', 8L, 13L))
-        sqlCmd = "SELECT * FROM %s" % ( self._tableName )
-        self._iDb.execute( sqlCmd )
-        obsTupleInSetTable = self._iDb.cursor.fetchall()
-        self.assertEquals(expTupleInSetTable, obsTupleInSetTable)
-                          
-if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
--- a/commons/core/sql/test/Test_TableJobAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,640 +0,0 @@
-import unittest
-import sys
-import os
-import time
-#import stat
-#import threading
-from commons.core.sql.DbMySql import DbMySql
-#from commons.core.sql.DbSQLite import DbSQLite
-from commons.core.sql.Job import Job
-from commons.core.utils.FileUtils import FileUtils
-from commons.core.sql.TableJobAdaptatorFactory import TableJobAdaptatorFactory
-
-#class Test_TableJobAdaptator_SQLite( unittest.TestCase ):
-#        
-#    def setUp(self):
-#        self._jobTableName = "dummyJobTable"
-#        self._dbName = "test.db"
-#        self._db = DbSQLite(self._dbName)
-#        self._iTJA = TableJobAdaptator(self._db, self._jobTableName)
-#        if not self._db.doesTableExist(self._jobTableName):
-#            self._db.createJobTable(self._jobTableName)
-#        self._iJob = self._createJobInstance()
-#        
-#    def tearDown(self):
-#        self._iTJA = None
-#        self._db.close()
-##        self._db.delete()
-#        
-##    def test_recordJob(self):
-##        self._iTJA.recordJob(self._iJob)
-##        qryParams = "SELECT jobid, groupid, command, launcher, queue, status, node FROM " + self._jobTableName + " WHERE jobid = ?" 
-##        params = (self._iJob.jobid,)
-##        self._db.execute(qryParams, params)
-##        tObs = self._db.fetchall()[0]
-##        tExp =(self._iJob.jobid, self._iJob.groupid, self._iJob.command, self._iJob.launcher, self._iJob.queue, "waiting", "?")
-##        self.assertEquals(tExp,tObs)
-##    
-##    def test_removeJob(self):
-##        self._iTJA.recordJob(self._iJob)
-##        self._iTJA.removeJob(self._iJob)
-##        self.assertTrue(self._db.isEmpty(self._jobTableName))
-##        
-##    def test_getJobStatus(self):
-##        self._iTJA.recordJob(self._iJob)
-##        expStatus = "waiting"
-##        obsStatus = self._iTJA.getJobStatus(self._iJob)
-##        self.assertEquals(expStatus, obsStatus)
-##        
-##    def test_getJobStatus_no_job(self):
-##        expStatus = "unknown"
-##        obsStatus = self._iTJA.getJobStatus(self._iJob)
-##        self.assertEquals(expStatus, obsStatus)
-##
-##    def test_getJobStatus_no_name(self):
-##        iJob = Job( self._jobTableName, 20, "", "groupid", "queue", "command", "launcherFile", "node", "lResources" ) 
-##        expStatus = "unknown"
-##        obsStatus = self._iTJA.getJobStatus(iJob)
-##        self.assertEquals(expStatus, obsStatus)
-##        
-##    def test_getJobStatus_two_jobs(self):
-##        # Warning: this case will not happen, because recordJob() starts by calling removeJob()
-##        sqlCmd = "INSERT INTO %s" % self._iJob.tablename
-##        sqlCmd += " VALUES ("
-##        sqlCmd += " \"%s\"," % self._iJob.jobid
-##        sqlCmd += " \"%s\"," % self._iJob.jobname
-##        sqlCmd += " \"%s\"," % self._iJob.groupid
-##        sqlCmd += " \"%s\"," % self._iJob.command.replace("\"","\'")
-##        sqlCmd += " \"%s\"," % self._iJob.launcher
-##        sqlCmd += " \"%s\"," % self._iJob.queue
-##        sqlCmd += " \"waiting\","
-##        sqlCmd += " \"%s\"," % time.strftime( "%Y-%m-%d %H:%M:%S" )
-##        sqlCmd += " \"?\" );"
-##        self._db.execute(sqlCmd)
-##        self._db.execute(sqlCmd)
-##        
-##        expError = "expError.txt"
-##        expErrorHandler = open(expError, "w")
-##        expErrorHandler.write("ERROR while getting job status: non-unique jobs\n")
-##        expErrorHandler.close()
-##        obsError = "obsError.txt"
-##        obsErrorHandler = open(obsError, "w")
-##        stderrRef = sys.stderr
-##        sys.stderr = obsErrorHandler
-##        
-##        isSysExitRaised = False
-##        try:
-##            self._iTJA.getJobStatus(self._iJob)
-##        except SystemExit:
-##            isSysExitRaised = True
-##           
-##        obsErrorHandler.close()
-##        
-##        self.assertTrue(isSysExitRaised)
-##        self.assertTrue(FileUtils.are2FilesIdentical(expError, obsError))
-##        sys.stderr = stderrRef
-##        os.remove(obsError)
-##        os.remove(expError)
-##
-##    def test_changeJobStatus(self):
-##        expStatus = "finished"
-##        self._iTJA.recordJob(self._iJob)
-##        self._iTJA.changeJobStatus(self._iJob, expStatus)
-##        qryParams = "SELECT status FROM " + self._jobTableName + " WHERE jobid =? AND groupid=? AND queue=?" 
-##        params = (self._iJob.jobid, self._iJob.groupid, self._iJob.queue)
-##        self._db.execute(qryParams, params)
-##        obsStatus = self._db.fetchall()[0][0]
-##        self.assertEquals(expStatus, obsStatus)
-##        self._iTJA.removeJob(self._iJob)
-##        
-##    def test_getCountStatus(self):
-##        iJob1 = self._createJobInstance()
-##        iJob2 = Job(self._jobTableName, 1, "job2", "groupid", "queue2", "command2", "launcherFile2", "node2", "lResources2")
-##        self._iTJA.recordJob(iJob1)
-##        self._iTJA.recordJob(iJob2)
-##        expCount = 2
-##        obsCount = self._iTJA.getCountStatus(self._jobTableName, iJob1.groupid, "waiting")
-##        self.assertEquals(expCount, obsCount)
-##        
-##    def test_getCountStatus_without_res(self):
-##        expCount = 0
-##        obsCount = self._iTJA.getCountStatus(self._jobTableName, "groupid", "waiting")
-##        self.assertEquals(expCount, obsCount)
-##   
-##    def test_cleanJobGroup(self):
-##        iJob1 = self._createJobInstance()
-##        iJob2 = Job(self._jobTableName, "jobid2", iJob1.groupid, "queue2", "command2", "launcherFile2", "node2", "lResources2")
-##        iJob3 = Job(self._jobTableName, "jobid2", "groupid3", "queue2", "command2", "launcherFile2", "node2", "lResources2")
-##        self._iTJA.recordJob(iJob1)
-##        self._iTJA.recordJob(iJob2)
-##        self._iTJA.recordJob(iJob3)
-##        self._iTJA.cleanJobGroup(self._jobTableName, iJob1.groupid)
-##        qryParams = "SELECT count(*) FROM " + self._jobTableName  
-##        self._db.execute(qryParams)
-##        expCount = 1
-##        obsCount = self._db.fetchall()[0][0]
-##        self.assertEquals(expCount, obsCount)
-##                
-##    def test_hasUnfinishedJob_one_waiting_one_finished(self):
-##        iJob1 = self._createJobInstance()
-##        iJob2 = Job(self._jobTableName, 0, "jobname2", iJob1.groupid, "queue2", "command2", "launcherFile2", "node2", "lResources2")
-##        iJob3 = Job(self._jobTableName, 0, "jobname3", "groupid3", "queue2", "command2", "launcherFile2", "node2", "lResources2")
-##        self._iTJA.recordJob(iJob1)
-##        self._iTJA.recordJob(iJob2)
-##        self._iTJA.recordJob(iJob3)
-##        self._iTJA.changeJobStatus(iJob2, "finished")
-##        expHasGrpIdFinished = True
-##        obsHasGrpIdFinished = self._iTJA.hasUnfinishedJob(self._jobTableName, iJob1.groupid)
-##        self.assertEquals(expHasGrpIdFinished, obsHasGrpIdFinished)
-##        
-##    def test_hasUnfinishedJob_jobTable_doesnt_exist(self):
-##        self._db.dropTable(self._jobTableName)
-##        expHasGrpIdFinished = False
-##        obsHasGrpIdFinished = self._iTJA.hasUnfinishedJob(self._jobTableName, self._iJob.groupid)
-##        self.assertEquals(expHasGrpIdFinished, obsHasGrpIdFinished)
-##        
-##    def test_hasUnfinishedJob_all_jobs_finished_for_same_groupid(self): 
-##        iJob1 = self._createJobInstance()
-##        iJob2 = Job(self._jobTableName, "jobid2", iJob1.groupid, "queue2", "command2", "launcherFile2", "node2", "lResources2")
-##        iJob3 = Job(self._jobTableName, "jobid2", "groupid3", "queue2", "command2", "launcherFile2", "node2", "lResources2")
-##        self._iTJA.recordJob(iJob1)
-##        self._iTJA.recordJob(iJob2)
-##        self._iTJA.recordJob(iJob3)
-##        self._iTJA.changeJobStatus(iJob1, "finished")
-##        self._iTJA.changeJobStatus(iJob2, "finished")
-##        expHasGrpIdFinished = False
-##        obsHasGrpIdFinished = self._iTJA.hasUnfinishedJob(self._jobTableName, iJob1.groupid)
-##        self.assertEquals(expHasGrpIdFinished, obsHasGrpIdFinished)
-##
-##    def test_waitJobGroup_with_finished_job(self):
-##        obs = False
-##        self._iTJA.recordJob(self._iJob)
-##        self._iTJA.changeJobStatus(self._iJob, "finished")
-##        try:
-##            self._iTJA.waitJobGroup(self._jobTableName ,self._iJob.groupid, 0, 0)
-##        except SystemExit:
-##            obs = True
-##        self.assertFalse(obs)
-##        
-##    def test_waitJobGroup_with_error_job_maxRelaunch_zero(self):
-##        obs = False
-##        self._iTJA.recordJob(self._iJob)
-##        self._iTJA.changeJobStatus(self._iJob, "error")
-##        try:
-##            self._iTJA.waitJobGroup(self._jobTableName ,self._iJob.groupid, 0, 0)
-##        except SystemExit:
-##            obs = True
-##        self.assertTrue(obs)
-##        
-##    def test_setJobIdFromSge(self):
-##        self._iTJA.recordJob(self._iJob)
-##        self._iTJA.setJobIdFromSge(self._iJob, 1000)
-##        qryParams = "SELECT jobid FROM " + self._jobTableName + " WHERE jobname = ? AND queue = ? AND groupid = ?" 
-##        params = (self._iJob.jobname, self._iJob.queue, self._iJob.groupid)
-##        self._db.execute(qryParams, params)
-##        tObs = self._db.fetchall()[0]
-##        tExp =(1000,)
-##        self.assertEquals(tExp,tObs)
-##                
-##    def test_submitJob_8_fields_for_job_table(self):
-##        self._db.dropTable(self._jobTableName)
-##        sqlCmd = "CREATE TABLE " + self._jobTableName 
-##        sqlCmd += " ( jobid INT UNSIGNED"
-##        sqlCmd += ", groupid VARCHAR(255)"
-##        sqlCmd += ", command TEXT"
-##        sqlCmd += ", launcher VARCHAR(1024)"
-##        sqlCmd += ", queue VARCHAR(255)"
-##        sqlCmd += ", status VARCHAR(255)"
-##        sqlCmd += ", time DATETIME"
-##        sqlCmd += ", node VARCHAR(255) )"
-##        self._db.execute(sqlCmd)
-##        self._iTJA.submitJob(self._iJob)
-##        expFieldsNb = 9
-##        obsFieldsNb = len(self._db.getFieldList(self._jobTableName))
-##        self.assertEquals(expFieldsNb, obsFieldsNb)
-##        os.remove("jobid.stdout")
-##        
-##    def test_getNodesListByGroupId(self):
-##        iJob1 = Job( self._jobTableName, 0, "job1", "groupid", "queue", "command", "launcherFile", "node1", "lResources" )
-##        iJob2 = Job( self._jobTableName, 1, "job2", "groupid", "queue", "command", "launcherFile", "node2", "lResources" )
-##        iJob3 = Job( self._jobTableName, 2, "job3", "groupid2", "queue", "command", "launcherFile", "node3", "lResources" )
-##        self._insertJob(iJob1)
-##        self._insertJob(iJob2)
-##        self._insertJob(iJob3)
-##        expNodeList = ["node1", "node2"]
-##        obsNodeList = self._iTJA.getNodesListByGroupId(self._jobTableName, "groupid")
-##        self.assertEquals(expNodeList, obsNodeList)
-##        
-##    def test_getNodesListByGroupId_empty_list(self):
-##        iJob1 = Job( self._jobTableName, 0, "job1", "groupid", "queue", "command", "launcherFile", "node1", "lResources" )
-##        iJob2 = Job( self._jobTableName, 1, "job2", "groupid", "queue", "command", "launcherFile", "node2", "lResources" )
-##        iJob3 = Job( self._jobTableName, 2, "job3", "groupid32", "queue", "command", "launcherFile", "node3", "lResources" )
-##        self._insertJob(iJob1)
-##        self._insertJob(iJob2)
-##        self._insertJob(iJob3)
-##        expNodeList = []
-##        obsNodeList = self._iTJA.getNodesListByGroupId(self._jobTableName, "groupid3")
-##        self.assertEquals(expNodeList, obsNodeList)
-##
-##    def test_commitJob(self):
-##        iJob1 = Job( self._jobTableName, 0, "job1", "groupid", "queue", "command", "launcherFile", "node1", "lResources" )
-##        self._insertJob(iJob1)
-##        
-##        expJobStatus = "waiting"
-##        obsJobStatus = self._iTJA.getJobStatus(self._iJob)
-##        self.assertEquals(expJobStatus, obsJobStatus)
-##        expJobStatus = "waiting"
-##        obsJobStatus = self._iTJA.getJobStatus(self._iJob)
-##        self.assertEquals(expJobStatus, obsJobStatus)
-##        self._db.close()
-##        
-##        self._db = DbSQLite(self._dbName)
-##        self._iTJA = TableJobAdaptator(self._db, self._jobTableName)
-##        expJobStatus = "waiting"
-##        obsJobStatus = self._iTJA.getJobStatus(self._iJob)
-##        self.assertEquals(expJobStatus, obsJobStatus)
-##        
-##    def _insertJob(self, iJob):
-##        self._iTJA = TableJobAdaptator(self._db, self._jobTableName)        
-##        self._iTJA.removeJob( iJob )
-##        sqlCmd = "INSERT INTO %s" % ( iJob.tablename )
-##        sqlCmd += " VALUES ("
-##        sqlCmd += " \"%s\"," % ( iJob.jobid )
-##        sqlCmd += " \"%s\"," % ( iJob.jobname )
-##        sqlCmd += " \"%s\"," % ( iJob.groupid )
-##        sqlCmd += " \"%s\"," % ( iJob.command.replace("\"","\'") )
-##        sqlCmd += " \"%s\"," % ( iJob.launcher )
-##        sqlCmd += " \"%s\"," % ( iJob.queue )
-##        sqlCmd += " \"waiting\","
-##        sqlCmd += " \"%s\"," % ( time.strftime( "%Y-%m-%d %H:%M:%S" ) )
-##        sqlCmd += " \"%s\" );" % ( iJob.node )
-##        self._db.execute( sqlCmd )
-#
-##    def testRecordJob_in_parallel_with_2_thread(self) :
-##        job1 = Job(self._jobTableName, 0, "job1", "test", "", "date;sleep 5;date", "./launcherFileTest_job1.py")
-##        job2 = Job(self._jobTableName, 0, "job2", "test", "", "date;sleep 5;date", "./launcherFileTest_job2.py")
-##        
-##        db1 = DbSQLite('threadJobTable.db')
-##        db1.createJobTable(self._jobTableName)
-##        
-##        db2 = DbSQLite(self._dbName)
-##        
-##        iTJA1 = TableJobAdaptator(db1, self._jobTableName)
-##        iTJA2 = TableJobAdaptator(db2, self._jobTableName)
-##        
-##        iRJT1 = RecordJobThread(iTJA1, job1)
-##        iRJT2 = RecordJobThread(iTJA2, job2)
-##        iRJT1.start()
-##        iRJT2.start()
-##        
-##        while iRJT1.isAlive() or iRJT2.isAlive():
-##            time.sleep(5)
-##        
-##        expJobStatus = "waiting"
-##        obsJobStatus1 = iTJA1.getJobStatus(job1)
-##        obsJobStatus2 = iTJA2.getJobStatus(job2)
-##                
-##        self.assertEquals(expJobStatus, obsJobStatus1)
-##        self.assertEquals(expJobStatus, obsJobStatus2)
-##        db1.db.close()
-##        db1.delete()
-##        
-#
-#    def test_ThreadRecordJob_sqlite3_connection_object_different_instances(self):
-#        
-##        for i in range(1, 11):
-##            job = Job(self._jobTableName, 0, "job%s"% i, "test_Thread", "", "date;sleep 5;date", "./launcherFileTest_job%s.py" % i)
-##            db1 = DbSQLite(self._dbName)
-##            iTJA1 = TableJobAdaptator(db1, self._jobTableName)
-##            iRJT1 = RecordJobThread(iTJA1, job)
-#
-#        #self._db.createJobTable(self._jobTableName)
-#        
-#        for i in range(1, 30) :
-#            job = "job%s"% i
-#            db = "db%s"%i
-#            job = Job(self._jobTableName, 0, "job%s"% i, "test_Thread", "", "date;sleep 5;date", "./launcherFileTest_job%s.py" % i)
-#            db = DbSQLite(self._dbName)
-#            if i == 1 :
-#                db.createJobTable(self._jobTableName)
-#            iTJA = TableJobAdaptator(db, self._jobTableName)
-#            iRJT = RecordJobThread(iTJA, job)
-#            iRJT.start()
-#
-#            #while iRJT.isAlive() :
-#                #time.sleep(1)
-#            
-##        job1 = Job(self._jobTableName, 0, "job1", "test", "", "date;sleep 5;date", "./launcherFileTest_job1.py")
-##        self._createLauncherFile(job1)
-##        job2 = Job(self._jobTableName, 0, "job2", "test", "", "date;sleep 5;date", "./launcherFileTest_job2.py")
-##        self._createLauncherFile(job2)
-##        
-##        db1 = DbSQLite(self._dbName)
-##        db2 = DbSQLite(self._dbName)
-##        
-##        iTJA1 = TableJobAdaptator(db1, self._jobTableName)
-##        iTJA2 = TableJobAdaptator(db2, self._jobTableName)
-##        
-##        
-##        iRJT1 = RecordJobThread(iTJA1, job1)
-##        iRJT2 = RecordJobThread(iTJA2, job2)
-##        
-##        iRJT1.start()
-##        iRJT2.start()
-##    
-##        while iRJT1.isAlive() or iRJT2.isAlive():
-##            time.sleep(5)
-#
-#
-##        self.assertNotEquals(iRJT1._iTableJobAdaptator._iDb.db, iRJT2._iTableJobAdaptator._iDb.db)
-#        
-#
-#    def _createLauncherFile(self, iJob):
-#        jobFileHandler = open(iJob.launcher , "w")
-##        self.cdir
-##        self.job
-#        cDir = os.getcwd()
-#
-#        launcher = "#!/usr/bin/python\n"
-#        launcher += "import os\n"
-#        launcher += "import sys\n"
-#        
-#        launcher += "print \"system:\", os.uname()\n"
-#        launcher += "sys.stdout.flush()\n"
-#        
-#        newStatus = "running"
-#        launcher += "from commons.core.sql.Job import Job\n"
-#        launcher += "from commons.core.sql.DbSQLite import DbSQLite\n"
-#        launcher += "from commons.core.sql.TableJobAdaptator import TableJobAdaptator\n"
-#        launcher += "iJob = Job('%s', %s, '%s', '%s')\n" % (iJob.tablename, iJob.jobid, iJob.jobname, iJob.groupid)
-#        launcher += "iDb = DbSQLite('%s/%s')\n" % (cDir, self._dbName)
-#        launcher += "iTJA = TableJobAdaptator(iDb, '%s')\n" % self._jobTableName
-#        launcher += "if not iDb.doesTableExist('%s'):\n" % (iJob.tablename)
-#        launcher += "\tiDb.createJobTable('%s')\n" % self._jobTableName
-#        
-#        launcher += "iTJA.changeJobStatus(iJob, '%s')\n" % newStatus
-#        
-#        launcher += "print \"LAUNCH: " + iJob.command + "\"\n"
-#        launcher += "sys.stdout.flush()\n"
-#        launcher += "exitStatus = os.system (\"" + iJob.command + "\")\n"
-#        launcher += "if exitStatus != 0:\n"
-#        launcher += "\tprint \"ERROR: " + iJob.command + " returned exit status '%i'\" % ( exitStatus )\n"
-#        
-#        newStatus = "finished"
-#        launcher += "iTJA.changeJobStatus(iJob, '%s')\n" % newStatus
-#        launcher += "iDb.close()\n"
-#        
-#        launcher += "sys.exit(0)\n"
-#        jobFileHandler.write(launcher)
-#        jobFileHandler.close()
-#        os.chmod(iJob.launcher, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
-#         
-#    def _createJobInstance(self):
-#        return Job( self._jobTableName, 0, "job1", "groupid", "queue", "command", "launcherFile", "node", "lResources" )
-         
-
-class Test_TableJobAdaptator_MySQL( unittest.TestCase ):
-
-    def setUp(self):
-        self._jobTableName = "dummyJobTable"
-        self._db = DbMySql()
-        self._iTJA = TableJobAdaptatorFactory.createInstance(self._db, self._jobTableName)   
-        self._db.createTable(self._jobTableName, "jobs", overwrite = True)
-        self._iJob = self._createJobInstance()      
-          
-    def tearDown(self):
-        self._db.dropTable(self._jobTableName)
-        self._iTJA = None
-        self._db.close()
-                     
-    def test_recordJob(self):
-        self._iTJA.recordJob(self._iJob)
-        qryParams = "SELECT jobid, jobname, groupid, launcher, queue, resources, status, node FROM " + self._jobTableName + " WHERE jobid = %s"
-        params = (self._iJob.jobid,)
-        self._db.execute(qryParams, params)
-        tObs = self._db.fetchall()[0]
-        tExp =(self._iJob.jobid, self._iJob.jobname, self._iJob.groupid, self._iJob.launcher, self._iJob.queue, "['mem_free=10M']", "waiting", "?")
-        self.assertEquals(tExp,tObs)
-
-    def test_removeJob(self):
-        self._iTJA.recordJob(self._iJob)
-        self._iTJA.removeJob(self._iJob)
-        isTableEmpty = self._db.isEmpty(self._jobTableName)
-        self.assertTrue(isTableEmpty)
-    
-    def test_getJobStatus(self):
-        self._iTJA.recordJob(self._iJob)
-        expStatus = "waiting"
-        obsStatus = self._iTJA.getJobStatus(self._iJob)
-        self.assertEquals(expStatus, obsStatus)
-    
-    def test_getJobStatus_no_job(self):
-        expStatus = "unknown"
-        obsStatus = self._iTJA.getJobStatus(self._iJob)
-        self.assertEquals(expStatus, obsStatus)
-
-    def test_getJobStatus_no_name(self):
-        iJob = Job(20, "", "groupid", "queue", "command", "launcherFile", "node", "lResources")
-        expStatus = "unknown"
-        obsStatus = self._iTJA.getJobStatus(iJob)
-        self.assertEquals(expStatus, obsStatus)
-            
-    def test_getJobStatus_two_jobs(self):
-        # Warning: this case will not happen, because recordJob() starts by calling removeJob()
-        sqlCmd = "INSERT INTO %s" % self._jobTableName
-        sqlCmd += " VALUES ("
-        sqlCmd += " \"%s\"," % self._iJob.jobid
-        sqlCmd += " \"%s\"," % self._iJob.jobname
-        sqlCmd += " \"%s\"," % self._iJob.groupid
-        sqlCmd += " \"%s\"," % self._iJob.launcher
-        sqlCmd += " \"%s\"," % self._iJob.queue
-        sqlCmd += " \"%s\"," % self._iJob.lResources
-        sqlCmd += " \"waiting\","
-        sqlCmd += " \"%s\"," % time.strftime("%Y-%m-%d %H:%M:%S")
-        sqlCmd += " \"?\" );"
-        self._db.execute(sqlCmd)
-        self._db.execute(sqlCmd)
-        
-        expError = "expError.txt"
-        expErrorHandler = open(expError, "w")
-        expErrorHandler.write("ERROR while getting job status: non-unique jobs\n")
-        expErrorHandler.close()
-        obsError = "obsError.txt"
-        obsErrorHandler = open(obsError, "w")
-        stderrRef = sys.stderr
-        sys.stderr = obsErrorHandler
-        
-        isSysExitRaised = False
-        try:
-            self._iTJA.getJobStatus(self._iJob)
-        except SystemExit:
-            isSysExitRaised = True
-        obsErrorHandler.close()
-        self.assertTrue(isSysExitRaised)
-        self.assertTrue(FileUtils.are2FilesIdentical(expError, obsError))
-        sys.stderr = stderrRef
-        os.remove(obsError)
-        os.remove(expError)
-        
-    def test_changeJobStatus(self):
-        expStatus = "finished"
-        self._iTJA.recordJob(self._iJob)
-        self._iTJA.changeJobStatus(self._iJob, expStatus)
-        qryParams = "SELECT status FROM " + self._jobTableName + " WHERE jobid =%s AND groupid=%s AND queue=%s" 
-        params = (self._iJob.jobid, self._iJob.groupid, self._iJob.queue)
-        self._db.execute(qryParams, params)
-        obsStatus = self._db.fetchall()[0][0]
-        self.assertEquals(expStatus, obsStatus)
-        
-    def test_getCountStatus(self):
-        iJob1 = self._createJobInstance()
-        iJob2 = Job(1, "job2", "groupid", "queue2", "command2", "launcherFile2", "node2", "lResources2")
-        self._iTJA.recordJob(iJob1)
-        self._iTJA.recordJob(iJob2)
-        expCount = 2
-        obsCount = self._iTJA.getCountStatus(iJob1.groupid, "waiting")
-        self.assertEquals(expCount, obsCount)
-        
-    def test_getCountStatus_without_res(self):
-        expCount = 0
-        obsCount = self._iTJA.getCountStatus("groupid", "waiting")
-        self.assertEquals(expCount, obsCount)
-
-    def test_cleanJobGroup(self):
-        iJob1 = self._createJobInstance()
-        iJob2 = Job(2, "jobid2", iJob1.groupid, "queue2", "command2", "launcherFile2", "node2", "lResources2")
-        iJob3 = Job(3, "jobid2", "groupid3", "queue2", "command2", "launcherFile2", "node2", "lResources2")
-        self._iTJA.recordJob(iJob1)
-        self._iTJA.recordJob(iJob2)
-        self._iTJA.recordJob(iJob3)
-        self._iTJA.cleanJobGroup(iJob1.groupid)
-        qryParams = "SELECT count(*) FROM %s" % self._jobTableName  
-        self._db.execute(qryParams)
-        expCount = 1
-        obsCount = self._db.fetchall()[0][0]
-        self.assertEquals(expCount, obsCount)
-  
-    def test_hasUnfinishedJob_one_waiting_one_finished(self):
-        iJob1 = self._createJobInstance()
-        iJob2 = Job(0, "jobname2", iJob1.groupid, "queue2", "command2", "launcherFile2", "node2", "lResources2")
-        iJob3 = Job(0, "jobname3", "groupid3", "queue2", "command2", "launcherFile2", "node2", "lResources2")
-        self._iTJA.recordJob(iJob1)
-        self._iTJA.recordJob(iJob2)
-        self._iTJA.recordJob(iJob3)
-        self._iTJA.changeJobStatus(iJob2, "finished")
-        expHasGrpIdFinished = True
-        obsHasGrpIdFinished = self._iTJA.hasUnfinishedJob(iJob1.groupid)
-        self.assertEquals(expHasGrpIdFinished, obsHasGrpIdFinished)
-        
-    def test_hasUnfinishedJob_all_jobs_finished_for_same_groupid(self): 
-        iJob1 = self._createJobInstance()
-        iJob2 = Job(2, "jobid2", iJob1.groupid, "queue2", "command2", "launcherFile2", "node2", "lResources2")
-        iJob3 = Job(3, "jobid2", "groupid3", "queue2", "command2", "launcherFile2", "node2", "lResources2")
-        self._iTJA.recordJob(iJob1)
-        self._iTJA.recordJob(iJob2)
-        self._iTJA.recordJob(iJob3)
-        self._iTJA.changeJobStatus(iJob1, "finished")
-        self._iTJA.changeJobStatus(iJob2, "finished")
-        expHasGrpIdFinished = False
-        obsHasGrpIdFinished = self._iTJA.hasUnfinishedJob(iJob1.groupid)
-        self.assertEquals(expHasGrpIdFinished, obsHasGrpIdFinished)
-
-    def test_waitJobGroup_with_finished_job(self):
-        obs = False
-        self._iTJA.recordJob(self._iJob)
-        self._iTJA.changeJobStatus(self._iJob, "finished")
-        try:
-            self._iTJA.waitJobGroup(self._iJob.groupid, 0, 0)
-        except SystemExit:
-            obs = True
-        self.assertFalse(obs)
-        
-    def test_waitJobGroup_with_error_job_maxRelaunch_zero(self):
-        obs = False
-        self._iTJA.recordJob(self._iJob)
-        self._iTJA.changeJobStatus(self._iJob, "error")
-        try:
-            self._iTJA.waitJobGroup(self._iJob.groupid, 0, 0)
-        except SystemExit:
-            obs = True
-        self.assertTrue(obs)
-        
-    # TODO: how to test this?
-#    def test_waitJobGroup_with_error_relaunch(self):
-#        iJob = Job(0, "job1", "groupid", "queue.q", "command", "launcherFile", "node", ["mem_free=10M", "test=TRUE"])
-#        obs = False
-#        self._iTJA.recordJob(iJob)
-#        self._iTJA.changeJobStatus(iJob, "error")
-#        try:
-#            self._iTJA.waitJobGroup(iJob.groupid)
-#        except SystemExit:
-#            obs = True
-#        self.assertTrue(obs)
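One way to make the commented-out relaunch test above tractable, sketched here with purely hypothetical names (FakeScheduler, wait_group) and no dependency on TableJobAdaptator or a real job manager, is to inject a stub whose submit() reports "error" a fixed number of times before reporting "finished":

# Minimal sketch, independent of the REPET classes: FakeScheduler and
# wait_group() are made-up names used only to illustrate how a
# relaunch-on-error loop can be unit-tested without a batch scheduler.
import unittest

class FakeScheduler(object):
    def __init__(self, failures_before_success):
        self._failures_left = failures_before_success
        self.submissions = 0

    def submit(self):
        # Record each (re)submission and report "error" until the quota of
        # simulated failures is exhausted, then report "finished".
        self.submissions += 1
        if self._failures_left > 0:
            self._failures_left -= 1
            return "error"
        return "finished"

def wait_group(scheduler, max_relaunch):
    # Re-submit while the job ends in error, at most max_relaunch times.
    status = scheduler.submit()
    relaunches = 0
    while status == "error" and relaunches < max_relaunch:
        status = scheduler.submit()
        relaunches += 1
    if status == "error":
        raise SystemExit("job still in error after %d relaunches" % relaunches)
    return relaunches

class TestRelaunch(unittest.TestCase):
    def test_relaunch_until_finished(self):
        scheduler = FakeScheduler(failures_before_success=2)
        self.assertEqual(2, wait_group(scheduler, max_relaunch=3))
        self.assertEqual(3, scheduler.submissions)

    def test_exit_when_max_relaunch_exceeded(self):
        scheduler = FakeScheduler(failures_before_success=5)
        self.assertRaises(SystemExit, wait_group, scheduler, 1)

if __name__ == "__main__":
    unittest.main()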
-    
-    def test_updateJobIdInDB(self):
-        self._iTJA.recordJob(self._iJob)
-        self._iTJA.updateJobIdInDB(self._iJob, 1000)
-        qryParams = "SELECT jobid FROM " + self._jobTableName + " WHERE jobname = %s AND queue = %s AND groupid = %s" 
-        params = (self._iJob.jobname, self._iJob.queue, self._iJob.groupid)
-        self._db.execute(qryParams, params)
-        tObs = self._db.fetchall()[0]
-        tExp = (1000,)
-        self.assertEquals(tExp, tObs)
-
-    def test_getNodesListByGroupId(self):
-        iJob1 = Job(0, "job1", "groupid", "queue", "command", "launcherFile", "node1", "lResources")
-        iJob2 = Job(1, "job2", "groupid", "queue", "command", "launcherFile", "node2", "lResources")
-        iJob3 = Job(2, "job3", "groupid", "queue", "command", "launcherFile", "node2", "lResources")
-        iJob4 = Job(3, "job4", "groupid2", "queue", "command", "launcherFile", "node3", "lResources")
-        self._insertJob(iJob1)
-        self._insertJob(iJob2)
-        self._insertJob(iJob3)
-        self._insertJob(iJob4)
-        expNodeList = ["node1", "node2"]
-        obsNodeList = self._iTJA.getNodesListByGroupId("groupid")
-        self.assertEquals(expNodeList, obsNodeList)
-
-    def test_getNodesListByGroupId_empty_list(self):
-        iJob1 = Job(0, "job1", "groupid", "queue", "command", "launcherFile", "node1", "lResources")
-        iJob2 = Job(1, "job2", "groupid", "queue", "command", "launcherFile", "node2", "lResources")
-        iJob3 = Job(2, "job3", "groupid32", "queue", "command", "launcherFile", "node3", "lResources")
-        self._insertJob(iJob1)
-        self._insertJob(iJob2)
-        self._insertJob(iJob3)
-        expNodeList = []
-        obsNodeList = self._iTJA.getNodesListByGroupId("groupid3")
-        self.assertEquals(expNodeList, obsNodeList)
-        
-# TODO: test TableJobAdaptator._createJobInstance and TableJobAdaptator._createLauncherFile
-    def _insertJob(self, iJob):
-        self._iTJA = TableJobAdaptatorFactory.createInstance(self._db, self._jobTableName)        
-        self._iTJA.removeJob(iJob)
-        sqlCmd = "INSERT INTO %s" % self._jobTableName
-        sqlCmd += " VALUES ("
-        sqlCmd += " \"%s\"," % iJob.jobid
-        sqlCmd += " \"%s\"," % iJob.jobname
-        sqlCmd += " \"%s\"," % iJob.groupid
-        sqlCmd += " \"%s\"," % iJob.launcher
-        sqlCmd += " \"%s\"," % iJob.queue
-        sqlCmd += " \"%s\"," % iJob.lResources
-        sqlCmd += " \"waiting\","
-        sqlCmd += " \"%s\"," % time.strftime("%Y-%m-%d %H:%M:%S")
-        sqlCmd += " \"%s\" );" % iJob.node
-        self._db.execute(sqlCmd)
-
-    def _createJobInstance(self):
-        return Job(0, "job1", "groupid", "", "command", "launcherFile", "node", ["mem_free=10M"])
-
-#class RecordJobThread(threading.Thread):
-#
-#    def __init__(self, iTableJobAdaptator, iJob):
-#        threading.Thread.__init__(self)
-#        self._iTableJobAdaptator = iTableJobAdaptator
-#        self._iJob = iJob
-#        
-#    def run(self):
-#        self._iTableJobAdaptator.recordJob(self._iJob)
-#        #self._iTableJobAdaptator.submitJob(self._iJob)
-                                             
-if __name__ == "__main__":
-    unittest.main()
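As a side note on the _insertJob helper above, which builds its INSERT statement by interpolating quoted values directly into the SQL string: a parameterised variant avoids quoting pitfalls while keeping the same column order (jobid, jobname, groupid, launcher, queue, resources, status, time, node). The sketch below is illustrative only; it uses the standard-library sqlite3 module rather than the REPET DbMySql wrapper, and insert_job is a hypothetical helper, not the deleted code.

# Sketch of a parameterised insert; the table name still has to be
# interpolated (DB-API placeholders cover values only), but every value
# goes through a placeholder instead of manual quoting.
import sqlite3
import time

def insert_job(connection, table, job_row):
    # job_row: (jobid, jobname, groupid, launcher, queue, resources, node)
    sql = "INSERT INTO %s VALUES (?, ?, ?, ?, ?, ?, 'waiting', ?, ?)" % table
    params = job_row[:6] + (time.strftime("%Y-%m-%d %H:%M:%S"), job_row[6])
    connection.execute(sql, params)

if __name__ == "__main__":
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE jobs (jobid INTEGER, jobname TEXT, groupid TEXT,"
                 " launcher TEXT, queue TEXT, resources TEXT, status TEXT,"
                 " time TEXT, node TEXT)")
    insert_job(conn, "jobs",
               (0, "job1", "groupid", "launcherFile", "q", "mem_free=10M", "node1"))
    assert conn.execute("SELECT count(*) FROM jobs").fetchone()[0] == 1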
--- a/commons/core/sql/test/Test_TableJobAdaptatorFactory.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-import os
-import unittest
-from commons.core.sql.TableJobAdaptatorFactory import TableJobAdaptatorFactory
-from commons.core.sql.DbFactory import DbFactory
-
-class Test_TableJobAdaptatorFactory(unittest.TestCase):
-
-    def test_createInstance_SGE(self):
-        REPET_JOB_MANAGER_Initial_Value = os.environ["REPET_JOB_MANAGER"]
-        os.environ["REPET_JOB_MANAGER"] = "SGE"
-        instance = TableJobAdaptatorFactory.createInstance(DbFactory.createInstance(), "dummyJobTable")
-        obsClassName = instance.__class__.__name__
-        expClassName = "TableJobAdaptatorSGE"
-        os.environ["REPET_JOB_MANAGER"] = REPET_JOB_MANAGER_Initial_Value
-        self.assertEquals(expClassName, obsClassName)
-
-    def test_createInstance_Torque(self):
-        REPET_JOB_MANAGER_Initial_Value = os.environ["REPET_JOB_MANAGER"]
-        os.environ["REPET_JOB_MANAGER"] = "Torque"
-        instance = TableJobAdaptatorFactory.createInstance(DbFactory.createInstance(), "dummyJobTable")
-        obsClassName = instance.__class__.__name__
-        expClassName = "TableJobAdaptatorTorque"
-        os.environ["REPET_JOB_MANAGER"] = REPET_JOB_MANAGER_Initial_Value
-        self.assertEquals(expClassName, obsClassName)
-
-if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
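The two factory tests above restore REPET_JOB_MANAGER only after createInstance() returns, so a failure inside the factory would leak the overridden value. A self-contained sketch of the same dispatch-on-environment pattern with a try/finally restore is given below; ExampleFactory, SgeAdaptor, TorqueAdaptor and EXAMPLE_JOB_MANAGER are hypothetical stand-ins, not the REPET classes or variable.

# Illustrative only: env-var driven dispatch plus a try/finally restore
# in the test helper, so the environment is clean even if creation fails.
import os
import unittest

class SgeAdaptor(object):
    pass

class TorqueAdaptor(object):
    pass

class ExampleFactory(object):
    @staticmethod
    def create_instance():
        # Dispatch on the job-manager name found in the environment.
        manager = os.environ.get("EXAMPLE_JOB_MANAGER", "SGE")
        if manager == "Torque":
            return TorqueAdaptor()
        return SgeAdaptor()

class TestExampleFactory(unittest.TestCase):
    def _create_with_manager(self, name):
        previous = os.environ.get("EXAMPLE_JOB_MANAGER")
        os.environ["EXAMPLE_JOB_MANAGER"] = name
        try:
            return ExampleFactory.create_instance()
        finally:
            # Restore (or remove) the variable even if creation raised.
            if previous is None:
                del os.environ["EXAMPLE_JOB_MANAGER"]
            else:
                os.environ["EXAMPLE_JOB_MANAGER"] = previous

    def test_create_instance_sge(self):
        self.assertEqual("SgeAdaptor",
                         self._create_with_manager("SGE").__class__.__name__)

    def test_create_instance_torque(self):
        self.assertEqual("TorqueAdaptor",
                         self._create_with_manager("Torque").__class__.__name__)

if __name__ == "__main__":
    unittest.main()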
--- a/commons/core/sql/test/Test_TableMapAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,250 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-import unittest
-import time
-import os
-from commons.core.sql.TableMapAdaptator import TableMapAdaptator
-from commons.core.sql.DbMySql import DbMySql
-from commons.core.coord.Map import Map
-from commons.core.coord.Set import Set
-
-
-class Test_TableMapAdaptator( unittest.TestCase ):
-    
-    def setUp( self ):
-        self._uniqId = "%s_%s" % ( time.strftime("%Y%m%d%H%M%S") , os.getpid() )
-        self._configFileName = "dummyConfigFile_%s" % ( self._uniqId )
-        configF = open(self._configFileName, "w" )
-        configF.write( "[repet_env]\n" )
-        configF.write( "repet_host: %s\n" % ( os.environ["REPET_HOST"] ) )
-        configF.write( "repet_user: %s\n" % ( os.environ["REPET_USER"] ) )
-        configF.write( "repet_pw: %s\n" % ( os.environ["REPET_PW"] ) )
-        configF.write( "repet_db: %s\n" % ( os.environ["REPET_DB"] ) )
-        configF.write( "repet_port: %s\n" % ( os.environ["REPET_PORT"] ) )
-        configF.close()
-        self._iDb = DbMySql( cfgFileName=self._configFileName )
-        self._table = "dummyMapTable_%s" % ( self._uniqId )
-        self._tMapA = TableMapAdaptator( self._iDb, self._table )
-        
-        
-    def tearDown( self ):
-        self._uniqId = None
-        self._iDb.dropTable( self._table )
-        self._iDb.close()
-        self._table = None
-        self._tMapA = None
-        os.remove( self._configFileName )
-        self._configFileName = ""
-        
-##################################################################################
-################## Tests for methods in ITableMapAdaptator #######################
-##################################################################################    
-
-    def test_getEndFromSeqName(self):
-        self._iDb.createTable( self._table, "map", "" )
-        map1 = Map()
-        map1.setFromString( "name1\tdesc1\t1\t120\n" )
-        map2 = Map()
-        map2.setFromString( "name2\tdesc2\t1\t20\n" )
-        for m in [ map1, map2]:
-            self._tMapA.insert(m)
-        expEnd = 20
-        obsEnd = self._tMapA.getEndFromSeqName("desc2")
-        self.assertEqual(expEnd, obsEnd)     
-        
-
-    def test_getMapListFromSeqName( self ):
-        self._iDb.createTable( self._table, "map", "" )
-        map1 = Map()
-        map1.setFromString( "name1\tdesc1\t1\t120\n" )
-        map2 = Map()
-        map2.setFromString( "name2\tdesc2\t1\t20\n" )
-        map3 = Map()
-        map3.setFromString( "name2\tdesc2\t1\t50\n" )
-        for m in [ map1, map2, map3 ]: self._tMapA.insert( m )
-        lExp = [ map2, map3 ]
-        lObs = self._tMapA.getMapListFromSeqName("name2")
-        self.assertEqual( lObs, lExp )     
-
-    def test_getMapListFromChr( self ):
-        self._iDb.createTable( self._table, "map", "" )
-        map1 = Map()
-        map1.setFromString( "name1\tchr1\t1\t120\n" )
-        map2 = Map()
-        map2.setFromString( "name2\tchr2\t1\t20\n" )
-        map3 = Map()
-        map3.setFromString( "name2\tchr2\t1\t50\n" )
-        for m in [ map1, map2, map3 ]: self._tMapA.insert( m )
-        lExp = [ map2, map3 ]
-        lObs = self._tMapA.getMapListFromChr("chr2")
-        self.assertEqual( lObs, lExp )
-
-    def test_getSeqNameList(self):
-        self._iDb.createTable( self._table, "map", "" )
-        map1 = Map()
-        map1.setFromString( "name1\tdesc1\t1\t120\n" )
-        map2 = Map()
-        map2.setFromString( "name2\tdesc2\t1\t20\n" )
-        map3 = Map()
-        map3.setFromString( "name2\tdesc2\t1\t50\n" )
-        for m in [ map1, map2, map3 ]: self._tMapA.insert( m )
-        lExp = ["desc1", "desc2"]
-        lObs = self._tMapA.getSeqNameList()
-        self.assertEqual( lObs, lExp )
-        
-    def test_insert_one_element( self ):
-        map2Insert = Map()
-        map2Insert.name="name1"
-        map2Insert.seqname="name2"
-        map2Insert.start=1L
-        map2Insert.end=50L
-        self._iDb.createTable( self._table, "map", "" )
-        self._tMapA.insert( map2Insert )
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._iDb.execute( sqlCmd )
-        expTmapTuple = (('name1', 'name2', 1L, 50L),)
-        obsTmapTuples = self._iDb.cursor.fetchall()
-        self.assertEquals( expTmapTuple, obsTmapTuples )
-        
-    def test_insert_two_elements( self ):   
-        map1 = Map()
-        map1.setFromString( "name1\tdesc1\t1\t120\n" )
-        map2 = Map()
-        map2.setFromString( "name2\tdesc2\t1\t20\n" )
-        self._iDb.createTable( self._table, "map", "" )
-        for m in [ map1, map2 ]: self._tMapA.insert( m )
-        expTmapTuple = ( ('name1', 'desc1', 1L, 120L), ('name2', 'desc2', 1L, 20L) )
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._iDb.execute( sqlCmd )
-        obsTmapTuples = self._iDb.cursor.fetchall()
-        self.assertEquals(expTmapTuple, obsTmapTuples )
-        
-    def test_insertList( self ):
-        self._iDb.createTable( self._table, "map", "" )
-        map1 = Map()
-        map1.setFromString( "name1\tdesc1\t1\t120\n" )
-        map2 = Map()
-        map2.setFromString( "name2\tdesc2\t1\t20\n" )
-        lmap = [ map1, map2 ]
-        self._tMapA.insertList( lmap )
-        lExp = lmap
-        lObs = self._tMapA.getListOfAllMaps()
-        self.assertEqual( lObs, lExp )
-        
-    def test_getSetListFromSeqName( self ):
-        self._iDb.createTable( self._table, "map", "" )
-        map1 = Map()
-        map1.setFromString( "name1\tdesc1\t1\t120\n" )
-        map2 = Map()
-        map2.setFromString( "name2\tdesc2\t1\t20\n" )
-        map3 = Map()
-        map3.setFromString( "name2\tdesc2\t1\t50\n" )
-        for m in [ map1, map2, map3 ]: self._tMapA.insert( m )
-        explMap = [Set( 1,"name2", "desc2", 1, 20), Set( 2,"name2", "desc2", 1, 50)]
-        obslMap = self._tMapA.getSetListFromSeqName("name2")
-        self.assertEqual( explMap, obslMap )
-        
-    def test_getMapListOverlappingCoord( self ):
-        self._iDb.createTable( self._table, "map", "" )
-        map1 = Map()
-        map1.setFromString( "name1\tdesc1\t70\t120\n" )
-        map2 = Map()
-        map2.setFromString( "name2\tdesc1\t1\t20\n" )
-        map3 = Map()
-        map3.setFromString( "name3\tdesc1\t1\t50\n" ) 
-        for m in [ map1, map2, map3 ]: self._tMapA.insert( m )
-        explMap = [Map("name2", "desc1", 1, 20), Map("name3", "desc1", 1, 50)]
-        obslMap = self._tMapA.getMapListOverlappingCoord("desc1", 1, 60)
-        self.assertEqual( explMap, obslMap )
-        
-    def test_getSetListOverlappingCoord( self ):
-        self._iDb.createTable( self._table, "map", "" )
-        map1 = Map()
-        map1.setFromString( "name1\tdesc1\t70\t120\n" )
-        map2 = Map()
-        map2.setFromString( "name2\tdesc1\t1\t20\n" )
-        map3 = Map()
-        map3.setFromString( "name3\tdesc1\t1\t50\n" ) 
-        for m in [ map1, map2, map3 ]: self._tMapA.insert( m )
-        explSet = [Set(1, "name2", "desc1", 1, 20), Set(2, "name3", "desc1", 1, 50)]
-        obslSet = self._tMapA.getSetListOverlappingCoord("desc1", 1, 60)
-        self.assertEqual( explSet, obslSet )
-        
-##################################################################################
-########################### Tests for other methods ##############################
-##################################################################################
-        
-    def test_getListOfAllMaps( self ):
-        self._iDb.createTable( self._table, "map", "" )
-        map1 = Map()
-        map1.setFromString( "name1\tdesc1\t1\t120\n" )
-        map2 = Map()
-        map2.setFromString( "name2\tdesc2\t1\t20\n" )
-        for m in [ map1, map2 ]: self._tMapA.insert( m )
-        lExp = [ map1, map2 ]
-        lObs = self._tMapA.getListOfAllMaps()
-        self.assertEqual( lObs, lExp )
-        
-    def test_getDictPerNameFromMapFile( self ):
-        self._iDb.createTable( self._table, "map", "" )
-        iMap1 = Map( "chunk1", "chromosome1", 1, 100 )
-        iMap2 = Map( "chunk2", "chromosome1", 91, 190 )
-        iMap3 = Map( "chunk3", "chromosome2", 1, 100 )
-        iMap4 = Map( "chunk1", "chromosome1", 1, 100 )  # redundant with iMap1
-        for iMap in [ iMap1, iMap2, iMap3, iMap4 ]:
-            self._tMapA.insert( iMap )
-        dExp = { "chunk1": iMap1, "chunk2": iMap2, "chunk3": iMap3 }
-        dObs = self._tMapA.getDictPerName()
-        self.assertEquals( dExp, dObs )
-        
-#TODO: Check getListFromSeqName method: uses name instead of seqname
-#    def test_getMapListFromSeqNameList( self ):
-#        self._iDb.createTable( self._table, "map", "" )
-#        map1 = Map()
-#        map1.setFromString( "name1\tdesc1\t1\t120\n" )
-#        map2 = Map()
-#        map2.setFromString( "name2\tdesc2\t1\t20\n" )
-#        map3 = Map()
-#        map3.setFromString( "name3\tdesc2\t1\t10\n" )
-#        map4 = Map()
-#        map4.setFromString( "name4\tdesc3\t10\t200\n" )
-#        for m in [map1, map2, map3, map4]: self._tMapA.insert( m )
-#        
-#        lMapToRetrieve = ["name1", "desc2"]
-#        lExp = [map1, map2, map3]
-#        lObs = self._tMapA.getMapListFromSeqNameList(lMapToRetrieve)
-#        self.assertEqual( lObs, lExp )
-        
-test_suite = unittest.TestSuite()
-test_suite.addTest( unittest.makeSuite( Test_TableMapAdaptator ) )
-if __name__ == "__main__":
-    unittest.TextTestRunner(verbosity=2).run( test_suite )
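The map tests above repeatedly build records from tab-separated name/seqname/start/end strings. For readers unfamiliar with that format, here is a minimal standalone sketch of such a record; SimpleMap is an illustrative stand-in, not the commons.core.coord.Map class.

# Minimal stand-in for the 4-column "map" format used in the tests above
# (name, seqname, start, end, tab-separated).
class SimpleMap(object):
    def __init__(self, name="", seqname="", start=-1, end=-1):
        self.name = name
        self.seqname = seqname
        self.start = int(start)
        self.end = int(end)

    def set_from_string(self, line):
        # Expect "name<TAB>seqname<TAB>start<TAB>end" with an optional newline.
        name, seqname, start, end = line.rstrip("\n").split("\t")
        self.__init__(name, seqname, start, end)

    def __eq__(self, other):
        return (self.name, self.seqname, self.start, self.end) == \
               (other.name, other.seqname, other.start, other.end)

if __name__ == "__main__":
    m = SimpleMap()
    m.set_from_string("name1\tdesc1\t1\t120\n")
    assert m == SimpleMap("name1", "desc1", 1, 120)
    print("parsed %s:%d-%d on %s" % (m.name, m.start, m.end, m.seqname))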
--- a/commons/core/sql/test/Test_TableMatchAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,264 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-import unittest
-import time
-import os
-from commons.core.sql.DbMySql import DbMySql
-from commons.core.coord.Match import Match
-from commons.core.sql.TableMatchAdaptator import TableMatchAdaptator
-
-
-class Test_TableMatchAdaptator( unittest.TestCase ):
-    
-    def setUp( self ):
-        self._uniqId = "%s_%s" % (time.strftime("%Y%m%d%H%M%S") , os.getpid())
-        self._configFileName = "dummyConfigFile_%s" % self._uniqId
-        self._iDb = DbMySql()
-        self._table = "dummyMatchTable_%s" % self._uniqId
-        self._tMatchA = TableMatchAdaptator( self._iDb, self._table )
-        
-    def tearDown( self ):
-        self._uniqId = None
-        self._iDb.dropTable( self._table )
-        self._iDb.close()
-        self._table = None
-        self._tMatchA = None
-        
-##################################################################################
-################## Tests for methods in ITableMatchAdaptator #####################
-##################################################################################  
-    def test_insert(self):
-        match = Match()  
-
-        tuple = ("QName1", 1, 5, 5, 0.1, 0.2, "SName1", 5, 25, 20, 0.15, 1e-20, 15, 87.2, 1)
-       
-        match.setFromTuple(tuple)
-                              
-        self._iDb.createTable( self._table, "match", "" )        
-        self._tMatchA.insert( match, False )
-        
-        expTMatchTuple = (('QName1', 1L, 5L, 5L, 0.1, 0.2, 'SName1', 5L, 25L, 20L, 0.15, 1e-20, 15L, 87.2, 1L),)
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._iDb.execute( sqlCmd )
-        obsTmatchTuple = self._iDb.cursor.fetchall()
-        
-        self.assertEquals( expTMatchTuple, obsTmatchTuple )
-        
-
-    def test_insert_empty_match(self):
-        match = Match()  
-
-        tuple = ("", -1, -1, 5, 0.1, 0.2, "SName1", 5, 25, 20, 0.15, 1e-20, 15, 87.2, 1)
-       
-        match.setFromTuple(tuple)
-                              
-        self._iDb.createTable( self._table, "match", "" )        
-        self._tMatchA.insert( match, False )
-        
-        expTMatchTuple = ()
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._iDb.execute( sqlCmd )
-        obsTmatchTuple = self._iDb.cursor.fetchall()
-        
-        self.assertEquals( expTMatchTuple, obsTmatchTuple )  
-               
-    
-    def test_insertList(self):
-        match1 = Match() 
-        match2 = Match()   
-
-        tuple1 = ("QName1", 1, 5, 5, 0.1, 0.2, "SName1", 5, 25, 20, 0.15, 1e-20, 15, 87.2, 1)
-        tuple2 = ("QName2", 2, 5, 5, 0.1, 0.2, "SName2", 6, 25, 20, 0.15, 1e-20, 15, 87.2, 2)
-       
-        match1.setFromTuple(tuple1)
-        match2.setFromTuple(tuple2)
-                              
-        self._iDb.createTable( self._table, "match", "" )        
-        self._tMatchA.insertList( [ match1, match2 ], False )
-        
-        expTMatchTuple = (('QName1', 1L, 5L, 5L, 0.1, 0.2, 'SName1', 5L, 25L, 20L, 0.15, 1e-20, 15L, 87.2, 1L),\
-                          ('QName2', 2L, 5L, 5L, 0.1, 0.2, 'SName2', 6L, 25L, 20L, 0.15, 1e-20, 15L, 87.2, 2L))
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._iDb.execute( sqlCmd )
-        obsTmatchTuple = self._iDb.cursor.fetchall()
-        
-        self.assertEquals( expTMatchTuple, obsTmatchTuple )
-        
-        
-    def test_getMatchListFromQuery(self):
-        self._iDb.createTable( self._table, "match", "" )
-        tuple1 = ("QName", 1, 5, 5, 0.1, 0.2, "SName", 5, 25, 20, 0.15, 1e-20, 15, 87.2, 1)
-        tuple2 = ("QName", 1, 6, 6, 0.2, 0.1, "SName", 6, 26, 10, 0.18, 1e-30, 18, 85.2, 2)
-        tuple3 = ("QName", 1, 7, 8, 0.1, 0.2, "SName", 5, 20, 15, 0.20, 1e-25, 20, 89.0, 3)
-        tuple4 = ("QName", 1, 8, 8, 0.1, 0.1, "SName", 5, 15, 10, 0.17, 1e-23, 14, 89.5, 4)
-        match1 = Match()
-        match1.setFromTuple( tuple1 )
-        match2 = Match()
-        match2.setFromTuple( tuple2 )
-        match3 = Match()
-        match3.setFromTuple( tuple3 )
-        match4 = Match()
-        match4.setFromTuple( tuple4 )
-        expListMatch = [ match1, match2, match3, match4 ]
-        self._tMatchA.insertList(expListMatch)
-        
-        obsListMatch = self._tMatchA.getMatchListFromQuery("QName")
-        
-        self.assertEquals(expListMatch, obsListMatch)
-        
-        
-    def test_getMatchListFromQuery_unexisted_seq_name(self):
-        self._iDb.createTable( self._table, "match", "" )
-        tuple1 = ("QName", 1, 5, 5, 0.1, 0.2, "SName", 5, 25, 20, 0.15, 1e-20, 15, 87.2, 1)
-        tuple2 = ("QName", 1, 6, 6, 0.2, 0.1, "SName", 6, 26, 10, 0.18, 1e-30, 18, 85.2, 2)
-        tuple3 = ("QName", 1, 7, 8, 0.1, 0.2, "SName", 5, 20, 15, 0.20, 1e-25, 20, 89.0, 3)
-        tuple4 = ("QName", 1, 8, 8, 0.1, 0.1, "SName", 5, 15, 10, 0.17, 1e-23, 14, 89.5, 4)
-        match1 = Match()
-        match1.setFromTuple( tuple1 )
-        match2 = Match()
-        match2.setFromTuple( tuple2 )
-        match3 = Match()
-        match3.setFromTuple( tuple3 )
-        match4 = Match()
-        match4.setFromTuple( tuple4 )
-        lMatch = [ match1, match2, match3, match4 ]
-        self._tMatchA.insertList(lMatch)
-        
-        expListMatch = []
-        obsListMatch = self._tMatchA.getMatchListFromQuery("Dummy")
-        
-        self.assertEquals(expListMatch, obsListMatch)
-        
-
-    def test_getMatchListFromId(self):
-        self._iDb.createTable( self._table, "match", "" )
-        tuple1 = ("QName", 1, 5, 5, 0.1, 0.2, "SName", 5, 25, 20, 0.15, 1e-20, 15, 87.2, 1)
-        tuple2 = ("QName", 1, 6, 6, 0.2, 0.1, "SName", 6, 26, 10, 0.18, 1e-30, 18, 85.2, 2)
-        tuple3 = ("QName", 1, 7, 8, 0.1, 0.2, "SName", 5, 20, 15, 0.20, 1e-25, 20, 89.0, 3)
-        tuple4 = ("QName", 1, 8, 8, 0.1, 0.1, "SName", 5, 15, 10, 0.17, 1e-23, 14, 89.5, 4)
-        match1 = Match()
-        match1.setFromTuple( tuple1 )
-        match2 = Match()
-        match2.setFromTuple( tuple2 )
-        match3 = Match()
-        match3.setFromTuple( tuple3 )
-        match4 = Match()
-        match4.setFromTuple( tuple4 )
-        lMatch = [ match1, match2, match3, match4 ]
-        expListMatch = [ match1 ]
-        self._tMatchA.insertList(lMatch)
-        
-        obsListMatch = self._tMatchA.getMatchListFromId(1)
-        
-        self.assertEquals(expListMatch, obsListMatch)
-        
-        
-    def test_getMatchListFromIdList_empty_id_list( self ):
-        self._iDb.createTable( self._table, "match", "" )
-        tuple1 = ("QName", 1, 5, 5, 0.1, 0.2, "SName", 5, 25, 20, 0.15, 1e-20, 15, 87.2, 1)
-        tuple2 = ("QName", 1, 6, 6, 0.2, 0.1, "SName", 6, 26, 10, 0.18, 1e-30, 18, 85.2, 2)
-        tuple3 = ("QName", 1, 7, 8, 0.1, 0.2, "SName", 5, 20, 15, 0.20, 1e-25, 20, 89.0, 3)
-        tuple4 = ("QName", 1, 8, 8, 0.1, 0.1, "SName", 5, 15, 10, 0.17, 1e-23, 14, 89.5, 4)
-        match1 = Match()
-        match1.setFromTuple( tuple1 )
-        match2 = Match()
-        match2.setFromTuple( tuple2 )
-        match3 = Match()
-        match3.setFromTuple( tuple3 )
-        match4 = Match()
-        match4.setFromTuple( tuple4 )
-        lMatch = [ match1, match2, match3, match4 ]
-        self._tMatchA.insertList(lMatch)
-        
-        expList = []
-        obsList = self._tMatchA.getMatchListFromIdList([])
-        self.assertEquals(expList, obsList)
-        
-        
-    def test_getMatchListFromIdList( self ):
-        self._iDb.createTable( self._table, "match", "" )
-        tuple1 = ("QName", 1, 5, 5, 0.1, 0.2, "SName", 5, 25, 20, 0.15, 1e-20, 15, 87.2, 1)
-        tuple2 = ("QName", 1, 6, 6, 0.2, 0.1, "SName", 6, 26, 10, 0.18, 1e-30, 18, 85.2, 2)
-        tuple3 = ("QName", 1, 7, 8, 0.1, 0.2, "SName", 5, 20, 15, 0.20, 1e-25, 20, 89.0, 3)
-        tuple4 = ("QName", 1, 8, 8, 0.1, 0.1, "SName", 5, 15, 10, 0.17, 1e-23, 14, 89.5, 4)
-        match1 = Match()
-        match1.setFromTuple( tuple1 )
-        match2 = Match()
-        match2.setFromTuple( tuple2 )
-        match3 = Match()
-        match3.setFromTuple( tuple3 )
-        match4 = Match()
-        match4.setFromTuple( tuple4 )
-        lMatch = [ match1, match2, match3, match4 ]
-        self._tMatchA.insertList(lMatch)
-        
-        lObs = self._tMatchA.getMatchListFromIdList((1, 2, 3))
-        
-        lExp = [match1, match2, match3]
-        self.assertEquals(lExp, lObs)
-        
-    def test_getListOfAllMatches( self ):
-        self._iDb.createTable( self._table, "match", "" )
-        tuple1 = ("QName", 1, 5, 5, 0.1, 0.2, "SName", 5, 25, 20, 0.15, 1e-20, 15, 87.2, 1)
-        tuple2 = ("QName", 1, 6, 6, 0.2, 0.1, "SName", 6, 26, 10, 0.18, 1e-30, 18, 85.2, 2)
-        tuple3 = ("QName", 1, 7, 8, 0.1, 0.2, "SName", 5, 20, 15, 0.20, 1e-25, 20, 89.0, 3)
-        tuple4 = ("QName", 1, 8, 8, 0.1, 0.1, "SName", 5, 15, 10, 0.17, 1e-23, 14, 89.5, 4)
-        match1 = Match()
-        match1.setFromTuple( tuple1 )
-        match2 = Match()
-        match2.setFromTuple( tuple2 )
-        match3 = Match()
-        match3.setFromTuple( tuple3 )
-        match4 = Match()
-        match4.setFromTuple( tuple4 )
-        lMatch = [ match1, match2, match3, match4 ]
-        expList = [ match1, match2, match3, match4 ]
-        self._tMatchA.insertList(lMatch)
-
-        obsList = self._tMatchA.getListOfAllMatches()
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getListOfAllMatches_empty_table( self ):
-        self._iDb.createTable( self._table, "match", "" )
-        expList = []
-        obsList = self._tMatchA.getListOfAllMatches()
-        self.assertEqual( expList, obsList )
-        
-            
-test_suite = unittest.TestSuite()
-test_suite.addTest( unittest.makeSuite( Test_TableMatchAdaptator ) )
-if __name__ == "__main__":
-    unittest.TextTestRunner(verbosity=2).run( test_suite )
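test_insert_empty_match above pins down that a match with an empty query name (and unset coordinates) is not inserted. That guard can be illustrated with a small in-memory stand-in; InMemoryMatchStore below is hypothetical and not the TableMatchAdaptator API.

# Hypothetical in-memory store, only to show the "skip empty matches on
# insert" behaviour exercised by the tests above.
class InMemoryMatchStore(object):
    def __init__(self):
        self._rows = []

    def insert(self, match_tuple):
        # A match whose query name is empty or whose query start is unset
        # (-1) is considered empty and is not stored.
        query_name, query_start = match_tuple[0], match_tuple[1]
        if query_name == "" or query_start == -1:
            return False
        self._rows.append(match_tuple)
        return True

    def get_all(self):
        return list(self._rows)

if __name__ == "__main__":
    store = InMemoryMatchStore()
    store.insert(("QName1", 1, 5, 5, 0.1, 0.2, "SName1", 5, 25, 20,
                  0.15, 1e-20, 15, 87.2, 1))
    store.insert(("", -1, -1, 5, 0.1, 0.2, "SName1", 5, 25, 20,
                  0.15, 1e-20, 15, 87.2, 1))
    assert len(store.get_all()) == 1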
--- a/commons/core/sql/test/Test_TablePathAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1376 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-import unittest
-import os
-import time
-from commons.core.sql.TablePathAdaptator import TablePathAdaptator
-from commons.core.coord.Path import Path
-from commons.core.coord.Set import Set
-from commons.core.sql.DbMySql import DbMySql
-from commons.core.coord.Range import Range
-from commons.core.coord.PathUtils import PathUtils
-from copy import deepcopy
-
-class Test_TablePathAdaptator( unittest.TestCase ):
-    
-    def setUp( self ):
-        self._uniqId = "%s_%s" % ( time.strftime("%Y%m%d%H%M%S") , os.getpid() )
-        self._configFileName = "dummyConfigFile_%s" % ( self._uniqId )
-        configF = open(self._configFileName, "w" )
-        configF.write( "[repet_env]\n" )
-        configF.write( "repet_host: %s\n" % ( os.environ["REPET_HOST"] ) )
-        configF.write( "repet_user: %s\n" % ( os.environ["REPET_USER"] ) )
-        configF.write( "repet_pw: %s\n" % ( os.environ["REPET_PW"] ) )
-        configF.write( "repet_db: %s\n" % ( os.environ["REPET_DB"] ) )
-        configF.write( "repet_port: %s\n" % ( os.environ["REPET_PORT"] ) )
-        configF.close()
-        self._db = DbMySql( cfgFileName = self._configFileName )
-        self._table = "dummyPathTable_%s" % ( self._uniqId )
-        self._tpA = TablePathAdaptator( self._db, self._table )
-        
-        
-    def tearDown( self ):
-        self._uniqId = None
-        self._db.dropTable( self._table )
-        self._db.close()
-        self._table = None
-        self._tMatchA = None
-        os.remove( self._configFileName )
-        self._configFileName = ""  
-        
-        
-##################################################################################
-################## Tests for methods in ITableMapAdaptator #######################
-##################################################################################       
-     
-    def test_getPathListFromId( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write( "1\tchr1\t1\t6\tTE2\t11\t16\t1e-20\t30\t90.2\n" )
-        pathF.write( "2\tchr1\t1001\t1006\tTE2\t11\t16\t1e-20\t30\t90.2\n" )
-        pathF.write( "2\tchr1\t1201\t1226\tTE2\t10\t26\t1e-40\t70\t87.2\n" )
-        pathF.close()
-        p1 = Path()
-        p1.setFromString( "2\tchr1\t1001\t1006\tTE2\t11\t16\t1e-20\t30\t90.2\n" )
-        p2 = Path()
-        p2.setFromString( "2\tchr1\t1201\t1226\tTE2\t10\t26\t1e-40\t70\t87.2\n" )
-        lExp = [ p1, p2 ]
-        self._db.createTable( self._table, "path", pathFileName )
-        lObs = self._tpA.getPathListFromId( 2 )
-        self.assertEqual( lObs, lExp )
-        os.remove( pathFileName )
-        
-        
-    def test_getPathListFromIdList_empty_id_list( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t1\t10\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n")
-        pathF.write("3\tchr1\t8\t13\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-        self._db.createTable( self._table, "path", pathFileName )
-        
-        expList = []
-        obsList = self._tpA.getPathListFromIdList([])
-        self.assertEquals(expList, obsList)
-        
-        os.remove( pathFileName )
-        
-        
-    def test_getPathListFromIdList( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t1\t10\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n")
-        pathF.write("3\tchr1\t8\t13\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-
-        tuple = ("1", "chr1","1", "10","TE2","11","17","1e-20","30","90.2")
-        p1 = Path()
-        p1.setFromTuple(tuple)
-        
-        tuple = ("2", "chr1","2", "9","TE2","10","13","1e-20","30","90.2")
-        p2 = Path()
-        p2.setFromTuple(tuple)
-
-        tuple = ("3", "chr1","8", "13","TE2","11","17","1e-20","30","90.2")
-        p3 = Path()
-        p3.setFromTuple(tuple)
-        
-        self._db.createTable( self._table, "path", pathFileName )
-        lObs = self._tpA.getPathListFromIdList((1, 2, 3))
-        self.assertEquals(3, len(lObs))
-        
-        lExp = [p1, p2, p3]
-        self.assertEquals(lExp, lObs)
-        
-        os.remove( pathFileName )
-        
-        
-    def test_getPathListFromQuery( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t1\t10\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n")
-        pathF.write("3\tchr2\t8\t13\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-        self._db.createTable( self._table, "path", pathFileName )
-        path1 = Path()
-        path1.setFromString("1\tchr1\t1\t10\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        path2 = Path()
-        path2.setFromString("2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n")
-        expList = [ path1, path2 ]
-        obsList = self._tpA.getPathListFromQuery("chr1")
-        self.assertEquals( expList, obsList )
-        
-        os.remove( pathFileName )
-        
-        
-    def test_getPathListFromQuery_unexisted_query( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t1\t10\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n")
-        pathF.write("3\tchr2\t8\t13\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-        self._db.createTable( self._table, "path", pathFileName )
-        expList = []
-        obsList = self._tpA.getPathListFromQuery("dummy")
-        self.assertEquals( expList, obsList )
-        
-        os.remove( pathFileName )
-        
-        
-    def test_getPathListFromSubject( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t1\t10\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t2\t9\tTE3\t10\t13\t1e-20\t30\t90.2\n")
-        pathF.write("3\tchr2\t8\t13\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-        self._db.createTable( self._table, "path", pathFileName )
-        path1 = Path()
-        path1.setFromString("1\tchr1\t1\t10\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        path3 = Path()
-        path3.setFromString("3\tchr2\t8\t13\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        expList = [ path1, path3 ]
-        obsList = self._tpA.getPathListFromSubject("TE2")
-        self.assertEquals( expList, obsList )
-
-        os.remove( pathFileName )
-        
-        
-    def test_getPathListFromSubject_unexisted_subject( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t1\t10\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t2\t9\tTE3\t10\t13\t1e-20\t30\t90.2\n")
-        pathF.write("3\tchr2\t8\t13\tTE2\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-        self._db.createTable( self._table, "path", pathFileName )
-        expList = []
-        obsList = self._tpA.getPathListFromSubject("dummy")
-        self.assertEquals( expList, obsList )
-
-        os.remove( pathFileName )
-        
-        
-    def test_insert_QryDirSbjDir( self ):
-        path2Insert = Path()
-        
-        rangeQuery2Insert = Range()
-        rangeQuery2Insert.seqname = "chr1"
-        rangeQuery2Insert.start = 1
-        rangeQuery2Insert.end = 10
-        
-        rangeSubject2Insert = Range()
-        rangeSubject2Insert.seqname = "TE2"
-        rangeSubject2Insert.start = 11
-        rangeSubject2Insert.end = 17
-         
-        path2Insert.range_query = rangeQuery2Insert
-        path2Insert.range_subject = rangeSubject2Insert
-        
-        path2Insert.identity = 90.2
-        path2Insert.score = 30
-        path2Insert.e_value = 1e-20
-        path2Insert.id = 1
-        
-        self._db.createTable( self._table, "path" )
-        self._tpA.insert( path2Insert )
-        
-        expPathTuple = ((1L, "chr1", 1L, 10L, "TE2", 11L, 17L, 1e-20, 30L, 90.2),)
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-        
-    def test_insert_BigEvalue( self ):
-        path2Insert = Path()
-        
-        rangeQuery2Insert =  Range()
-        rangeQuery2Insert.seqname = "chr1"
-        rangeQuery2Insert.start = 1
-        rangeQuery2Insert.end = 10
-        
-        rangeSubject2Insert =  Range()
-        rangeSubject2Insert.seqname = "TE2"
-        rangeSubject2Insert.start = 11
-        rangeSubject2Insert.end = 17
-         
-        path2Insert.range_query = rangeQuery2Insert
-        path2Insert.range_subject = rangeSubject2Insert
-        
-        path2Insert.identity = 90.2
-        path2Insert.score = 30.0
-        path2Insert.e_value = 1e-300
-        path2Insert.id = 1
-        
-        self._db.createTable( self._table, "path" )
-        self._tpA.insert( path2Insert )
-        
-        expPathTuple = ((1L, "chr1", 1L, 10L, "TE2", 11L, 17L, 1e-300, 30L, 90.2),)
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-        
-    def test_insert_QryRevSbjDir( self ):
-        path2Insert = Path()
-        
-        rangeQuery2Insert =  Range()
-        rangeQuery2Insert.seqname = "chr1"
-        rangeQuery2Insert.start = 10
-        rangeQuery2Insert.end = 1
-        
-        rangeSubject2Insert =  Range()
-        rangeSubject2Insert.seqname = "TE2"
-        rangeSubject2Insert.start = 11
-        rangeSubject2Insert.end = 17
-         
-        path2Insert.range_query = rangeQuery2Insert
-        path2Insert.range_subject = rangeSubject2Insert
-        
-        path2Insert.identity = 90.2
-        path2Insert.score = 30
-        path2Insert.e_value = 1e-216
-        path2Insert.id = 1
-        
-        self._db.createTable( self._table, "path" )
-        self._tpA.insert( path2Insert )
-        
-        expPathTuple = ((1L, "chr1", 1L, 10L, "TE2", 17L, 11L, 1e-216, 30L, 90.2),)
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        
-        self.assertEquals(expPathTuple, obsPathTuple)
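The insert tests above (direct/direct, big e-value, reverse query) pin down a coordinate convention: when the query range arrives in descending order it is stored ascending and the subject range is reversed instead. The tiny standalone helper below, hypothetical and independent of TablePathAdaptator, makes that rule explicit.

# Illustrative helper for the convention checked by test_insert_QryDirSbjDir
# and test_insert_QryRevSbjDir: the query range is stored ascending, and when
# it has to be swapped the subject range is reversed to keep the relative
# orientation of query and subject.
def normalise_path(query_start, query_end, subject_start, subject_end):
    if query_start > query_end:
        query_start, query_end = query_end, query_start
        subject_start, subject_end = subject_end, subject_start
    return query_start, query_end, subject_start, subject_end

if __name__ == "__main__":
    # Direct query, direct subject: unchanged.
    assert normalise_path(1, 10, 11, 17) == (1, 10, 11, 17)
    # Reverse query: query flipped to ascending, subject reversed.
    assert normalise_path(10, 1, 11, 17) == (1, 10, 17, 11)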
-        
-        
-    def test_insertList(self):
-        p1 = Path()
-        p1.setFromString( "1\tchr1\t1\t10\tTE1\t11\t17\t1e-20\t30\t90.2\n" )
-        p2 = Path()
-        p2.setFromString( "2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n" )
-        p3 = Path()
-        p3.setFromString( "2\tchr1\t12\t19\tTE2\t15\t22\t1e-10\t40\t94.2\n" )
-        p4 = Path()
-        p4.setFromString( "3\tchr2\t8\t13\tTE1\t11\t17\t1e-20\t30\t90.2\n" )
-        lPath = [ p1, p2, p3, p4]
-        self._db.createTable( self._table, "path" )
-        self._tpA.insertList(lPath)
-        
-        tuple1 = (1L, "chr1", 1L, 10L, "TE1", 11L, 17L, 1e-20, 30L, 90.2)
-        tuple2 = (2L, "chr1", 2L, 9L, "TE2", 10L, 13L, 1e-20, 30L, 90.2)
-        tuple3 = (2L, "chr1", 12L, 19L, "TE2", 15L, 22L, 1e-10, 40L, 94.2)
-        tuple4 = (3L, "chr2", 8L, 13L, "TE1", 11L, 17L, 1e-20, 30L, 90.2)
-        expPathTuple = ( tuple1, tuple2, tuple3, tuple4)
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsPathTuple = self._db.cursor.fetchall()
-        
-        self.assertEquals(expPathTuple, obsPathTuple)
-        
-        
-    def test_getIdListFromQuery( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromString( "1\tchr1\t1\t10\tTE1\t11\t17\t1e-20\t30\t90.2\n" )
-        p2a = Path()
-        p2a.setFromString( "2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n" )
-        p2b = Path()
-        p2b.setFromString( "2\tchr1\t12\t19\tTE2\t15\t22\t1e-10\t40\t94.2\n" )
-        p3 = Path()
-        p3.setFromString( "3\tchr2\t8\t13\tTE1\t11\t17\t1e-20\t30\t90.2\n" )
-        for p in [ p1, p2a, p2b, p3 ]: self._tpA.insert( p )
-        lExp = [ 1, 2 ]
-        lObs = self._tpA.getIdListFromQuery( "chr1" )
-        self.assertEqual( lObs, lExp )
-        
-        
-    def test_getIdListFromSubject( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromString( "1\tchr1\t1\t10\tTE1\t11\t17\t1e-20\t30\t90.2\n" )
-        p2a = Path()
-        p2a.setFromString( "2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n" )
-        p2b = Path()
-        p2b.setFromString( "2\tchr1\t12\t19\tTE2\t15\t22\t1e-10\t40\t94.2\n" )
-        p3 = Path()
-        p3.setFromString( "3\tchr2\t8\t13\tTE1\t11\t17\t1e-20\t30\t90.2\n" )
-        for p in [ p1, p2a, p2b, p3 ]: self._tpA.insert( p )
-        lExp = [ 2 ]
-        lObs = self._tpA.getIdListFromSubject( "TE2" )
-        self.assertEqual( lObs, lExp )
-        
-        
-    def test_getIdList( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromString( "1\tchr1\t1\t10\tTE1\t11\t17\t1e-20\t30\t90.2\n" )
-        p2 = Path()
-        p2.setFromString( "2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n" )
-        p3 = Path()
-        p3.setFromString( "2\tchr1\t12\t19\tTE2\t15\t22\t1e-10\t40\t94.2\n" )
-        p4 = Path()
-        p4.setFromString( "3\tchr2\t8\t13\tTE1\t11\t17\t1e-20\t30\t90.2\n" )
-        lPath = [ p1, p2, p3, p4]
-        self._tpA.insertList(lPath)
-        expList = [ 1, 2, 3 ]
-        obsList = self._tpA.getIdList()
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getSubjectList( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t1\t10\tTE3\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t12\t19\tTE2\t15\t22\t1e-10\t40\t94.2\n")
-        pathF.write("3\tchr2\t8\t13\tTE1\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-        self._db.createTable( self._table, "path", pathFileName )
-        expList = [ "TE1", "TE2", "TE3" ]
-        obsList = self._tpA.getSubjectList()
-        self.assertEqual( expList, obsList )
-        os.remove( pathFileName )
-        
-        
-    def test_getSubjectList_empty_table( self ):
-        self._db.createTable( self._table, "path" )
-        expList = []
-        obsList = self._tpA.getSubjectList()
-        self.assertEqual( obsList, expList )
-        
-        
-    def test_getQueryList( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t1\t10\tTE3\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t12\t19\tTE2\t15\t22\t1e-10\t40\t94.2\n")
-        pathF.write("3\tchr2\t8\t13\tTE1\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-        self._db.createTable( self._table, "path", pathFileName )
-        expList = [ "chr1", "chr2" ]
-        obsList = self._tpA.getQueryList()
-        self.assertEqual( expList, obsList )
-        os.remove( pathFileName )
-        
-        
-    def test_getSubjectListFromQuery( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t1\t10\tTE3\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t12\t19\tTE2\t15\t22\t1e-10\t40\t94.2\n")
-        pathF.write("3\tchr2\t8\t13\tTE1\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-        self._db.createTable( self._table, "path", pathFileName )
-        expList = [ "TE3", "TE2" ]
-        obsList = self._tpA.getSubjectListFromQuery( "chr1" )
-        self.assertEqual( expList, obsList )
-        os.remove( pathFileName )
-        
-        
-    def test_getSubjectListFromQuery_with_unexisted_query( self ):
-        pathFileName = "dummyPathFile_%s" % ( self._uniqId )
-        pathF = open( pathFileName, "w" )
-        pathF.write("1\tchr1\t1\t10\tTE3\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.2\n")
-        pathF.write("2\tchr1\t12\t19\tTE2\t15\t22\t1e-10\t40\t94.2\n")
-        pathF.write("3\tchr2\t8\t13\tTE1\t11\t17\t1e-20\t30\t90.2\n")
-        pathF.close()
-        self._db.createTable( self._table, "path", pathFileName )
-        expList = []
-        obsList = self._tpA.getSubjectListFromQuery( "chr3" )
-        self.assertEqual( expList, obsList )
-        os.remove( pathFileName )
-        
-        
-    def test_getListOfAllPaths( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromString( "1\tchr1\t1\t10\tTE3\t11\t17\t1e-20\t30\t85.2\n" )
-        p2a = Path()
-        p2a.setFromString( "2\tchr1\t2\t9\tTE2\t10\t13\t1e-20\t30\t90.5\n" )
-        p2b = Path()
-        p2b.setFromString( "2\tchr1\t12\t19\tTE2\t15\t22\t1e-10\t40\t89.5\n" )
-        lPaths = [ p1, p2a, p2b ]
-        self._tpA.insertList( lPaths )
-        expList = [ p1, p2a, p2b ]
-        obsList = self._tpA.getListOfAllPaths()
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getListOfAllPaths_empty_table( self ):
-        self._db.createTable( self._table, "path" )
-        expList = []
-        obsList = self._tpA.getListOfAllPaths()
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getPathListWithDirectQueryDirectSubjectFromQuerySubject( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromString( "1\tchr1\t1\t100\tTE1\t11\t110\t1e-20\t130\t85.2\n" )
-        p2 = Path()
-        p2.setFromString( "2\tchr2\t1\t100\tTE1\t11\t110\t1e-20\t130\t85.2\n" )  # different query
-        p3 = Path()
-        p3.setFromString( "3\tchr1\t1\t100\tTE2\t11\t110\t1e-20\t130\t85.2\n" )  # different subject
-        p4 = Path()
-        p4.setFromString( "4\tchr1\t100\t1\tTE1\t11\t110\t1e-20\t130\t85.2\n" )  # query on reverse strand
-        p5 = Path()
-        p5.setFromString( "5\tchr1\t1\t100\tTE1\t110\t11\t1e-20\t130\t85.2\n" )  # subject on reverse strand
-        p6 = Path()
-        p6.setFromString( "6\tchr1\t301\t400\tTE1\t11\t110\t1e-20\t130\t85.2\n" )  # further along the query
-        for p in [ p1, p2, p3, p4, p5, p6 ]: self._tpA.insert( p )
-        expList = [ p1, p6 ]
-        obsList = self._tpA.getPathListWithDirectQueryDirectSubjectFromQuerySubject( "chr1", "TE1" )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getPathListWithDirectQueryReverseSubjectFromQuerySubject( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromString( "1\tchr1\t1\t100\tTE1\t110\t11\t1e-20\t130\t85.2\n" )
-        p2 = Path()
-        p2.setFromString( "2\tchr2\t1\t100\tTE1\t110\t11\t1e-20\t130\t85.2\n" )  # different query
-        p3 = Path()
-        p3.setFromString( "3\tchr1\t1\t100\tTE2\t110\t11\t1e-20\t130\t85.2\n" )  # different subject
-        p4 = Path()
-        p4.setFromString( "4\tchr1\t100\t1\tTE1\t110\t11\t1e-20\t130\t85.2\n" )  # query on reverse strand
-        p5 = Path()
-        p5.setFromString( "5\tchr1\t1\t100\tTE1\t11\t110\t1e-20\t130\t85.2\n" )  # subject on direct strand
-        p6 = Path()
-        p6.setFromString( "6\tchr1\t301\t400\tTE1\t110\t11\t1e-20\t130\t85.2\n" )  # further along the query
-        for p in [ p1, p2, p3, p4, p5, p6 ]: self._tpA.insert( p )
-        lExp = [ p1, p6 ]
-        lObs = self._tpA.getPathListWithDirectQueryReverseSubjectFromQuerySubject( "chr1", "TE1" )
-        self.assertEqual( lObs, lExp )
-        
-        
-    def test_isEmpty( self ):
-        self._db.createTable( self._table, "path" )
-        obs = self._tpA.isEmpty()
-        self.assertTrue( obs )
-        p = Path()
-        p.setFromTuple( ( "1", "qry1", "1", "100", "sbj1", "1", "100", "1e-32", "164", "97.5" ) )
-        self._tpA.insert( p )
-        obs = self._tpA.isEmpty()
-        self.assertFalse( obs )
-        
-        
-    def test_getNbPathsFromQuery( self ):
-        self._db.createTable( self._table, "path" )
-        p1a = Path()
-        p1a.setFromTuple( ( "1", "qry1", "1", "100", "sbj1", "1", "100", "1e-32", "164", "97.5" ) )
-        p1b = Path()
-        p1b.setFromTuple( ( "1", "qry1", "111", "200", "sbj1", "101", "190", "1e-32", "164", "97.5" ) )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "2001", "2200", "sbj3", "1", "200", "1e-76", "247", "96.2" ) )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry2", "1", "350", "sbj1", "1", "350", "1e-43", "624", "98.1" ) )
-        lPaths = [ p1a, p1b, p2, p3 ]
-        self._tpA.insertList( lPaths )
-        expNb = 3
-        obsNb = self._tpA.getNbPathsFromQuery( "qry1" )
-        self.assertEqual( expNb, obsNb )
-        
-        
-    def test_getNbPathsFromQuery_unexisted_query( self ):
-        self._db.createTable( self._table, "path" )
-        p1a = Path()
-        p1a.setFromTuple( ( "1", "qry1", "1", "100", "sbj1", "1", "100", "1e-32", "164", "97.5" ) )
-        p1b = Path()
-        p1b.setFromTuple( ( "1", "qry1", "111", "200", "sbj1", "101", "190", "1e-32", "164", "97.5" ) )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "2001", "2200", "sbj3", "1", "200", "1e-76", "247", "96.2" ) )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry2", "1", "350", "sbj1", "1", "350", "1e-43", "624", "98.1" ) )
-        lPaths = [ p1a, p1b, p2, p3 ]
-        self._tpA.insertList( lPaths )
-        expNb = 0
-        obsNb = self._tpA.getNbPathsFromQuery( "qry3" )
-        self.assertEqual( expNb, obsNb )
-        
-        
-    def test_getNbPathsFromSubject( self ):
-        self._db.createTable( self._table, "path" )
-        p1a = Path()
-        p1a.setFromTuple( ( "1", "qry1", "1", "100", "sbj1", "1", "100", "1e-32", "164", "97.5" ) )
-        p1b = Path()
-        p1b.setFromTuple( ( "1", "qry1", "111", "200", "sbj1", "101", "190", "1e-32", "164", "97.5" ) )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "2001", "2200", "sbj3", "1", "200", "1e-76", "247", "96.2" ) )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry2", "1", "350", "sbj1", "1", "350", "1e-43", "624", "98.1" ) )
-        lPaths = [ p1a, p1b, p2, p3 ]
-        self._tpA.insertList( lPaths )
-        expNb = 3
-        obsNb = self._tpA.getNbPathsFromSubject( "sbj1" )
-        self.assertEqual( expNb, obsNb )
-        
-        
-    def test_getNbPathsFromSubject_unexisted_subject( self ):
-        self._db.createTable( self._table, "path" )
-        p1a = Path()
-        p1a.setFromTuple( ( "1", "qry1", "1", "100", "sbj1", "1", "100", "1e-32", "164", "97.5" ) )
-        p1b = Path()
-        p1b.setFromTuple( ( "1", "qry1", "111", "200", "sbj1", "101", "190", "1e-32", "164", "97.5" ) )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "2001", "2200", "sbj3", "1", "200", "1e-76", "247", "96.2" ) )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry2", "1", "350", "sbj1", "1", "350", "1e-43", "624", "98.1" ) )
-        lPaths = [ p1a, p1b, p2, p3 ]
-        self._tpA.insertList( lPaths )
-        expNb = 0
-        obsNb = self._tpA.getNbPathsFromSubject( "qry1" )
-        self.assertEqual( expNb, obsNb )
-        
-        
-    def test_getNbIds( self ):
-        self._db.createTable( self._table, "path" )
-        p1a = Path()
-        p1a.setFromTuple( ( "1", "qry1", "1", "100", "sbj1", "1", "100", "1e-32", "164", "97.5" ) )
-        p1b = Path()
-        p1b.setFromTuple( ( "1", "qry1", "111", "200", "sbj1", "101", "190", "1e-32", "164", "97.5" ) )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "2001", "2200", "sbj3", "1", "200", "1e-76", "247", "96.2" ) )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry2", "1", "350", "sbj1", "1", "350", "1e-43", "624", "98.1" ) )
-        for p in [ p1a, p1b, p2, p3 ]: self._tpA.insert( p )
-        exp = 3
-        obs = self._tpA.getNbIds()
-        self.assertEqual( obs, exp )
-        
-        
-    def test_getNbIdsFromSubject( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj1", "1", "150", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-
-        expNb = 2
-        obsNb = self._tpA.getNbIdsFromSubject( "sbj1" )
-        self.assertEqual( expNb, obsNb )
-        
-        
-    def test_getNbIdsFromSubject_unexisted_subject( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj1", "1", "150", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-
-        expNb = 0
-        obsNb = self._tpA.getNbIdsFromSubject( "sbj2" )
-        self.assertEqual( expNb, obsNb ) 
-        
-        
-    def test_getNbIdsFromQuery( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj2", "1", "150", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-
-        expNb = 2
-        obsNb = self._tpA.getNbIdsFromQuery( "qry1" )
-        self.assertEqual( expNb, obsNb )
-        
-        
-    def test_getNbIdsFromQuery_unexisted_query( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj2", "1", "150", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-
-        expNb = 0
-        obsNb = self._tpA.getNbIdsFromQuery( "qry2" )
-        self.assertEqual( expNb, obsNb )
-        
-        
-    def test_getPathListIncludedInQueryCoord_included( self ):
-        self._db.createTable( self._table, "path" )
-        p = Path()
-        p.setFromTuple( ( "1", "qry1", "123", "184", "sbj1", "1", "63", "0.0", "75", "97.5" ) )
-        self._tpA.insert( p )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj2", "1", "150", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-        expList = [ p ]
-        obsList = self._tpA.getPathListIncludedInQueryCoord( "qry1", 100, 200 )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getPathListIncludedInQueryCoord_included_start_higher_than_end( self ):
-        self._db.createTable( self._table, "path" )
-        p = Path()
-        p.setFromTuple( ( "1", "qry1", "123", "184", "sbj1", "1", "63", "0.0", "75", "97.5" ) )
-        self._tpA.insert( p )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj2", "1", "150", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-        expList = [ p ]
-        obsList = self._tpA.getPathListIncludedInQueryCoord( "qry1", 200, 100 )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getPathListIncludedInQueryCoord_overlapping( self ):
-        self._db.createTable( self._table, "path" )
-        p = Path()
-        p.setFromTuple( ( "1", "qry1", "83", "184", "sbj1", "1", "103", "0.0", "137", "96.5" ) )
-        self._tpA.insert( p )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj2", "1", "150", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-        expList = []
-        obsList = self._tpA.getPathListIncludedInQueryCoord( "qry1", 100, 200 )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getPathListOverlappingQueryCoord(self):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "83", "184", "sbj1", "1", "103", "0.0", "137", "96.5" ) )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj2", "1", "150", "0.0", "187", "97.2" ) )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry1", "1", "350", "sbj1", "1", "350", "1e-43", "624", "98.1" ) )
-        lPath = [ p1, p2, p3 ]
-        self._tpA.insertList(lPath)
-        expList = [ p1, p3 ]
-        obsList = self._tpA.getPathListOverlappingQueryCoord( "qry1", 100, 200 )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getPathListOverlappingQueryCoord_no_overlapping_and_start_higher_than_end(self):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "83", "184", "sbj1", "1", "103", "0.0", "137", "96.5" ) )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj2", "1", "150", "0.0", "187", "97.2" ) )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry2", "1", "350", "sbj1", "1", "350", "1e-43", "624", "98.1" ) )
-        lPath = [ p1, p2, p3 ]
-        self._tpA.insertList(lPath)
-        expList = []
-        obsList = self._tpA.getPathListOverlappingQueryCoord( "qry1", 80, 1 )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getPathListOverlappingQueryCoord_withChains(self):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "83", "184", "sbj1", "1", "103", "0.0", "137", "96.5" ) )
-        p2a = Path()
-        p2a.setFromTuple( ( "2", "qry1", "1", "150", "sbj2", "1", "150", "0.0", "187", "97.2" ) )
-        p2b = Path()
-        p2b.setFromTuple( ( "2", "qry1", "251", "350", "sbj1", "151", "250", "1e-43", "624", "98.1" ) )
-        lPath = [ p1, p2a, p2b ]
-        self._tpA.insertList(lPath)
-        expList = [ p1, p2a ]
-        obsList = self._tpA.getPathListOverlappingQueryCoord( "qry1", 100, 200 )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getChainListOverlappingQueryCoord(self):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "83", "184", "sbj1", "1", "103", "0.0", "137", "96.5" ) )
-        p2a = Path()
-        p2a.setFromTuple( ( "2", "qry1", "1", "150", "sbj2", "1", "150", "0.0", "187", "97.2" ) )
-        p2b = Path()
-        p2b.setFromTuple( ( "2", "qry1", "251", "350", "sbj1", "151", "250", "1e-43", "624", "98.1" ) )
-        lPath = [ p1, p2a, p2b ]
-        self._tpA.insertList(lPath)
-        expList = [ p1, p2a, p2b ]
-        obsList = self._tpA.getChainListOverlappingQueryCoord( "qry1", 100, 200 )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getSetListOverlappingQueryCoord(self):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "83", "184", "sbj1", "1", "103", "0.0", "137", "96.5" ) )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj2", "1", "150", "0.0", "187", "97.2" ) )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry1", "1", "350", "sbj2", "1", "350", "1e-43", "624", "98.1" ) )
-        lPath = [ p1, p2, p3 ]
-        self._tpA.insertList(lPath)
-        
-        set1 = Set()
-        set1.setFromTuple( ( "1", "sbj1", "qry1", "83", "184" ) )
-        set3 = Set()
-        set3.setFromTuple( ( "3", "sbj2", "qry1", "1", "350" ) )
-        expList = [ set1, set3 ]
-        obsList = self._tpA.getSetListOverlappingQueryCoord( "qry1", 100, 200 )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getSetListOverlappingQueryCoord_no_overlapping_and_start_higher_than_end(self):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "83", "184", "sbj1", "1", "103", "0.0", "137", "96.5" ) )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj2", "1", "150", "0.0", "187", "97.2" ) )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry2", "1", "350", "sbj1", "1", "350", "1e-43", "624", "98.1" ) )
-        lPath = [ p1, p2, p3 ]
-        self._tpA.insertList(lPath)
-        expList = []
-        obsList = self._tpA.getSetListOverlappingQueryCoord( "qry1", 80, 1 )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getSetListIncludedInQueryCoord(self):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "102", "184", "sbj1", "1", "103", "0.0", "137", "96.5" ) )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj2", "1", "150", "0.0", "187", "97.2" ) )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry1", "1", "350", "sbj2", "1", "350", "1e-43", "624", "98.1" ) )
-        lPath = [ p1, p2, p3 ]
-        self._tpA.insertList(lPath)
-        
-        set1 = Set()
-        set1.setFromTuple( ( "1", "sbj1", "qry1", "102", "184" ) )
-        expList = [ set1 ]
-        obsList = self._tpA.getSetListIncludedInQueryCoord( "qry1", 100, 200 )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getSetListIncludedInQueryCoord_no_including_and_start_higher_than_end(self):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "83", "184", "sbj1", "1", "103", "0.0", "137", "96.5" ) )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj2", "1", "150", "0.0", "187", "97.2" ) )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry2", "1", "350", "sbj1", "1", "350", "1e-43", "624", "98.1" ) )
-        lPath = [ p1, p2, p3 ]
-        self._tpA.insertList(lPath)
-        expList = []
-        obsList = self._tpA.getSetListIncludedInQueryCoord( "qry1", 80, 1 )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getPathListSortedByQueryCoord( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "3", "qry2", "101", "200", "sbj3", "1", "100", "0.0", "137", "96.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "151", "500", "sbj1", "1", "350", "0.0", "137", "96.5" ) )
-        self._tpA.insert( p2 )
-        p3 = Path()
-        p3.setFromTuple( ( "1", "qry1", "1", "200", "sbj3", "1", "200", "0.0", "137", "96.5" ) )
-        self._tpA.insert( p3 )
-        
-        expList = [ p3, p2, p1 ]
-        obsList = self._tpA.getPathListSortedByQueryCoord()
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getPathListSortedByQueryCoordFromQuery( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "3", "qry2", "101", "200", "sbj3", "1", "100", "0.0", "137", "96.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "151", "500", "sbj1", "1", "350", "0.0", "137", "96.5" ) )
-        self._tpA.insert( p2 )
-        p3 = Path()
-        p3.setFromTuple( ( "1", "qry1", "1", "200", "sbj3", "1", "200", "0.0", "137", "96.5" ) )
-        self._tpA.insert( p3 )
-        
-        expList = [ p3, p2 ]
-        obsList = self._tpA.getPathListSortedByQueryCoordFromQuery( "qry1" )
-        self.assertEqual( expList, obsList )
-        
-    def test_getPathListSortedByQueryCoordAndScoreFromQuery( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "3", "qry2", "101", "200", "sbj3", "1", "100", "0.0", "137", "96.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "151", "500", "sbj1", "1", "350", "0.0", "137", "96.5" ) )
-        self._tpA.insert( p2 )
-        p3 = Path()
-        p3.setFromTuple( ( "1", "qry1", "1", "200", "sbj3", "1", "200", "0.0", "137", "96.5" ) )
-        self._tpA.insert( p3 )
-        p4 = Path()
-        p4.setFromTuple( ( "4", "qry1", "1", "200", "sbj3", "1", "200", "0.0", "200", "96.5" ) )
-        self._tpA.insert( p4 )
-        
-        expList = [  p3, p4, p2 ]
-        obsList = self._tpA.getPathListSortedByQueryCoordAndScoreFromQuery( "qry1" )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getCumulLengthFromSubject( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj1", "1", "150", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-
-        exp = 250
-        obs = self._tpA.getCumulLengthFromSubject( "sbj1" )
-        self.assertEqual( obs, exp )
-        
-        
-    def test_getCumulLengthFromSubject_with_no_subject( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj1", "1", "150", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-
-        exp = 0
-        obs = self._tpA.getCumulLengthFromSubject( "sbj2" )
-        self.assertEqual( obs, exp )    
-        
-        
-    def test_getChainLengthListFromSubject( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "1", "qry1", "351", "500", "sbj1", "101", "250", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-        p3 = Path()
-        p3.setFromTuple( ( "2", "qry1", "801", "900", "sbj1", "1", "100", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p3 )
-        p4 = Path()
-        p4.setFromTuple( ( "3", "qry1", "1900", "1801", "sbj1", "1", "100", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p4 )
-        p5 = Path()
-        p5.setFromTuple( ( "4", "qry1", "1801", "1900", "sbj1", "100", "1", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p5 )
-        
-        expList = [ 250, 100, 100, 100 ]
-        obsList = self._tpA.getChainLengthListFromSubject( "sbj1" )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getChainIdentityListFromSubject( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "1", "qry1", "351", "500", "sbj1", "101", "250", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-        p3 = Path()
-        p3.setFromTuple( ( "2", "qry1", "801", "900", "sbj1", "1", "100", "0.0", "187", "94.3" ) )
-        self._tpA.insert( p3 )
-        
-        idChain1 = ( 98.5*(200-101+1) + 97.2*(500-351+1) ) / float(200-101+1+500-351+1)
-        idChain2 = 94.3*(900-801+1) / float(900-801+1)
-        expList = [ idChain1, idChain2 ]
-        obsList = self._tpA.getChainIdentityListFromSubject( "sbj1" )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getPathLengthListFromSubject( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "1", "qry1", "351", "500", "sbj1", "101", "250", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-        p3 = Path()
-        p3.setFromTuple( ( "2", "qry1", "801", "900", "sbj1", "1", "100", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p3 )
-        
-        expList = [ 100, 150, 100 ]
-        obsList = self._tpA.getPathLengthListFromSubject( "sbj1" )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getIdListSortedByDecreasingChainLengthFromSubject( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0847", "100", "97.2" ) )
-        self._tpA.insert( p1 )
-        p2a = Path()
-        p2a.setFromTuple( ( "2", "qry1", "351", "500", "sbj1", "1", "150", "0.0035", "100", "97.2" ) )
-        self._tpA.insert( p2a )
-        p2b = Path()
-        p2b.setFromTuple( ( "2", "qry1", "551", "700", "sbj1", "151", "300", "0.0098", "100", "97.2" ) )
-        self._tpA.insert( p2b )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry2", "541", "800", "sbj1", "1", "260", "0.147", "100", "97.2" ) )
-        self._tpA.insert( p3 )
-        
-        expList = [ 2, 3, 1 ]
-        obsList = self._tpA.getIdListSortedByDecreasingChainLengthFromSubject( "sbj1" )
-        
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getIdListFromSubjectWhereChainsLongerThanThreshold( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "1", "100", "sbj1", "1", "100", "0.0847", "100", "97.2" ) )  # 1-fragment copy, long enough
-        self._tpA.insert( p1 )
-        p2a = Path()
-        p2a.setFromTuple( ( "2", "qry2", "1", "50", "sbj1", "1", "50", "0.0035", "100", "97.2" ) )
-        self._tpA.insert( p2a )
-        p2b = Path()
-        p2b.setFromTuple( ( "2", "qry2", "101", "150", "sbj1", "51", "100", "0.0098", "100", "97.2" ) )  # 2-fragment copy, long enough
-        self._tpA.insert( p2b )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry3", "1", "80", "sbj1", "1", "80", "0.147", "100", "97.2" ) )  # 1-fragment copy, too short
-        self._tpA.insert( p3 )
-        p4a = Path()
-        p4a.setFromTuple( ( "4", "qry4", "1", "30", "sbj1", "1", "30", "0.0035", "100", "97.2" ) )
-        self._tpA.insert( p4a )
-        p4b = Path()
-        p4b.setFromTuple( ( "4", "qry4", "101", "150", "sbj1", "31", "80", "0.0098", "100", "97.2" ) )  # 2-fragment copy, too short
-        self._tpA.insert( p4b )
-        
-        exp = [ 1, 2 ]
-        obs = self._tpA.getIdListFromSubjectWhereChainsLongerThanThreshold( "sbj1", 90 )
-        
-        self.assertEqual( exp, obs )
-        
-        
-    def test_getSetListFromQuery(self):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "1", "qry1", "351", "500", "sbj1", "101", "250", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-        p3 = Path()
-        p3.setFromTuple( ( "2", "qry1", "801", "900", "sbj1", "1", "100", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p3 )
-        
-        set1 = Set()
-        set1.setFromTuple( ( "1", "sbj1", "qry1", "101", "200" ) )
-        set2 = Set()
-        set2.setFromTuple( ( "1", "sbj1", "qry1", "351", "500" ) )    
-        set3 = Set()
-        set3.setFromTuple( ( "2", "sbj1", "qry1", "801", "900" ) )
-        expList = [set1, set2, set3]
-        obsList =  self._tpA.getSetListFromQuery("qry1")
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_deleteFromId(self):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "1", "qry1", "351", "500", "sbj1", "101", "250", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-        p3 = Path()
-        p3.setFromTuple( ( "2", "qry1", "801", "900", "sbj1", "1", "100", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p3 )
-        
-        self._tpA.deleteFromId(2)
-        expList = [p1, p2]
-        obsList = self._tpA.getListOfAllPaths()
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_deleteFromPath(self):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "1", "qry1", "351", "500", "sbj1", "101", "250", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-        p3 = Path()
-        p3.setFromTuple( ( "2", "qry1", "801", "900", "sbj1", "1", "100", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p3 )
-        
-        self._tpA.deleteFromPath(p3)
-        expList = [p1, p2]
-        obsList = self._tpA.getListOfAllPaths()
-        self.assertEqual( expList, obsList )
-        
-    def test_deleteFromPath_two_lines_to_delete(self):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "1", "qry1", "351", "500", "sbj1", "101", "250", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-        p3 = Path()
-        p3.setFromTuple( ( "2", "qry1", "801", "900", "sbj1", "1", "100", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p3 )
-        p4 = Path()
-        p4.setFromTuple( ( "2", "qry1", "801", "900", "sbj1", "1", "100", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p4 )
-        
-        self._tpA.deleteFromPath(p3)
-        expList = [p1, p2]
-        obsList = self._tpA.getListOfAllPaths()
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_deleteFromIdList(self):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj1", "101", "250", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry1", "801", "900", "sbj1", "1", "100", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p3 )
-        lId = [ 1, 2 ]
-        self._tpA.deleteFromIdList(lId)
-        expList = [ p3 ]
-        obsList = self._tpA.getListOfAllPaths()
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_joinTwoPaths(self):
-        self._db.createTable( self._table, "path" )
-        
-        idPath1 = 5
-        p1 = Path()
-        p1.setFromTuple( ( "5", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        
-        idPath2 = 2
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj1", "101", "250", "0.0", "187", "97.2" ) )
-        
-        lPath = [ p1, p2 ]
-        self._tpA.insertList( lPath )
-
-        self._tpA.joinTwoPaths(idPath1, idPath2)
-
-        expP1 = Path()
-        expP1.setFromTuple( ( "2", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        expP2 = Path()
-        expP2.setFromTuple( ( "2", "qry1", "351", "500", "sbj1", "101", "250", "0.0", "187", "97.2" ) )
-        
-        expList = [ expP1, expP2 ]
-        obsList = self._tpA.getListOfAllPaths()
-        
-        self.assertEqual( expList, obsList)
-        self._db.dropTable(self._table)        
-        
-        
-    def test_joinTwoPaths_with_id1_inferior_at_id2(self):
-        self._db.createTable( self._table, "path" )
-        
-        idPath1 = 5
-        p1 = Path()
-        p1.setFromTuple( ( "5", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        
-        idPath2 = 2
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj1", "101", "250", "0.0", "187", "97.2" ) )
-        
-        lPath = [ p1, p2 ]
-        self._tpA.insertList( lPath )
-
-        self._tpA.joinTwoPaths(idPath2, idPath1)
-
-        expP1 = Path()
-        expP1.setFromTuple( ( "2", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        expP2 = Path()
-        expP2.setFromTuple( ( "2", "qry1", "351", "500", "sbj1", "101", "250", "0.0", "187", "97.2" ) )
-        
-        expList = [ expP1, expP2 ]
-        obsList = self._tpA.getListOfAllPaths()
-        
-        self.assertEqual( expList, obsList)
-        self._db.dropTable(self._table)       
-        
-        
-    def test_getNewId(self):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0", "137", "98.5" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj1", "101", "250", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p2 )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry1", "801", "900", "sbj1", "1", "100", "0.0", "187", "97.2" ) )
-        self._tpA.insert( p3 )
-        
-        expId = 4
-        obsId = self._tpA.getNewId()
-        self.assertEqual( expId, obsId)
-        
-        
-    def test_getNewId_path_null(self):
-        self._db.createTable( self._table, "path" )
-        expId = 1
-        obsId = self._tpA.getNewId()
-        self.assertEqual( expId, obsId)
-        
-        
-    def test_getListOfChainsSortedByAscIdentityFromQuery( self ):
-        self._db.createTable( self._table, "path" )
-        p1a = Path()
-        p1a.setFromTuple( ( "1", "qry1", "11", "100", "sbj1", "1", "90", "0.0", "132", "96.2" ) )
-        p2a = Path()
-        p2a.setFromTuple( ( "2", "qry1", "101", "120", "sbj2", "1", "20", "0.0", "36", "98.0" ) )
-        p1b = Path()
-        p1b.setFromTuple( ( "1", "qry1", "121", "200", "sbj1", "91", "170", "0.0", "117", "96.5" ) )
-        p2b = Path()
-        p2b.setFromTuple( ( "2", "qry1", "201", "800", "sbj2", "21", "620", "0.0", "856", "93.2" ) )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry2", "1", "1000", "sbj1", "1", "1000", "1e-120", "900", "100.0" ) )
-        for p in [ p1a, p2a, p1b, p2b, p3 ]: self._tpA.insert( p )
-        lPaths1 = [ p1a, p1b ]
-        lPaths2 = [ p2a, p2b ]
-        expList = [ lPaths2, lPaths1 ]
-        obsList = self._tpA.getListOfChainsSortedByAscIdentityFromQuery( "qry1" )
-        for lPaths in expList: PathUtils.getPathListSortedByIncreasingMinQueryThenMaxQuery( lPaths )
-        for lPaths in obsList: PathUtils.getPathListSortedByIncreasingMinQueryThenMaxQuery( lPaths )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_getPathListSortedByIncreasingEvalueFromQuery( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "qry1", "101", "200", "sbj1", "1", "100", "0.0847", "100", "97.2" ) )
-        self._tpA.insert( p1 )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "qry1", "351", "500", "sbj2", "1", "150", "0.0035", "100", "97.2" ) )
-        self._tpA.insert( p2 )
-        p3 = Path()
-        p3.setFromTuple( ( "3", "qry2", "541", "800", "sbj3", "1", "260", "0.147", "100", "97.2" ) )
-        self._tpA.insert( p3 )
-        
-        expList = [ p2, p1 ]
-        obsList = self._tpA.getPathListSortedByIncreasingEvalueFromQuery( "qry1" )
-        self.assertEqual( expList, obsList )
-        
-        
-    def test_path2PathRange_QryDirSbjDir( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "chr1", "1", "10", "TE3", "11", "17", "1e-20", "30", "85.0" ) )
-        p2a = Path()
-        p2a.setFromTuple( ( "2", "chr2", "1", "100", "TE2", "10", "109", "1e-20", "163", "92.1" ) )
-        p2b = Path()
-        p2b.setFromTuple( ( "2", "chr2", "201", "250", "TE2", "151", "200", "1e-10", "75", "88.7" ) )
-        for p in [ p1, p2a, p2b ]: self._tpA.insert( p )
-        p2 = Path()
-        
-        p2.setFromTuple( ( "2", "chr2", "1", "250", "TE2", "10", "200", "1e-20", "238", "90.96" ) )   # 'merge' p2a and p2b
-        expList = [ p1, p2 ]
-        obsTable = self._tpA.path2PathRange()
-        self._tpA._table = obsTable
-        obsList = self._tpA.getListOfAllPaths()
-        self.assertEqual( expList, obsList )
-        self._db.dropTable( obsTable )
-        
-        
-    def test_path2PathRange_QryDirSbjRev( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "chr1", "1", "10", "TE3", "11", "17", "1e-20", "30", "85.0" ) )
-        p2a = Path()
-        p2a.setFromTuple( ( "2", "chr2", "1", "100", "TE2", "109", "10", "1e-20", "163", "92.1" ) )
-        p2b = Path()
-        p2b.setFromTuple( ( "2", "chr2", "201", "250", "TE2", "200", "151", "1e-10", "75", "88.7" ) )
-        for p in [ p1, p2a, p2b ]: self._tpA.insert( p )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "chr2", "1", "250", "TE2", "200", "10", "1e-20", "238", "90.96" ) )   # 'merge' p2a and p2b
-        expList = [ p1, p2 ]
-        obsTable = self._tpA.path2PathRange()
-        self._tpA._table = obsTable
-        obsList = self._tpA.getListOfAllPaths()
-        self.assertEqual( obsList, expList )
-        self._db.dropTable( obsTable )
-        
-        
-###################################################################################
-############################ Tests for other methods ##############################
-###################################################################################
-        
-    def test_path2PathRangeFromQuery_QryDirSbjDir( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "chr1", "1", "10", "TE3", "11", "17", "1e-20", "30", "85.0" ) )
-        p2a = Path()
-        p2a.setFromTuple( ( "2", "chr2", "1", "100", "TE2", "10", "109", "1e-20", "163", "92.1" ) )
-        p2b = Path()
-        p2b.setFromTuple( ( "2", "chr2", "201", "250", "TE2", "151", "200", "1e-10", "75", "88.7" ) )
-        for p in [ p1, p2a, p2b ]: self._tpA.insert( p )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "chr2", "1", "250", "TE2", "10", "200", "1e-20", "238", "90.96" ) )   # 'merge' p2a and p2b
-        expList = [ p2 ]
-        obsTable = self._tpA._path2PathRangeFromQuery( "chr2" )
-        self._tpA._table = obsTable
-        obsList = self._tpA.getListOfAllPaths()
-        self.assertEqual( expList, obsList )
-        self._db.dropTable( obsTable )
-        
-        
-    def test_path2PathRangeFromQuery_QryDirSbjRev( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "chr1", "1", "10", "TE3", "11", "17", "1e-20", "30", "85.0" ) )
-        p2a = Path()
-        p2a.setFromTuple( ( "2", "chr2", "1", "100", "TE2", "109", "10", "1e-20", "163", "92.1" ) )
-        p2b = Path()
-        p2b.setFromTuple( ( "2", "chr2", "201", "250", "TE2", "200", "151", "1e-10", "75", "88.7" ) )
-        for p in [ p1, p2a, p2b ]: self._tpA.insert( p )
-        p2 = Path()
-        p2.setFromTuple( ( "2", "chr2", "1", "250", "TE2", "200", "10", "1e-20", "238", "90.96" ) )   # 'merge' p2a and p2b
-        expList = [ p2 ]
-        obsTable = self._tpA._path2PathRangeFromQuery( "chr2" )
-        self._tpA._table = obsTable
-        obsList = self._tpA.getListOfAllPaths()
-        self.assertEqual( obsList, expList )
-        self._db.dropTable( obsTable )
-        
-        
-    def test_getNbOccurrences( self ):
-        self._db.createTable( self._table, "path" )
-        p1 = Path()
-        p1.setFromTuple( ( "1", "chr1", "1", "10", "TE3", "11", "17", "1e-20", "30", "85.0" ) )
-        
-        exp = 0
-        obs = self._tpA.getNbOccurrences( p1 )
-        self.assertEqual( exp, obs )
-        
-        self._tpA.insert( p1 )
-        exp = 1
-        obs = self._tpA.getNbOccurrences( p1 )
-        self.assertEqual( exp, obs )
-        
-        self._tpA.insert( p1 )
-        exp = 2
-        obs = self._tpA.getNbOccurrences( p1 )
-        self.assertEqual( exp, obs )
-        
-    def test_getListOfUniqueOccPath(self):
-        
-        p1 = Path()
-        p1.setFromTuple( ( "1", "chr1", "1", "10", "TE3", "11", "17", "1e-20", "30", "85.0" ) )
-        p2 = Path()
-        p2.setFromTuple( ( "1", "chr1", "1", "10", "TE3", "11", "17", "1e-20", "30", "85.0" ) )
-        p3 = Path()
-        p3.setFromTuple( ( "1", "chr1", "2", "10", "TE3", "11", "17", "1e-20", "30", "85.0" ) )
-        p4 = Path()
-        p4.setFromTuple( ( "2", "chr2", "2", "11", "TE4", "10", "18", "1e-30", "40", "95.0" ) )
-        lPath = [p1,p2,p3,p4]
-                
-        expListPath = deepcopy([p1,p3,p4])     
-        obsListUniquePath = self._tpA.getListOfUniqueOccPath(lPath)
-        self.assertEqual( expListPath, obsListUniquePath )
-
-    def test_getListOfUniqueOccPath_empty_list(self):
-        expListPath = []     
-        obsListUniquePath = self._tpA.getListOfUniqueOccPath([])
-        self.assertEqual( expListPath, obsListUniquePath )
-        
-    def test_getListOfUniqueOccPath_one_item(self):
-        p1 = Path()
-        p1.setFromTuple( ( "1", "chr1", "1", "10", "TE3", "11", "17", "1e-20", "30", "85.0" ) )
-        expListPath = deepcopy([p1])      
-        obsListUniquePath = self._tpA.getListOfUniqueOccPath([p1])
-        self.assertEqual( expListPath, obsListUniquePath )
-
-    def test_getListOfUniqueOccPath_unsorted_list(self):
-        
-        p1 = Path()
-        p1.setFromTuple( ( "1", "chr1", "1", "10", "TE3", "11", "17", "1e-20", "30", "85.0" ) )
-        p3 = Path()
-        p3.setFromTuple( ( "1", "chr1", "3", "10", "TE3", "11", "17", "1e-20", "30", "85.0" ) )
-        p4 = Path()
-        p4.setFromTuple( ( "2", "chr2", "2", "11", "TE4", "10", "18", "1e-30", "40", "95.0" ) )
-        p2 = Path()
-        p2.setFromTuple( ( "1", "chr1", "1", "10", "TE3", "11", "17", "1e-20", "30", "85.0" ) )
-        
-        lPath = [p1,p3,p4,p2]
-                
-        expListPath = deepcopy([p1,p3,p4])     
-        obsListUniquePath = self._tpA.getListOfUniqueOccPath(lPath)
-        self.assertEqual( expListPath, obsListUniquePath )
-
-test_suite = unittest.TestSuite()
-test_suite.addTest( unittest.makeSuite( Test_TablePathAdaptator ) )
-if __name__ == "__main__":
-    unittest.TextTestRunner(verbosity=2).run( test_suite )
--- a/commons/core/sql/test/Test_TableSeqAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,321 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-import unittest
-import os
-import time
-from commons.core.sql.DbMySql import DbMySql
-from commons.core.sql.TableSeqAdaptator import TableSeqAdaptator
-from commons.core.seq.Bioseq import Bioseq
-from commons.core.coord.Set import Set
-from commons.core.utils.FileUtils import FileUtils
-
-
-class Test_TableSeqAdaptator( unittest.TestCase ):
-    
-    def setUp( self ):
-        self._uniqId = "%s_%s" % ( time.strftime("%Y%m%d%H%M%S") , os.getpid() )
-        self.fileUtils = FileUtils()
-        self._configFileName = "dummyConfigFile_%s" % ( self._uniqId )
-        configF = open(self._configFileName, "w" )
-        configF.write( "[repet_env]\n" )
-        configF.write( "repet_host: %s\n" % ( os.environ["REPET_HOST"] ) )
-        configF.write( "repet_user: %s\n" % ( os.environ["REPET_USER"] ) )
-        configF.write( "repet_pw: %s\n" % ( os.environ["REPET_PW"] ) )
-        configF.write( "repet_db: %s\n" % ( os.environ["REPET_DB"] ) )
-        configF.write( "repet_port: %s\n" % ( os.environ["REPET_PORT"] ) )
-        configF.close()
-        self._db = DbMySql( cfgFileName=self._configFileName )
-        self._table = "dummySeqTable_%s" % ( self._uniqId )
-        self._tsA = TableSeqAdaptator( self._db, self._table )
-        
-        
-    def tearDown( self ):
-        self._db.dropTable( self._table )
-        self._db.close()
-        os.remove( self._configFileName )
-        self._configFileName = ""
-        
-        
-##################################################################################
-################## Tests for methods in ITableSeqAdaptator #######################
-##################################################################################
-        
-    def test_insert( self ):
-        bs = Bioseq( "seq1", "AGCGATGACGATGCGAGT" )
-        self._db.createTable( self._table, "fasta" )
-        self._tsA.insert( bs )
-        
-        expBioseqTuple = (("seq1", "AGCGATGACGATGCGAGT", "seq1", 18L), )
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsBioseqTuple = self._db.cursor.fetchall()
-        
-        self.assertEqual( expBioseqTuple, obsBioseqTuple )
-        
-        
-    def test_insertList( self ):
-        bs1 = Bioseq( "seq1 desc", "AGCGATGACGATGCGAGT" )
-        bs2 = Bioseq( "seq2", "AGCGATGACGATGCGAGT")
-        bs3 = Bioseq( "seq3", "GCGATGCAGATGACGGCGGATGC")
-        lBioseq = [ bs1, bs2, bs3 ]
-        self._db.createTable( self._table, "fasta" )
-        self._tsA.insertList( lBioseq )
-        
-        tuple1 = ("seq1", "AGCGATGACGATGCGAGT", "seq1 desc", 18L)
-        tuple2 = ("seq2", "AGCGATGACGATGCGAGT", "seq2", 18L)
-        tuple3 = ("seq3", "GCGATGCAGATGACGGCGGATGC", "seq3", 23L)
-        expBioseqTuple = ( tuple1, tuple2, tuple3 )
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._db.execute( sqlCmd )
-        obsBioseqTuple = self._db.cursor.fetchall()
-        
-        self.assertEqual( expBioseqTuple, obsBioseqTuple )
-        
-        
-    def test_getAccessionsList(self):
-        faFileName = "dummyFaFile_%s" % ( self._uniqId )
-        faF = open( faFileName, "w" )
-        faF.write(">seq1\n")
-        faF.write("AGCGATGACGATGCGAGT\n")
-        faF.write(">seq2\n")
-        faF.write("GCGATGCAGATGACGGCGGATGC\n")
-        faF.close()
-        self._db.createTable( self._table, "fasta", faFileName )
-        lExp = [ "seq1", "seq2" ]
-        lExp.sort()
-        lObs = self._tsA.getAccessionsList()
-        lObs.sort()
-        self.assertEqual( lObs, lExp )
-        os.remove( faFileName )
-        
-        
-    def test_saveAccessionsListInFastaFile(self):
-        expFileName = "dummyFaFile_%s" % ( self._uniqId )
-        expF = open( expFileName, "w" )
-        expF.write(">seq1\n")
-        expF.write("AGCGATGACGATGCGAGT\n")
-        expF.write(">seq2\n")
-        expF.write("GCGATGCAGATGACGGCGGATGC\n")
-        expF.close()
-        self._db.createTable( self._table, "fasta", expFileName )
-        lAccessions = [ "seq1", "seq2" ]
-        obsFileName = "dummyObsFile_%s" % ( self._uniqId )
-        self._tsA.saveAccessionsListInFastaFile( lAccessions, obsFileName )
-        self.assertTrue( self.fileUtils.are2FilesIdentical( obsFileName, expFileName ) )
-        os.remove( expFileName )
-        os.remove( obsFileName )
-        
-    def test_exportInFastaFile(self):
-        expFileName = "dummyFaFile_%s" % ( self._uniqId )
-        faF = open( expFileName, "w" )
-        faF.write(">seq1\n")
-        faF.write("AGCGATGACGATGCGAGT\n")
-        faF.write(">seq2\n")
-        faF.write("GCGATGCAGATGACGGCGGATGC\n")
-        faF.close()
-        self._db.createTable( self._table, "fasta", expFileName )
-        obsFileName = "dummyFaFileObs_%s" % ( self._uniqId )
-        self._tsA.exportInFastaFile( obsFileName )
-        self.assertTrue( self.fileUtils.are2FilesIdentical( obsFileName, expFileName ) )
-        os.remove( expFileName )
-        os.remove( obsFileName )
-
-##################################################################################
-########################### Tests for other methods ##############################
-##################################################################################
-        
-    def test_insertWithBioseqEmpty( self ):
-        bs = Bioseq( "", "" )
-        self._db.createTable( self._table, "fasta" )
-        exp = None
-        obs = self._tsA.insert(bs)
-        self.assertEqual( exp, obs )
-        
-        
-    def test_getBioseqFromHeader( self ):
-        faFileName = "dummyFaFile_%s" % ( self._uniqId )
-        faF = open( faFileName, "w" )
-        faF.write(">seq1\n")
-        faF.write("AGCGATGACGATGCGAGT\n")
-        faF.write(">seq2\n")
-        faF.write("GCGATGCAGATGACGGCGGATGC\n")
-        faF.close()
-        self._db.createTable( self._table, "fasta", faFileName )
-        exp = Bioseq( "seq1", "AGCGATGACGATGCGAGT" )
-        obs = self._tsA.getBioseqFromHeader( "seq1" )
-        self.assertEqual( obs, exp )
-        exp = Bioseq( "seq2", "GCGATGCAGATGACGGCGGATGC" )
-        obs = self._tsA.getBioseqFromHeader( "seq2" )
-        self.assertEqual( obs, exp )
-        os.remove( faFileName )
-        
-        
-    def test_getSeqLengthFromAccession( self ):
-        inFileName = "dummyFaFile_%s" % ( self._uniqId )
-        inF = open( inFileName, "w" )
-        inF.write(">seq1\n")
-        inF.write("AGCGATGACGATGCGAGT\n")
-        inF.write(">seq2\n")
-        inF.write("GCGATGCAGATGACGGCGGATGC\n")
-        inF.close()
-        self._db.createTable( self._table, "fasta", inFileName )
-        exp = 18
-        obs = self._tsA.getSeqLengthFromAccession( "seq1" )
-        self.assertEqual( obs, exp )
-        os.remove( inFileName )
-
-
-    def test_getSeqLengthFromDescription( self ):
-        inFileName = "dummyFaFile_%s" % ( self._uniqId )
-        inF = open( inFileName, "w" )
-        inF.write(">seq1 descriptionfield\n")
-        inF.write("AGCGATGACGATGCGAGT\n")
-        inF.write(">seq2 descriptionfield\n")
-        inF.write("GCGATGCAGATGACGGCGGATGC\n")
-        inF.close()
-        self._db.createTable( self._table, "fasta", inFileName )
-        exp = 18
-        obs = self._tsA.getSeqLengthFromDescription( "seq1 descriptionfield" )
-        self.assertEqual( obs, exp )
-        os.remove( inFileName )
-        
-        
-    def test_getAccessionAndLengthList( self ):
-        inFileName = "dummyFaFile_%s" % ( self._uniqId )
-        inF = open( inFileName, "w" )
-        inF.write(">seq1\n")
-        inF.write("AGCGATGACGATGCGAGT\n")
-        inF.write(">seq2\n")
-        inF.write("GCGATGCAGATGACGGCGGATGC\n")
-        inF.close()
-        self._db.createTable( self._table, "fasta", inFileName )
-        lSeq1 = ("seq1", 18)
-        lSeq2 = ("seq2", 23)
-        lExp = [lSeq1,lSeq2]
-        lObs = self._tsA.getAccessionAndLengthList()
-        self.assertEqual( lObs, lExp )
-        os.remove( inFileName )
-        
-        
-    def test_getSeqLengthFromAccessionWithSingleQuote( self ):
-        inFileName = "dummyFaFile_%s" % ( self._uniqId )
-        inF = open( inFileName, "w" )
-        inF.write(">seq1'\n")
-        inF.write("AGCGATGACGATGCGAGT\n")
-        inF.write(">seq2\n")
-        inF.write("GCGATGCAGATGACGGCGGATGC\n")
-        inF.close()
-        self._db.createTable( self._table, "fasta", inFileName )
-        exp = 18
-        obs = self._tsA.getSeqLengthFromAccession( "seq1'" )
-        self.assertEqual( obs, exp )
-        os.remove( inFileName )
-        
-        
-    def test_getSubSequence_directStrand( self ):
-        self._db.createTable( self._table, "seq" )
-        chr = Bioseq()
-        chr.setHeader( "chr2" )
-        chr.setSequence( "AAAAAAAAAATTTTTGGGGGGGGGG" )
-        self._tsA.insert( chr )
-        exp = "TTTGGG"
-        obs = self._tsA.getSubSequence( "chr2", 13, 18 )
-        self.assertEqual( exp, obs )
-        
-        
-    def test_getSubSequence_reverseStrand( self ):
-        self._db.createTable( self._table, "seq" )
-        chr = Bioseq()
-        chr.setHeader( "chr2" )
-        chr.setSequence( "AAAAAAAAAATTTTTGGGGGGGGGG" )
-        self._tsA.insert( chr )
-        exp = "CCCAAA"
-        obs = self._tsA.getSubSequence( "chr2", 18, 13 )
-        self.assertEqual( exp, obs )
-        
-        
-    def test_getBioseqFromSetList_directStrand( self ):
-        self._db.createTable( self._table, "seq" )
-        chr = Bioseq()
-        chr.setHeader( "chr2" )
-        chr.setSequence( "AAAAAAAAAATTTTTGGGGGGGGGG" )
-        self._tsA.insert( chr )
-        lSets = []
-        lSets.append( Set( 3, "Dm-B-G600-Map3_classI-LTR-incomp", "chr2", 1, 10 ) )
-        lSets.append( Set( 3, "Dm-B-G600-Map3_classI-LTR-incomp", "chr2", 16, 25 ) )
-        exp = Bioseq( "Dm-B-G600-Map3_classI-LTR-incomp::3 chr2 1..10,16..25", "AAAAAAAAAAGGGGGGGGGG" )
-        obs = self._tsA.getBioseqFromSetList( lSets )
-        self.assertEqual( exp, obs )
-        
-        
-    def test_getBioseqFromSetList_reverseStrand( self ):
-        self._db.createTable( self._table, "seq" )
-        chr = Bioseq()
-        chr.setHeader( "chr2" )
-        chr.setSequence( "AAAAAAAAAATTTTTGGGGGGGGGG" )
-        self._tsA.insert( chr )
-        lSets = []
-        lSets.append( Set( 3, "Dm-B-G600-Map3_classI-LTR-incomp", "chr2", 10, 1 ) )
-        lSets.append( Set( 3, "Dm-B-G600-Map3_classI-LTR-incomp", "chr2", 25, 16 ) )
-        exp = Bioseq( "Dm-B-G600-Map3_classI-LTR-incomp::3 chr2 25..16,10..1", "CCCCCCCCCCTTTTTTTTTT" )
-        obs = self._tsA.getBioseqFromSetList( lSets )
-        self.assertEqual( exp, obs )
-        
-        
-    def test_isAccessionInTable_true( self ):
-        self._db.createTable( self._table, "seq" )
-        chr = Bioseq()
-        chr.setHeader( "chr2" )
-        chr.setSequence( "AAAAAAAAAATTTTTGGGGGGGGGG" )
-        self._tsA.insert( chr )
-        
-        obs = self._tsA.isAccessionInTable( "chr2" )
-        self.assertTrue( obs )
-        
-        
-    def test_isAccessionInTable_false( self ):
-        self._db.createTable( self._table, "seq" )
-        chr = Bioseq()
-        chr.setHeader( "chr2" )
-        chr.setSequence( "AAAAAAAAAATTTTTGGGGGGGGGG" )
-        self._tsA.insert( chr )
-        
-        obs = self._tsA.isAccessionInTable( "chr1" )
-        self.assertFalse( obs )
-        
-        
-test_suite = unittest.TestSuite()
-test_suite.addTest( unittest.makeSuite( Test_TableSeqAdaptator ) )
-if __name__ == "__main__":
-    unittest.TextTestRunner(verbosity=2).run( test_suite )
--- a/commons/core/sql/test/Test_TableSetAdaptator.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,330 +0,0 @@
-# Copyright INRA (Institut National de la Recherche Agronomique)
-# http://www.inra.fr
-# http://urgi.versailles.inra.fr
-#
-# This software is governed by the CeCILL license under French law and
-# abiding by the rules of distribution of free software.  You can  use, 
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info". 
-#
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability. 
-#
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or 
-# data to be ensured and,  more generally, to use and operate it in the 
-# same conditions as regards security. 
-#
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-
-import unittest
-import time
-import os
-from commons.core.sql.TableSetAdaptator import TableSetAdaptator
-from commons.core.sql.DbMySql import DbMySql
-from commons.core.coord.Set import Set
-
-
-class Test_TableSetAdaptator( unittest.TestCase ):
-
-    def setUp( self ):
-        self._uniqId = "%s_%s" % ( time.strftime("%Y%m%d%H%M%S") , os.getpid() )
-        self._configFileName = "dummyConfigFile_%s" % ( self._uniqId )
-        configF = open(self._configFileName, "w" )
-        configF.write( "[repet_env]\n" )
-        configF.write( "repet_host: %s\n" % ( os.environ["REPET_HOST"] ) )
-        configF.write( "repet_user: %s\n" % ( os.environ["REPET_USER"] ) )
-        configF.write( "repet_pw: %s\n" % ( os.environ["REPET_PW"] ) )
-        configF.write( "repet_db: %s\n" % ( os.environ["REPET_DB"] ) )
-        configF.write( "repet_port: %s\n" % ( os.environ["REPET_PORT"] ) )
-        configF.close()
-        self._iDb = DbMySql( cfgFileName=self._configFileName )
-        self._table = "dummySetTable_%s" % ( self._uniqId )
-        self._tSetA = TableSetAdaptator( self._iDb, self._table )
-                
-    def tearDown( self ):
-        self._uniqId = None
-        self._iDb.dropTable( self._table )
-        self._iDb.close()
-        self._table = None
-        self._tSetA = None
-        os.remove( self._configFileName )
-        self._configFileName = ""
-
-    def test_insert(self):
-        set2Insert = Set()
-        set2Insert.id = 1
-        set2Insert.name = "name1"
-        set2Insert.seqname = "name2"
-        set2Insert.start = 1L
-        set2Insert.end = 50L
-        self._iDb.createTable( self._table, "set", "" )
-        self._tSetA.insert( set2Insert, False )
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._iDb.execute( sqlCmd )
-        expTsetTuple = ((1, "name1", "name2", 1L, 50L),)
-        obsTsetTuples = self._iDb.cursor.fetchall()
-        self.assertEqual( expTsetTuple, obsTsetTuples )
-    
-    def test_insertList ( self ):
-        self._iDb.createTable( self._table, "set", "" )
-        set1 = Set()
-        set1.setFromString( "1\tname1\tdesc1\t1\t120\n" )
-        set2 = Set()
-        set2.setFromString( "2\tname2\tdesc2\t1\t20\n" )
-        lset = [ set1, set2 ]
-        self._tSetA.insertList( lset )
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._iDb.execute( sqlCmd )
-        expTsetTuple = ((1, "name1", "desc1", 1, 120), (2, "name2", "desc2", 1, 20))
-        obsTsetTuples = self._iDb.cursor.fetchall()
-        self.assertEqual( expTsetTuple, obsTsetTuples )
-
-    def test_getIdList(self):
-        set2Insert = Set()
-        set2Insert.id = 1
-        set2Insert.name = "name1"
-        set2Insert.seqname = "name2"
-        set2Insert.start = 1
-        set2Insert.end = 50
-        self._iDb.createTable( self._table, "set", "" )
-        self._tSetA.insert( set2Insert )
-        l = self._tSetA.getIdList()
-        self.assertEqual( set2Insert.id, l[0] )
-        
-    def test_getSeqNameList(self):
-        self._iDb.createTable( self._table, "set", "" )
-        set1 = Set()
-        set1.setFromString( "1\tname1\tdesc1\t1\t120\n" )
-        set2 = Set()
-        set2.setFromString( "2\tname2\tdesc2\t1\t20\n" )
-        set3 = Set()
-        set3.setFromString( "2\tname2\tdesc2\t1\t50\n" )
-        for m in [ set1, set2, set3 ]: self._tSetA.insert( m )
-        lExp = ["desc1", "desc2"]
-        lObs = self._tSetA.getSeqNameList()
-        self.assertEqual( lObs, lExp )
-        
-    def test_getSetListFromSeqName(self):
-        self._iDb.createTable( self._table, "set", "" )
-        set1 = Set()
-        set1.setFromString( "1\tname1\tdesc1\t1\t120\n" )
-        set2 = Set()
-        set2.setFromString( "2\tname2\tdesc2\t1\t20\n" )
-        set3 = Set()
-        set3.setFromString( "3\tname2\tdesc2\t1\t50\n" )
-        for m in [ set1, set2, set3 ]: self._tSetA.insert( m )
-        explSet = [Set( 2,"name2", "desc2", 1, 20), Set( 3,"name2", "desc2", 1, 50)]
-        obslSet = self._tSetA.getSetListFromSeqName("desc2")
-        self.assertEqual( explSet, obslSet )
-
-    def test_getSetListFromId(self):
-        self._iDb.createTable( self._table, "set", "" )
-        set1 = Set()
-        set1.setFromString( "1\tname1\tdesc1\t1\t120\n" )
-        set2 = Set()
-        set2.setFromString( "2\tname2\tdesc2\t1\t20\n" )
-        lset = [ set1, set2 ]
-        self._tSetA.insertList( lset )
-        explSet = [Set( 2,"name2", "desc2", 1, 20)]
-        obslSet = self._tSetA.getSetListFromId(2)
-        self.assertEqual( explSet, obslSet )
-      
-    def test_getSetListFromIdList(self):
-        self._iDb.createTable( self._table, "set", "" )
-        set1 = Set()
-        set1.setFromString( "1\tname1\tdesc1\t1\t120\n" )
-        set2 = Set()
-        set2.setFromString( "2\tname2\tdesc2\t1\t20\n" )
-        lset = [ set1, set2 ]
-        self._tSetA.insertList( lset )
-        explSet = [Set( 1, "name1", "desc1", 1, 120), Set( 2,"name2", "desc2", 1, 20)]
-        lId = [1, 2]
-        obslSet = self._tSetA.getSetListFromIdList(lId)
-        self.assertEqual( explSet, obslSet )
-     
-    def test_getSetListFromIdList_emptyList(self):
-        self._iDb.createTable( self._table, "set", "" )
-        set1 = Set()
-        set1.setFromString( "1\tname1\tdesc1\t1\t120\n" )
-        set2 = Set()
-        set2.setFromString( "2\tname2\tdesc2\t1\t20\n" )
-        lset = [ set1, set2 ]
-        self._tSetA.insertList( lset )
-        explSet = []
-        lId = []
-        obslSet = self._tSetA.getSetListFromIdList(lId)
-        self.assertEqual( explSet, obslSet )
-     
-    def test_getSetListOverlappingCoord(self):
-        self._iDb.createTable( self._table, "set", "" )
-        set1 = Set()
-        set1.setFromString( "1\tname1\tdesc2\t1\t120\n" )
-        set2 = Set()
-        set2.setFromString( "2\tname2\tdesc2\t1\t20\n" )
-        lset = [ set1, set2 ]
-        self._tSetA.insertList( lset )
-        explSet = [Set( 1,"name1", "desc2", 1, 120)]
-        obslSet = self._tSetA.getSetListOverlappingCoord("desc2", 30, 70)
-        self.assertEqual( explSet, obslSet )
-      
-    def test_deleteFromId(self):
-        self._iDb.createTable( self._table, "set", "" )
-        set1 = Set()
-        set1.setFromString( "1\tname1\tdesc1\t1\t120\n" )
-        set2 = Set()
-        set2.setFromString( "2\tname2\tdesc2\t1\t20\n" )
-        set3 = Set()
-        set3.setFromString( "3\tname2\tdesc3\t1\t50\n" )
-        for m in [ set1, set2, set3 ]: self._tSetA.insert( m )
-        self._tSetA.deleteFromId(1)
-        expTSetTuples = (( 2,"name2", "desc2", 1, 20), ( 3,"name2", "desc3", 1, 50))
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._iDb.execute( sqlCmd )
-        obsTsetTuples = self._iDb.cursor.fetchall()
-        
-        self.assertEqual( expTSetTuples, obsTsetTuples )
-  
-    def test_deleteFromIdList(self):
-        self._iDb.createTable( self._table, "set", "" )
-        set1 = Set()
-        set1.setFromString( "1\tname1\tdesc1\t1\t120\n" )
-        set2 = Set()
-        set2.setFromString( "2\tname2\tdesc2\t1\t20\n" )
-        set3 = Set()
-        set3.setFromString( "3\tname2\tdesc3\t1\t50\n" )
-        for m in [ set1, set2, set3 ]: self._tSetA.insert( m )
-        lId2del = [1, 2]
-        self._tSetA.deleteFromIdList(lId2del)
-        expTSetTuples = (( 3,"name2", "desc3", 1, 50),)
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._iDb.execute( sqlCmd )
-        obsTsetTuples = self._iDb.cursor.fetchall()
-        
-        self.assertEqual( expTSetTuples, obsTsetTuples )
-    
-    def test_deleteFromIdListWithEmptyList(self):
-        self._iDb.createTable( self._table, "set", "" )
-        set1 = Set()
-        set1.setFromString( "1\tname1\tdesc1\t1\t120\n" )
-        set2 = Set()
-        set2.setFromString( "2\tname2\tdesc2\t1\t20\n" )
-        set3 = Set()
-        set3.setFromString( "3\tname2\tdesc3\t1\t50\n" )
-        for m in [ set1, set2, set3 ]: self._tSetA.insert( m )
-        lId2del = []
-        self._tSetA.deleteFromIdList(lId2del)
-        expTSetTuples = ((1L, 'name1', 'desc1', 1L, 120L), (2L, 'name2', 'desc2', 1L, 20L), (3L, 'name2', 'desc3', 1L, 50L))
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._iDb.execute( sqlCmd )
-        obsTsetTuples = self._iDb.cursor.fetchall()
-        
-        self.assertEqual( expTSetTuples, obsTsetTuples )
-     
-    def test_joinTwoSets(self):
-        self._iDb.createTable( self._table, "set", "" )
-        idSet1 = 5
-        set1 = Set()
-        set1.setFromString( "5\tname1\tdesc1\t1\t120\n" ) 
-        idSet2 = 2
-        set2 = Set()
-        set2.setFromString( "2\tname2\tdesc2\t1\t20\n" )
-        lset = [ set1, set2 ]
-        self._tSetA.insertList( lset )
-        self._tSetA.joinTwoSets(idSet1, idSet2)
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._iDb.execute( sqlCmd )
-        
-        expTSetTuples = ((2L, "name1", "desc1", 1L, 120L ), (2L, "name2", "desc2", 1L, 20L ))
-        obsTSetTuples = self._iDb.cursor.fetchall()
-        
-        self.assertEqual( expTSetTuples, obsTSetTuples)
-        self._iDb.dropTable(self._table)
-     
-    def test_joinTwoSetsWhereId1InfId2(self):
-        self._iDb.createTable( self._table, "set", "" )
-        idSet1 = 2
-        set1 = Set()
-        set1.setFromString( "5\tname1\tdesc1\t1\t120\n" ) 
-        
-        idSet2 = 5
-        set2 = Set()
-        set2.setFromString( "2\tname2\tdesc2\t1\t20\n" )
-        
-        lset = [ set1, set2 ]
-        self._tSetA.insertList( lset )
-
-        self._tSetA.joinTwoSets(idSet1, idSet2)
-        
-        sqlCmd = "SELECT * FROM %s" % ( self._table )
-        self._iDb.execute( sqlCmd )
-        
-        expTSetTuples = ((2L, "name1", "desc1", 1L, 120L ), (2L, "name2", "desc2", 1L, 20L ))
-        obsTSetTuples = self._iDb.cursor.fetchall()
-        
-        self.assertEqual( expTSetTuples, obsTSetTuples)
-        self._iDb.dropTable(self._table)
-     
-    def test_getNewId(self):
-        self._iDb.createTable( self._table, "set", "" )
-        set1 = Set()
-        set1.setFromString( "1\tname1\tdesc1\t1\t120\n" ) 
-        set2 = Set()
-        set2.setFromString( "2\tname2\tdesc2\t1\t20\n" )
-        set3 = Set()
-        set3.setFromString( "5\tname1\tdesc1\t1\t120\n" ) 
-        set4 = Set()
-        set4.setFromString( "8\tname2\tdesc2\t1\t20\n" )
-        lset = [ set1, set2, set3, set4 ]
-        self._tSetA.insertList( lset )
-        expId = 9
-        obsId = self._tSetA.getNewId()
-        self.assertEqual( expId, obsId)
-        self._iDb.dropTable(self._table)
-     
-    def test_getNewId_set_null(self):
-        self._iDb.createTable( self._table, "set", "" )
-        set1 = Set()
-        lset = [ set1 ]
-        self._tSetA.insertList( lset )
-        expId = 1
-        obsId = self._tSetA.getNewId()
-        self.assertEqual( expId, obsId)
-        self._iDb.dropTable(self._table)  
-        
-    def test_getListOfAllSets( self ):
-        self._iDb.createTable( self._table, "set" )
-        s1 = Set()
-        s1.setFromString( "1\tchr1\tTE3\t1\t10\n" )
-        s2a = Set()
-        s2a.setFromString( "2\tchr1\tTE2\t2\t9\n" )
-        s2b = Set()
-        s2b.setFromString( "2\tchr1\tTE2\t12\t19\n" )
-        lSets = [ s1, s2a, s2b ]
-        self._tSetA.insertList( lSets )
-        expLSets = [ s1, s2a, s2b ]
-        obsLSets = self._tSetA.getListOfAllSets()
-        self.assertEqual( expLSets, obsLSets )
-        
-    def test_getListOfAllSets_empty_table( self ):
-        self._iDb.createTable( self._table, "set" )
-        expList = []
-        obsList = self._tSetA.getListOfAllSets()
-        self.assertEqual( expList, obsList )     
-        
-test_suite = unittest.TestSuite()
-test_suite.addTest( unittest.makeSuite( Test_TableSetAdaptator ) )       
-if __name__ == "__main__":
-    unittest.TextTestRunner(verbosity=2).run( test_suite )
--- a/commons/core/sql/test/Tst_F_RepetJob.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,236 +0,0 @@
-import os
-import time
-import sys
-import stat
-import unittest
-import glob
-from commons.core.sql.DbMySql import DbMySql
-from commons.core.sql.RepetJob import RepetJob
-from commons.core.sql.Job import Job
-
-class Test_F_RepetJob(unittest.TestCase):
-
-    def setUp(self):
-        self._jobTableName = "dummyJobTable"
-        self._db = DbMySql()
-        self._iRepetJob = RepetJob()
-        self._configFileName = "dummyConfigFile"
-        configF = open(self._configFileName, "w" )
-        configF.write( "[repet_env]\n" )
-        configF.write( "repet_host: %s\n" % ( os.environ["REPET_HOST"] ) )
-        configF.write( "repet_user: %s\n" % ( os.environ["REPET_USER"] ) )
-        configF.write( "repet_pw: %s\n" % ( os.environ["REPET_PW"] ) )
-        configF.write( "repet_db: %s\n" % ( os.environ["REPET_DB"] ) )
-        configF.write( "repet_port: %s\n" % ( os.environ["REPET_PORT"] ) )
-        configF.close()
-
-    def tearDown(self):
-        self._iRepetJob = None
-        self._db.dropTable( self._jobTableName )
-        self._db.close()
-        os.remove(self._configFileName)
-    
-    def test_submitJob_with_multiple_jobs(self):
-        job1 = self._createJobInstance("job1")
-        self._createLauncherFile(job1)
-
-        job2 = self._createJobInstance("job2")
-        self._createLauncherFile(job2)
-
-        job3 = self._createJobInstance("job3")
-        self._createLauncherFile(job3)
-        
-        self._iRepetJob.submitJob( job1, maxNbWaitingJobs=3, checkInterval=5, verbose=0 )
-        self._iRepetJob.submitJob( job2, maxNbWaitingJobs=3, checkInterval=5, verbose=0 )
-        self._iRepetJob.submitJob( job3, maxNbWaitingJobs=3, checkInterval=5, verbose=0 )
-
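-        # Wait long enough for the three "date;sleep 5;date" jobs to be scheduled
-        # by SGE and to finish before checking their status.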
-        time.sleep(70)
-        
-        expJobStatus = "finished"
-        obsJobStatus1 = self._iRepetJob.getJobStatus(job1)
-        obsJobStatus2 = self._iRepetJob.getJobStatus(job2)
-        obsJobStatus3 = self._iRepetJob.getJobStatus(job3)
-        
-        self.assertEquals(expJobStatus, obsJobStatus1)
-        self.assertEquals(expJobStatus, obsJobStatus2)
-        self.assertEquals(expJobStatus, obsJobStatus3)
-        
-        jobName1 = job1.jobname
-        jobName2 = job2.jobname
-        jobName3 = job3.jobname
-        
-        expErrorFilePrefix1 = jobName1+ ".e" 
-        expOutputFilePrefix1 = jobName1 + ".o"
-        expErrorFilePrefix2 = jobName2 + ".e" 
-        expOutputFilePrefix2 = jobName2 + ".o"
-        expErrorFilePrefix3 = jobName3 + ".e" 
-        expOutputFilePrefix3 = jobName3 + ".o"
-        
-        lErrorFiles1 = glob.glob(expErrorFilePrefix1 + "*")
-        lOutputFiles1 = glob.glob(expOutputFilePrefix1 + "*")
-        lErrorFiles2 = glob.glob(expErrorFilePrefix2 + "*")
-        lOutputFiles2 = glob.glob(expOutputFilePrefix2 + "*")
-        lErrorFiles3 = glob.glob(expErrorFilePrefix3 + "*")
-        lOutputFiles3 = glob.glob(expOutputFilePrefix3 + "*")
-        
-        isLErrorFileNotEmpty1 = (len(lErrorFiles1) != 0) 
-        isLOutputFileNotEmpty1 = (len(lOutputFiles1) != 0)
-        isLErrorFileNotEmpty2 = (len(lErrorFiles2) != 0) 
-        isLOutputFileNotEmpty2 = (len(lOutputFiles2) != 0)
-        isLErrorFileNotEmpty3 = (len(lErrorFiles3) != 0) 
-        isLOutputFileNotEmpty3 = (len(lOutputFiles3) != 0)
-        
-        os.system("rm launcherFileTest*.py *.e* *.o*")
-        self.assertTrue(isLErrorFileNotEmpty1 and isLOutputFileNotEmpty1)
-        self.assertTrue(isLErrorFileNotEmpty2 and isLOutputFileNotEmpty2)
-        self.assertTrue(isLErrorFileNotEmpty3 and isLOutputFileNotEmpty3)
-
-    def test_submitJob_job_already_submitted(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = self._createJobInstance("job")
-        self._iRepetJob.recordJob(iJob)
-        
-        isSysExitRaised = False
-        try:
-            self._iRepetJob.submitJob(iJob)
-        except SystemExit:
-            isSysExitRaised = True
-        self.assertTrue(isSysExitRaised)
-    
-    def test_waitJobGroup_with_several_nbTimeOut_waiting(self):
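-        # A job recorded as "running" but no longer handled by SGE should make
-        # waitJobGroup() write an error to stderr and raise SystemExit once
-        # timeOutPerJob is exceeded.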
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = self._createJobInstance("job")
-        self._createLauncherFile(iJob)
-        self._iRepetJob.recordJob(iJob)
-        self._iRepetJob.changeJobStatus(iJob, "running", "method")
-        
-        expMsg = "ERROR: job '%s', supposedly still running, is not handled by SGE anymore\n" % ( iJob.jobid )
-        
-        obsError = "obsError.txt"
-        obsErrorHandler = open(obsError, "w")
-        stderrRef = sys.stderr
-        sys.stderr = obsErrorHandler
-        
-        isSysExitRaised = False
-        try:
-            self._iRepetJob.waitJobGroup(self._jobTableName ,iJob.groupid, timeOutPerJob=3)
-        except SystemExit:
-            isSysExitRaised = True
-           
-        obsErrorHandler.close()
-        
-        obsErrorHandler = open(obsError, "r")
-        obsMsg = obsErrorHandler.readline()
-        obsErrorHandler.close()
-       
-        sys.stderr = stderrRef
-        os.remove(obsError)
-        os.system("rm launcherFileTest*.py")
-        self.assertTrue(isSysExitRaised)
-        self.assertEquals(expMsg, obsMsg)
-         
-    def test_waitJobGroup_with_error_job_maxRelaunch_two(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = self._createJobInstance("job")
-        self._createLauncherFile(iJob)
-        
-        self._iRepetJob.recordJob(iJob)
-        self._iRepetJob.changeJobStatus(iJob, "error", "method")
-        
-        self._iRepetJob.waitJobGroup(self._jobTableName ,iJob.groupid, 0, 2)
-        
-        time.sleep(10)
-        
-        expJobStatus = "finished"
-        obsJobStatus1 = self._iRepetJob.getJobStatus(iJob)
-        
-        self.assertEquals(expJobStatus, obsJobStatus1)
-        
-        jobName = iJob.jobname
-        
-        expErrorFilePrefix1 = jobName + ".e" 
-        expOutputFilePrefix1 = jobName + ".o"
-        
-        lErrorFiles1 = glob.glob(expErrorFilePrefix1 + "*")
-        lOutputFiles1 = glob.glob(expOutputFilePrefix1 + "*")
-        
-        isLErrorFileNotEmpty1 = (len(lErrorFiles1) != 0) 
-        isLOutputFileNotEmpty1 = (len(lOutputFiles1) != 0)
-        
-        self._iRepetJob.removeJob(iJob) 
-        os.system("rm launcherFileTest*.py *.e* *.o*")
-        self.assertTrue(isLErrorFileNotEmpty1 and isLOutputFileNotEmpty1)
-        
-
-    def test_isJobStillHandledBySge_True(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = self._createJobInstance("job")
-        self._createLauncherFile(iJob)
-        self._iRepetJob.submitJob(iJob)
-        
-        isJobHandledBySge = self._iRepetJob.isJobStillHandledBySge(iJob.jobid, iJob.jobname)
-        os.system("rm launcherFileTest*.py")
-        
-        self.assertTrue(isJobHandledBySge)
-
-    def test_isJobStillHandledBySge_False(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = self._createJobInstance("job")
-        self._createLauncherFile(iJob)
-        self._iRepetJob.recordJob(iJob)
-        
-        isJobHandledBySge = self._iRepetJob.isJobStillHandledBySge(iJob.jobid, iJob.jobname)
-        os.system("rm launcherFileTest*.py")
-        
-        self.assertFalse(isJobHandledBySge)
-        
-    def _createJobInstance(self, name):
-        return Job(self._jobTableName, 0, name, "test", "", "date;sleep 5;date", "./launcherFileTest_"+ name +".py")
-    
-    def _createLauncherFile(self, iJob):
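-        # Write an executable launcher script for iJob that (1) flags the job as
-        # "running" via srptChangeJobStatus.py, (2) runs iJob.command and reports a
-        # non-zero exit status, then (3) flags the job as "finished" before exiting.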
-        jobFileHandler = open( iJob.launcher , "w" )
-
-        launcher = "#!/usr/bin/python\n"
-        launcher += "import os\n"
-        launcher += "import sys\n"
-        
-        launcher += "print \"system:\", os.uname()\n"
-        launcher += "sys.stdout.flush()\n"
-        newStatus = "running"
-        prg = "%s/bin/srptChangeJobStatus.py" % (os.environ["REPET_PATH"])
-        cmd = prg
-        cmd += " -t %s" % ( iJob.tablename )
-        cmd += " -n %s" % ( iJob.jobname )
-        cmd += " -g %s" % ( iJob.groupid )
-        if iJob.queue != "":
-            cmd += " -q %s" % ( iJob.queue )
-        cmd += " -s %s" % ( newStatus )
-        cmd += " -c %s"  %( self._configFileName )
-        cmd += " -v 1"
-        launcher +="os.system( \"" + cmd + "\" )\n"
-        
-        launcher += "print \"LAUNCH: "+ iJob.command + "\"\n"
-        launcher += "sys.stdout.flush()\n"
-        launcher += "exitStatus = os.system (\"" + iJob.command + "\")\n"
-        launcher += "if exitStatus != 0:\n"
-        launcher += "\tprint \"ERROR: "+  iJob.command + " returned exit status '%i'\" % ( exitStatus )\n"
-        
-        newStatus = "finished"
-        prg = os.environ["REPET_PATH"] + "/bin/srptChangeJobStatus.py"
-        cmd = prg
-        cmd += " -t %s" % ( iJob.tablename )
-        cmd += " -n %s" % ( iJob.jobname )
-        cmd += " -g %s" % ( iJob.groupid )
-        if iJob.queue != "":
-            cmd += " -q %s" % ( iJob.queue )
-        cmd += " -s %s" % ( newStatus )
-        cmd += " -c %s"  %( self._configFileName )
-        cmd += " -v 1"
-        launcher +="os.system( \"" + cmd + "\" )\n"
-        launcher += "sys.exit(0)\n"
-        jobFileHandler.write(launcher)
-        jobFileHandler.close()
-        os.chmod( iJob.launcher, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC )
-
-if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
--- a/commons/core/sql/test/Tst_RepetJob.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,395 +0,0 @@
-import unittest
-import sys
-import os
-import time
-from commons.core.sql.DbMySql import DbMySql
-from commons.core.sql.Job import Job
-from commons.core.sql.RepetJob import RepetJob
-from commons.core.utils.FileUtils import FileUtils
-
-#TODO: to be removed... => replace all RepetJob() occurrences with TableJobAdaptator()...
-class Test_RepetJob( unittest.TestCase ):
-    
-    def setUp(self):
-        self._jobTableName = "dummyJobTable"
-        self._db = DbMySql()
-        self._iRepetJob = RepetJob()
-    
-    def tearDown(self):
-        self._iRepetJob = None
-        self._db.close()
-        
-    def _createJobInstance(self):
-        return Job( self._jobTableName, 0, "job1", "groupid", "queue", "command", "launcherFile", "node", "lResources" )
-    
-    def test_createJobTable_is_table_created(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-    
-        isTableCreated = self._db.doesTableExist(self._jobTableName)
-        self.assertTrue(isTableCreated)
-    
-        self._db.dropTable(self._jobTableName)
-    
-    def test_createJobTable_field_list(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-
-        obsLField = self._db.getFieldList(self._jobTableName)
-        expLField = ["jobid", "jobname", "groupid", "command", "launcher", "queue", "status", "time", "node"]
-    
-        self.assertEquals(expLField, obsLField)
-    
-        self._db.dropTable(self._jobTableName)
-    
-    def test_recordJob(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = self._createJobInstance()
-        self._iRepetJob.recordJob(iJob)
-    
-        qryParams = "SELECT jobid, groupid, command, launcher, queue, status, node FROM " + self._jobTableName + " WHERE jobid = %s" 
-        params = (iJob.jobid,)
-        
-        self._db.execute(qryParams, params)
-        
-        tObs = self._db.fetchall()[0]
-        tExp =(iJob.jobid, iJob.groupid, iJob.command, iJob.launcher, iJob.queue, "waiting", "?")
-        
-        self.assertEquals(tExp,tObs)
-
-        self._db.dropTable(self._jobTableName)
-    
-    def test_removeJob(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = self._createJobInstance()
-        self._iRepetJob.recordJob(iJob)
-
-        self._iRepetJob.removeJob(iJob)
-        
-        isTableEmpty = self._db.isEmpty(self._jobTableName)
-        
-        self.assertTrue(isTableEmpty)
-        
-        self._db.dropTable(self._jobTableName)
-        
-    def test_getJobStatus(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = self._createJobInstance()
-        self._iRepetJob.recordJob(iJob)
-
-        expStatus = "waiting"
-        obsStatus = self._iRepetJob.getJobStatus(iJob)
-        
-        self.assertEquals(expStatus, obsStatus)
-        self._db.dropTable(self._jobTableName)
-    
-    def test_getJobStatus_unknown(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = self._createJobInstance()        
-
-        expStatus = "unknown"
-        obsStatus = self._iRepetJob.getJobStatus(iJob)
-        
-        self.assertEquals(expStatus, obsStatus)
-        self._db.dropTable(self._jobTableName)
-    
-    def test_getJobStatus_no_name(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = Job( self._jobTableName, 20, "", "groupid", "queue", "command", "launcherFile", "node", "lResources" ) 
-        
-        expStatus = "unknown"
-        obsStatus = self._iRepetJob.getJobStatus(iJob)
-        
-        self.assertEquals(expStatus, obsStatus)
-        self._db.dropTable(self._jobTableName)
-        
-    def test_getJobStatus_non_unique_job(self):
-        # Warning: this case should not happen, because recordJob() begins by calling removeJob()
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = self._createJobInstance()
-        sqlCmd = "INSERT INTO %s" % ( iJob.tablename )
-        sqlCmd += " VALUES ("
-        sqlCmd += " \"%s\"," % ( iJob.jobid )
-        sqlCmd += " \"%s\"," % ( iJob.jobname )
-        sqlCmd += " \"%s\"," % ( iJob.groupid )
-        sqlCmd += " \"%s\"," % ( iJob.command.replace("\"","\'") )
-        sqlCmd += " \"%s\"," % ( iJob.launcher )
-        sqlCmd += " \"%s\"," % ( iJob.queue )
-        sqlCmd += " \"waiting\","
-        sqlCmd += " \"%s\"," % ( time.strftime( "%Y-%m-%d %H:%M:%S" ) )
-        sqlCmd += " \"?\" );"
-        self._db.execute( sqlCmd )
-        self._db.execute( sqlCmd )
-        
-        expError = "expError.txt"
-        expErrorHandler = open(expError, "w")
-        expErrorHandler.write("ERROR while getting job status: non-unique jobs\n")
-        expErrorHandler.close()
-        
-        obsError = "obsError.txt"
-        obsErrorHandler = open(obsError, "w")
-        stderrRef = sys.stderr
-        sys.stderr = obsErrorHandler
-        
-        isSysExitRaised = False
-        try:
-            self._iRepetJob.getJobStatus(iJob)
-        except SystemExit:
-            isSysExitRaised = True
-           
-        obsErrorHandler.close()
-        
-        self.assertTrue(isSysExitRaised)
-        self.assertTrue(FileUtils.are2FilesIdentical(expError, obsError))
-        
-        sys.stderr = stderrRef
-        os.remove(obsError)
-        os.remove(expError)
-         
-        self._db.dropTable(self._jobTableName)
-        
-    def test_updateInfoTable(self):
-        self._iRepetJob.updateInfoTable(self._jobTableName, "dummyInfo")
-        
-        qryParams = "SELECT name, file FROM  info_tables WHERE name=%s AND file=%s"
-        params = (self._jobTableName, "dummyInfo")
-        
-        self._db.execute(qryParams, params)
-        tObs = self._db.fetchall()[0]
-        tExp = (self._jobTableName, "dummyInfo")
-        
-        self.assertEquals(tExp, tObs)
-        
-        self._db.dropTable(self._jobTableName)
-        
-    def test_changeJobStatus(self):
-        expStatus = "finished"
-        
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = self._createJobInstance()
-        self._iRepetJob.recordJob(iJob)
-        self._iRepetJob.changeJobStatus(iJob, expStatus, "method")
-        
-        qryParams = "SELECT status FROM " + self._jobTableName + " WHERE jobid =%s AND groupid=%s AND queue=%s" 
-        params = (iJob.jobid, iJob.groupid, iJob.queue)
-        self._db.execute(qryParams, params)
-
-        obsStatus = self._db.fetchall()[0][0]
-        self.assertEquals(expStatus, obsStatus)
-        self._iRepetJob.removeJob(iJob)
-        self._db.dropTable(self._jobTableName)
-
-    def test_getCountStatus(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        
-        iJob1 = self._createJobInstance()
-        iJob2 = Job(self._jobTableName, 1, "job2", "groupid", "queue2", "command2", "launcherFile2", "node2", "lResources2")
-        
-        self._iRepetJob.recordJob(iJob1)
-        self._iRepetJob.recordJob(iJob2)
-
-        expCount = 2
-        obsCount = self._iRepetJob.getCountStatus(self._jobTableName, iJob1.groupid, "waiting")
-        
-        self.assertEquals(expCount, obsCount)
-        
-    def test_getCountStatus_without_res(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        expCount = 0
-        
-        obsCount = self._iRepetJob.getCountStatus(self._jobTableName, "groupid", "waiting")
-        
-        self.assertEquals(expCount, obsCount)
-   
-    def test_cleanJobGroup(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob1 = self._createJobInstance()
-        iJob2 = Job(self._jobTableName, "jobid2", iJob1.groupid, "queue2", "command2", "launcherFile2", "node2", "lResources2")
-        iJob3 = Job(self._jobTableName, "jobid2", "groupid3", "queue2", "command2", "launcherFile2", "node2", "lResources2")
-        
-        self._iRepetJob.recordJob(iJob1)
-        self._iRepetJob.recordJob(iJob2)
-        self._iRepetJob.recordJob(iJob3)
-        
-        self._iRepetJob.cleanJobGroup(self._jobTableName, iJob1.groupid)
-        
-        qryParams = "SELECT count(*) FROM " + self._jobTableName  
-        
-        self._db.execute(qryParams)
-        
-        expCount = 1
-        obsCount = self._db.fetchall()[0][0]
-        
-        self.assertEquals(expCount, obsCount)
-        
-        self._iRepetJob.removeJob(iJob3)
-        self._db.dropTable(self._jobTableName)
-
-    def test_hasUnfinishedJob(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob1 = self._createJobInstance()
-        iJob2 = Job(self._jobTableName, 0, "jobname2", iJob1.groupid, "queue2", "command2", "launcherFile2", "node2", "lResources2")
-        iJob3 = Job(self._jobTableName, 0, "jobname3", "groupid3", "queue2", "command2", "launcherFile2", "node2", "lResources2")
-        
-        self._iRepetJob.recordJob(iJob1)
-        self._iRepetJob.recordJob(iJob2)
-        self._iRepetJob.recordJob(iJob3)
-        
-        self._iRepetJob.changeJobStatus(iJob2, "finished", "method")
-        
-        expHasGrpIdFinished = True
-        obsHasGrpIdFinished = self._iRepetJob.hasUnfinishedJob(self._jobTableName, iJob1.groupid)
-        
-        self.assertEquals(expHasGrpIdFinished, obsHasGrpIdFinished)
-        
-        self._iRepetJob.removeJob(iJob1)
-        self._iRepetJob.removeJob(iJob2)
-        self._iRepetJob.removeJob(iJob3)
-        self._db.dropTable(self._jobTableName)
-        
-    def test_hasUnfinishedJob_JobTableNotExists(self):
-        iJob1 = self._createJobInstance()
-        
-        expHasGrpIdFinished = False
-        obsHasGrpIdFinished = self._iRepetJob.hasUnfinishedJob(self._jobTableName, iJob1.groupid)
-        
-        self.assertEquals(expHasGrpIdFinished, obsHasGrpIdFinished)
-        
-    def test_hasUnfinishedJob_AllJobsFinished(self): 
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob1 = self._createJobInstance()
-        iJob2 = Job(self._jobTableName, "jobid2", iJob1.groupid, "queue2", "command2", "launcherFile2", "node2", "lResources2")
-        iJob3 = Job(self._jobTableName, "jobid2", "groupid3", "queue2", "command2", "launcherFile2", "node2", "lResources2")
-        
-        self._iRepetJob.recordJob(iJob1)
-        self._iRepetJob.recordJob(iJob2)
-        self._iRepetJob.recordJob(iJob3)
-        
-        self._iRepetJob.changeJobStatus(iJob1, "finished", "method")
-        self._iRepetJob.changeJobStatus(iJob2, "finished", "method")
-        
-        expHasGrpIdFinished = False
-        obsHasGrpIdFinished = self._iRepetJob.hasUnfinishedJob(self._jobTableName, iJob1.groupid)
-        
-        self.assertEquals(expHasGrpIdFinished, obsHasGrpIdFinished)
-        
-        self._iRepetJob.removeJob(iJob1)
-        self._iRepetJob.removeJob(iJob2)
-        self._iRepetJob.removeJob(iJob3)
-        self._db.dropTable(self._jobTableName)
-        
-    def test_waitJobGroup_with_finished_job(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = self._createJobInstance()
-        self._iRepetJob.recordJob(iJob)
-        self._iRepetJob.changeJobStatus(iJob, "finished", "method")
-        
-        Obs = self._iRepetJob.waitJobGroup(self._jobTableName ,iJob.groupid, 0)
-        Exp = None
-        
-        self.assertEquals(Exp, Obs)
-        self._iRepetJob.removeJob(iJob) 
-        
-    def test_waitJobGroup_with_error_job_maxRelaunch_zero(self):
-        Obs = False
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = self._createJobInstance()
-        self._iRepetJob.recordJob(iJob)
-        self._iRepetJob.changeJobStatus(iJob, "error", "method")
-        
-        try:
-            self._iRepetJob.waitJobGroup(self._jobTableName ,iJob.groupid, 0, 0)
-        except SystemExit:
-            Obs = True
-        
-        self.assertTrue(Obs)
-        self._iRepetJob.removeJob(iJob)
-        
-    def test_setJobIdFromSge(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob = self._createJobInstance()
-        self._iRepetJob.recordJob(iJob)
-        self._iRepetJob.setJobIdFromSge(iJob, 1000)
-        
-        qryParams = "SELECT jobid FROM " + self._jobTableName + " WHERE jobname = %s AND queue = %s AND groupid = %s" 
-        params = (iJob.jobname, iJob.queue, iJob.groupid)
-        
-        self._db.execute(qryParams, params)
-        
-        tObs = self._db.fetchall()[0]
-        tExp =(1000,)
-        
-        self.assertEquals(tExp,tObs)
-        
-        self._db.dropTable(self._jobTableName)
-        
-    def test_submitJob_8_fields_for_job_table(self):
-        iJob = self._createJobInstance()
-        self._db.dropTable(self._jobTableName)
-        sqlCmd = "CREATE TABLE " + self._jobTableName 
-        sqlCmd += " ( jobid INT UNSIGNED"
-        sqlCmd += ", groupid VARCHAR(255)"
-        sqlCmd += ", command TEXT"
-        sqlCmd += ", launcher VARCHAR(1024)"
-        sqlCmd += ", queue VARCHAR(255)"
-        sqlCmd += ", status VARCHAR(255)"
-        sqlCmd += ", time DATETIME"
-        sqlCmd += ", node VARCHAR(255) )"
-        self._db.execute(sqlCmd)
-        
-        self._iRepetJob.submitJob(iJob)
-        
-        expFieldsNb = 9
-        obsFieldsNb = len(self._iRepetJob.getFieldList(self._jobTableName))
-        
-        self.assertEquals(expFieldsNb, obsFieldsNb)
-        
-        self._db.dropTable(self._jobTableName)
-        
-    def test_getNodesListByGroupId(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob1 = Job( self._jobTableName, 0, "job1", "groupid", "queue", "command", "launcherFile", "node1", "lResources" )
-        iJob2 = Job( self._jobTableName, 1, "job2", "groupid", "queue", "command", "launcherFile", "node2", "lResources" )
-        iJob3 = Job( self._jobTableName, 2, "job3", "groupid2", "queue", "command", "launcherFile", "node3", "lResources" )
-        
-        self._insertJob(iJob1)
-        self._insertJob(iJob2)
-        self._insertJob(iJob3)
-        
-        expNodeList = ["node1", "node2"]
-        obsNodeList = self._iRepetJob.getNodesListByGroupId(self._jobTableName, "groupid")
-        self.assertEquals(expNodeList, obsNodeList)
-        
-        self._db.dropTable(self._jobTableName)
-        
-    def test_getNodesListByGroupId_empty_list(self):
-        self._iRepetJob.createTable(self._jobTableName, "jobs")
-        iJob1 = Job( self._jobTableName, 0, "job1", "groupid", "queue", "command", "launcherFile", "node1", "lResources" )
-        iJob2 = Job( self._jobTableName, 1, "job2", "groupid", "queue", "command", "launcherFile", "node2", "lResources" )
-        iJob3 = Job( self._jobTableName, 2, "job3", "groupid32", "queue", "command", "launcherFile", "node3", "lResources" )
-        
-        self._insertJob(iJob1)
-        self._insertJob(iJob2)
-        self._insertJob(iJob3)
-        
-        expNodeList = []
-        obsNodeList = self._iRepetJob.getNodesListByGroupId(self._jobTableName, "groupid3")
-        self.assertEquals(expNodeList, obsNodeList)
-        
-        self._db.dropTable(self._jobTableName)
-        
-    def _insertJob(self, iJob):
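-        # Insert the job row directly with an explicit node value, since recordJob()
-        # stores "?" as node (see test_recordJob); needed to test getNodesListByGroupId().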
-        self._iRepetJob.removeJob( iJob )
-        sqlCmd = "INSERT INTO %s" % ( iJob.tablename )
-        sqlCmd += " VALUES ("
-        sqlCmd += " \"%s\"," % ( iJob.jobid )
-        sqlCmd += " \"%s\"," % ( iJob.jobname )
-        sqlCmd += " \"%s\"," % ( iJob.groupid )
-        sqlCmd += " \"%s\"," % ( iJob.command.replace("\"","\'") )
-        sqlCmd += " \"%s\"," % ( iJob.launcher )
-        sqlCmd += " \"%s\"," % ( iJob.queue )
-        sqlCmd += " \"waiting\","
-        sqlCmd += " \"%s\"," % ( time.strftime( "%Y-%m-%d %H:%M:%S" ) )
-        sqlCmd += " \"%s\" );" % ( iJob.node )
-        self._iRepetJob.execute( sqlCmd )
-        
-if __name__ == "__main__":
-    unittest.main()
--- a/commons/core/stat/Stat.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,209 +0,0 @@
-import math
-
-class Stat(object):
-
-    def __init__(self, lValues = []):
-        self.reset()
-        if lValues != []:
-            self.fill(lValues)
-    
-    def __eq__(self, o):
-        self._lValues.sort()
-        o._lValues.sort()
-        return self._lValues == o._lValues and round(self._sum, 6) == round(o._sum, 6) \
-            and round(self._sumOfSquares, 6) == round(o._sumOfSquares, 6) and self._n == o._n \
-            and round(self._min, 6) == round(o._min, 6) and round(self._max, 6) == round(o._max, 6)
-            
-    def getValuesList(self):
-        return self._lValues
-    
-    def getSum(self):
-        return self._sum
-    
-    def getSumOfSquares(self):
-        return self._sumOfSquares
-    
-    def getValuesNumber(self):
-        return self._n
-    
-    def getMin(self):
-        return self._min
-    
-    def getMax(self):
-        return self._max
-
-    ## Reset all attributes
-    #
-    def reset(self):
-        self._lValues = []
-        self._sum = 0.0
-        self._sumOfSquares = 0.0
-        self._n = 0
-        self._max = 0.0
-        self._min = 0.0
-
-    ## Add a value to Stat instance list and update attributes
-    #
-    # @param v float value to add
-    #    
-    def add(self, v):
-        self._lValues.append( float(v) )
-        self._sum += float(v)
-        self._sumOfSquares += float(v) * float(v)
-        self._n = self._n + 1
-        if self._n == 1:
-            self._min = float(v)
-            self._max = float(v)
-        else:
-            if v > self._max:
-                self._max = float(v)
-            if v < self._min:
-                self._min = float(v)
-         
-    ## Add a list of values to Stat instance list and update attributes
-    #
-    # @param lValues list of float values to add
-    #    
-    def fill(self, lValues):
-        for v in lValues:
-            self.add(v)
-    
-    ## Get the arithmetic mean of the Stat instance list
-    #
-    # @return float
-    #
-    def mean(self):
-        if self._n == 0:
-            return 0
-        else:
-            return self._sum / float(self._n)
-    
-    ## Get the variance of the sample
-    # @note we consider a sample, not a population. So for calculation, we use n-1
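-    # @note the computation below uses the shortcut s2 = (sumOfSquares - n*mean^2) / (n-1);
-    #       e.g. for [1, 6, 3, 4, 9, 6]: (179 - 6 * (29 / 6.0) ** 2) / 5 = 7.76667 (cf. Test_Stat.test_var2)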
-    #
-    # @return float
-    #
-    def var(self):
-        if self._n < 2:
-            return 0
-        else:
-            variance = self._sumOfSquares/float(self._n - 1) - self._n/float(self._n - 1) * self.mean()*self.mean()
-            if round(variance, 10) == 0:
-                variance = 0
-            return variance
-
-    ## Get the standard deviation of the sample
-    #
-    # @return float
-    #
-    def sd(self):
-        return math.sqrt( self.var() )
-
-    ## Get the coefficient of variation of the sample
-    #
-    # @return float
-    #
-    def cv(self):
-        if self._n < 2 or self.mean() == 0.0:
-            return 0
-        else:
-            return self.sd() / self.mean()
-
-    ## Get the median of the sample
-    #
-    # @return number or "NA" (Not available)
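-    # @example an even-sized sample averages the two central values:
-    #          [1, 2, 3, 4, 2, 54, 6, 7] gives (3 + 4) / 2.0 = 3.5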
-    #
-    def median( self ):
-        if len(self._lValues) == 0:
-            return "NA"
-        if len(self._lValues) == 1:
-            return self._lValues[0]
-        self._lValues.sort()
-        m = int( math.ceil( len(self._lValues) / 2.0 ) )
-        if len(self._lValues) % 2:
-            return self._lValues[m-1]
-        else:
-            return ( self._lValues[m-1] + self._lValues[m] ) / 2.0
-        
-    ## Get the kurtosis (a measure of whether the data are peaked or flat relative to a normal distribution; 'coefficient d'aplatissement' in French).
-    #  k = 0 -> completely flat
-    #  k = 3 -> same as normal distribution
-    #  k >> 3 -> peak
-    #
-    # @return float 
-    #
-    def kurtosis(self):
-        numerator = 0
-        for i in self._lValues:
-            numerator += math.pow( i - self.mean(), 4 )
-        return numerator / float(self._n - 1) * self.sd() 
-
-    ## Prepare a string with calculations on your values
-    #
-    # @return string 
-    #
-    def string(self):
-        msg = ""
-        msg += "n=%d" % ( self._n )
-        msg += " mean=%5.3f" % ( self.mean() )
-        msg += " var=%5.3f" % ( self.var() )
-        msg += " sd=%5.3f" % ( self.sd() )
-        msg += " min=%5.3f" % ( self.getMin() )
-        median = self.median()
-        if median == "NA":
-            msg += " med=%s" % (median)
-        else:
-            msg += " med=%5.3f" % (median)
-        msg += " max=%5.3f" % ( self.getMax() )
-        return msg
-    
-    ## Print descriptive statistics
-    #
-    def view(self):
-        print self.string()
-
-    ## Return sorted list of values, ascending (default) or descending
-    #
-    # @return list
-    #
-    def sort( self, isReverse = False ):
-        self._lValues.sort(reverse = isReverse)
-        return self._lValues
-    
-    ## Give the quantile corresponding to the chosen percentage
-    #
-    # @return number 
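-    # @example for [0, 2.64, 1.64, -1, 5], quantile(0.25) returns the element at
-    #          index int(5 * 0.25) = 1 of the ascending sort [-1, 0, 1.64, 2.64, 5], i.e. 0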
-    #
-    def quantile( self, percentage ):
-        if self._n == 0:
-            return 0
-        elif percentage == 1:
-            return self.getMax()
-        else:
-            return self.sort()[int(self._n * percentage)]
-        
-    ## Prepare a string with quantile values
-    #
-    # @return string
-    #    
-    def stringQuantiles( self ):
-        return "n=%d min=%5.3f Q1=%5.3f median=%5.3f Q3=%5.3f max=%5.3f" % \
-               (self._n, self.quantile(0), self.quantile(0.25), self.quantile(0.5), self.quantile(0.75), self.quantile(1))
-
-    ## Print quantiles string
-    #
-    def viewQuantiles( self ):
-        print self.stringQuantiles()
-        
-    ## Compute N50 
-    # @return number
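-    # @example for lengths [10, 10, 2, 16, 3, 4, 5] the total is 50; summing the
-    #          reverse-sorted values 16, 10, ... first reaches half the total (25)
-    #          at 10, so N50 = 10 (cf. Test_Stat.test_N50)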
-    def N50(self ):
-        lSorted = self.sort(True)
-        midlValues = self.getSum() / 2
-        cumul = 0
-        index = 0
-        while cumul < midlValues:
-            cumul =  cumul + lSorted[index]
-            index += 1
-        if (index == 0):
-            return lSorted[index]
-        else :
-            return lSorted[index - 1]
\ No newline at end of file
--- a/commons/core/stat/test/Test_F_Stat.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,22 +0,0 @@
-import unittest
-from commons.core.stat.Stat import Stat
-
-
-class Test_F_Stat(unittest.TestCase):
-
-
-    def test_output(self):
-        lValues = [0, -1, -5, 112, 10.2, 0.5, 4, -0.5]
-        iStat = Stat(lValues)
-        expString = "n=8 mean=15.025 var=1554.934 sd=39.433 min=-5.000 med=0.250 max=112.000"
-        self.assertEquals(expString, iStat.string())
-        
-    def test_outputQuantile(self):
-        lValues = [0, -1, -5, 112, 10.2, 0.5, 4, -0.5]
-        iStat = Stat(lValues)
-        expString = "n=8 min=-5.000 Q1=-0.500 median=0.500 Q3=10.200 max=112.000"
-        self.assertEquals(expString, iStat.stringQuantiles())
-        
-        
-if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
--- a/commons/core/stat/test/Test_Stat.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,356 +0,0 @@
-from commons.core.stat.Stat import Stat
-import unittest
-
-class Test_Stat(unittest.TestCase):
-    
-    def test__eq__true(self):
-        iStat1 = Stat([1, 2, 3, 46])
-        iStat2 = Stat([1, 2, 3, 46])
-        self.assertTrue(iStat1 == iStat2)
-
-    def test__eq__false(self):
-        iStat1 = Stat([1, 2, 3, 4])
-        iStat2 = Stat([1, 2, 3, 46])
-        self.assertFalse(iStat1 == iStat2)
-
-    def test__eq__disordered_list(self):
-        iStat1 = Stat([3, 2, 1, 46])
-        iStat2 = Stat([1, 2, 3, 46])
-        self.assertTrue(iStat1 == iStat2)
-
-    def test_reset(self):
-        lValues = [1, 2, 5, 9, 12, 46]
-        iStat = Stat(lValues)
-        iStat.reset()
-        expValuesList = []
-        expSum = 0
-        expSum2 = 0
-        expN = 0
-        expMin = 0
-        expMax = 0
-        obsValuesList = iStat.getValuesList()
-        obsSum = iStat.getSum()
-        obsSum2 = iStat.getSumOfSquares()
-        obsN = iStat.getValuesNumber()
-        obsMin = iStat.getMin()
-        obsMax = iStat.getMax()
-        self.assertEquals(expValuesList, obsValuesList)
-        self.assertEquals(expSum, obsSum)
-        self.assertEquals(expSum2, obsSum2)
-        self.assertEquals(expN, obsN)
-        self.assertEquals(expMin, obsMin)
-        self.assertEquals(expMax, obsMax)
-
-    def test_add_EmptyList(self):
-        lValues = []
-        iStat = Stat(lValues)
-        iStat.add(5)
-        expValuesList = [5]
-        expSum = 5
-        expSum2 = 25
-        expN = 1
-        expMin = 5
-        expMax = 5
-        obsValuesList = iStat.getValuesList()
-        obsSum = iStat.getSum()
-        obsSum2 = iStat.getSumOfSquares()
-        obsN = iStat.getValuesNumber()
-        obsMin = iStat.getMin()
-        obsMax = iStat.getMax()
-        self.assertEquals(expValuesList, obsValuesList)
-        self.assertEquals(expSum, obsSum)
-        self.assertEquals(expSum2, obsSum2)
-        self.assertEquals(expN, obsN)
-        self.assertEquals(expMin, obsMin)
-        self.assertEquals(expMax, obsMax)
-       
-    def test_add_Max(self):
-        lValues = [0,1,1]
-        iStat = Stat(lValues)
-        iStat.add(2)
-        expValuesList = [0,1,1,2]
-        expSum = 4
-        expSum2 = 6
-        expN = 4
-        expMin = 0
-        expMax = 2
-        obsValuesList = iStat.getValuesList()
-        obsSum = iStat.getSum()
-        obsSum2 = iStat.getSumOfSquares()
-        obsN = iStat.getValuesNumber()
-        obsMin = iStat.getMin()
-        obsMax = iStat.getMax()
-        self.assertEquals(expValuesList, obsValuesList)
-        self.assertEquals(expSum, obsSum)
-        self.assertEquals(expSum2, obsSum2)
-        self.assertEquals(expN, obsN)
-        self.assertEquals(expMin, obsMin)
-        self.assertEquals(expMax, obsMax)
-       
-    def test_add_Min(self):
-        lValues = [2,1,1]
-        iStat = Stat(lValues)
-        iStat.add(0)
-        expValuesList = [2,1,1,0]
-        expSum = 4
-        expSum2 = 6
-        expN = 4
-        expMin = 0
-        expMax = 2
-        obsValuesList = iStat.getValuesList()
-        obsSum = iStat.getSum()
-        obsSum2 = iStat.getSumOfSquares()
-        obsN = iStat.getValuesNumber()
-        obsMin = iStat.getMin()
-        obsMax = iStat.getMax()
-        self.assertEquals(expValuesList, obsValuesList)
-        self.assertEquals(expSum, obsSum)
-        self.assertEquals(expSum2, obsSum2)
-        self.assertEquals(expN, obsN)
-        self.assertEquals(expMin, obsMin)
-        self.assertEquals(expMax, obsMax)
-       
-    def test_fill_emptyList(self):
-        lValues = [2,1,1]
-        iStat = Stat(lValues)
-        iStat.fill([])
-        expValuesList = [2,1,1]
-        expSum = 4
-        expSum2 = 6
-        expN = 3
-        expMin = 1
-        expMax = 2
-        obsValuesList = iStat.getValuesList()
-        obsSum = iStat.getSum()
-        obsSum2 = iStat.getSumOfSquares()
-        obsN = iStat.getValuesNumber()
-        obsMin = iStat.getMin()
-        obsMax = iStat.getMax()
-        self.assertEquals(expValuesList, obsValuesList)
-        self.assertEquals(expSum, obsSum)
-        self.assertEquals(expSum2, obsSum2)
-        self.assertEquals(expN, obsN)
-        self.assertEquals(expMin, obsMin)
-        self.assertEquals(expMax, obsMax)
-       
-    def test_fill(self):
-        lValues = [2, 1, 1]
-        iStat = Stat(lValues)
-        iStat.fill([4, 0])
-        expValuesList = [2, 1, 1, 4, 0]
-        expSum = 8
-        expSum2 = 22
-        expN = 5
-        expMin = 0
-        expMax = 4
-        obsValuesList = iStat.getValuesList()
-        obsSum = iStat.getSum()
-        obsSum2 = iStat.getSumOfSquares()
-        obsN = iStat.getValuesNumber()
-        obsMin = iStat.getMin()
-        obsMax = iStat.getMax()
-        self.assertEquals(expValuesList, obsValuesList)
-        self.assertEquals(expSum, obsSum)
-        self.assertEquals(expSum2, obsSum2)
-        self.assertEquals(expN, obsN)
-        self.assertEquals(expMin, obsMin)
-        self.assertEquals(expMax, obsMax)
-        
-    def test_mean_emptyList(self):
-        lValues = []
-        iStat = Stat(lValues)
-        expMean = 0
-        obsMean = iStat.mean()
-        self.assertEquals(expMean, obsMean)
-        
-    def test_mean(self):
-        lValues = [0, 1]
-        iStat = Stat(lValues)
-        expMean = 0.5
-        obsMean = iStat.mean()
-        self.assertEquals(expMean, obsMean)
-        
-    def test_var_emptyList(self):
-        lValues = []
-        iStat = Stat(lValues)
-        expVar = 0
-        obsVar = iStat.var()
-        self.assertEquals(expVar, obsVar)
-        
-    def test_var(self):
-        lValues = [1, 1, 2]
-        iStat = Stat(lValues)
-        expVar = round(1/float(3), 5)
-        obsVar = round(iStat.var(), 5)
-        self.assertEquals(expVar, obsVar)
-        
-    def test_var2(self):
-        lValues = [1,6,3,4,9,6]
-        iStat = Stat(lValues)
-        expVar = 7.76667
-        obsVar = round(iStat.var(), 5)
-        self.assertEquals(expVar, obsVar)
-        
-    def test_var_null(self):
-        lValues = [87.340000000000003, 87.340000000000003, 87.340000000000003, 87.340000000000003, 87.340000000000003, 87.340000000000003, 87.340000000000003]
-        iStat = Stat(lValues)
-        expVar = 0
-        obsVar = round(iStat.var(),5)
-        self.assertEquals(expVar, obsVar)
-        
-    def test_sd(self):
-        lValues = [1, 1, 2]
-        iStat = Stat(lValues)
-        expSd = round(0.577350269, 5)
-        obsSd = round(iStat.sd(), 5)
-        self.assertEquals(expSd, obsSd)
-        
-    def test_sd_null(self):
-        lValues = [87.340000000000003, 87.340000000000003, 87.340000000000003, 87.340000000000003, 87.340000000000003, 87.340000000000003, 87.340000000000003]
-        iStat = Stat(lValues)
-        expSd = 0
-        obsSd = round(iStat.sd(), 5)
-        self.assertEquals(expSd, obsSd)
-
-    def test_cv(self):
-        lValues = [1, 1, 2]
-        iStat = Stat(lValues)
-        expSd = round(0.433012702, 5)
-        obsSd = round(iStat.cv(), 5)
-        self.assertEquals(expSd, obsSd)
-
-    def test_cv_mean_is_nul(self):
-        lValues = [1, -1]
-        iStat = Stat(lValues)
-        expSd = 0
-        obsSd = iStat.cv()
-        self.assertEquals(expSd, obsSd)
-        
-    def test_median_emptyList(self):
-        lValues = []
-        iStat = Stat(lValues)
-        expMedian = "NA"
-        obsMedian = iStat.median()
-        self.assertEquals(expMedian, obsMedian)
-        
-    def test_median_even(self):
-        lValues = [1, 2, 3, 4, 1, 2, 54, 6, 7]
-        iStat = Stat(lValues)
-        expMedian = 3
-        obsMedian = iStat.median()
-        self.assertEquals(expMedian, obsMedian)
-        
-    def test_median_odd(self):
-        lValues = [1, 2, 3, 4, 2, 54, 6, 7]
-        iStat = Stat(lValues)
-        expMedian = 3.5
-        obsMedian = iStat.median()
-        self.assertEquals(expMedian, obsMedian)
-        
-    def test_kurtosis_flat(self):
-        lValues = [1, 1, 1]
-        iStat = Stat(lValues)
-        expKurtosis = 0
-        obsKurtosis = iStat.kurtosis()
-        self.assertEquals(expKurtosis, obsKurtosis)
-        
-    def test_kurtosis_peak(self):
-        lValues = [1, 100, -5]
-        iStat = Stat(lValues)
-        expKurtosis = round(712872278.6609683, 2)
-        obsKurtosis = round(iStat.kurtosis(), 2)
-        self.assertEquals(expKurtosis, obsKurtosis)
- 
-    def test_kurtosis_normal(self):
-        lValues = [-1, 0, 1.64, 1.64, 0, -1]
-        iStat = Stat(lValues)
-        expKurtosis = 3.0
-        obsKurtosis = round(iStat.kurtosis(), 1)
-        self.assertEquals(expKurtosis, obsKurtosis)
-        
-    def test_sort(self):
-        lValues = [-1, 0, 1.64, 1.64, 0, -1]
-        iStat = Stat(lValues)
-        expSort = [-1, -1, 0, 0, 1.64, 1.64]
-        obsSort = iStat.sort()
-        self.assertEquals(expSort, obsSort)
-        
-    def test_sort_reverse(self):
-        lValues = [-1, 0, 1.64, 1.64, 0, -1]
-        iStat = Stat(lValues)
-        expSort = [1.64, 1.64, 0, 0, -1, -1]
-        obsSort = iStat.sort(True)
-        self.assertEquals(expSort, obsSort)
-        
-    def test_sort_emptyList(self):
-        lValues = []
-        iStat = Stat(lValues)
-        expSort = []
-        obsSort = iStat.sort()
-        self.assertEquals(expSort, obsSort)
-        
-    def test_quantile_emptyList(self):
-        lValues = []
-        iStat = Stat(lValues)
-        expQuantile = 0
-        obsQuantile = iStat.quantile(0.25)
-        self.assertEquals(expQuantile, obsQuantile)
-        
-    def test_quantile_0perc(self):
-        lValues = [0, 2.64, 1.64, -1, 5]
-        iStat = Stat(lValues)
-        expQuantile = -1
-        obsQuantile = iStat.quantile(0)
-        self.assertEquals(expQuantile, obsQuantile)
-        
-    def test_quantile_25perc(self):
-        lValues = [0, 2.64, 1.64, -1, 5]
-        iStat = Stat(lValues)
-        expQuantile = 0
-        obsQuantile = iStat.quantile(0.25)
-        self.assertEquals(expQuantile, obsQuantile)
-        
-    def test_quantile_41perc(self):
-        lValues = [0, 2.64, 1.64, -1, 5]
-        iStat = Stat(lValues)
-        expQuantile = 1.64
-        obsQuantile = iStat.quantile(0.41)
-        self.assertEquals(expQuantile, obsQuantile)
-        
-    def test_quantile_75perc(self):
-        lValues = [0, 2.64, 1.64, -1, 5]
-        iStat = Stat(lValues)
-        expQuantile = 2.64
-        obsQuantile = iStat.quantile(0.75)
-        self.assertEquals(expQuantile, obsQuantile)
-        
-    def test_quantile_81perc(self):
-        lValues = [0, 2.64, 1.64, -1, 5]
-        iStat = Stat(lValues)
-        expQuantile = 5
-        obsQuantile = iStat.quantile(0.81)
-        self.assertEquals(expQuantile, obsQuantile)
-        
-    def test_quantile_100perc(self):
-        lValues = [0, 2.64, 1.64, -1, 5]
-        iStat = Stat(lValues)
-        expQuantile = 5
-        obsQuantile = iStat.quantile(1)
-        self.assertEquals(expQuantile, obsQuantile)
-        
-    def test_N50(self):
-        lValues = [10, 10, 2, 16, 3, 4, 5]
-        iStat = Stat(lValues)
-        expN50 = 10
-        obsN50 = iStat.N50()
-        self.assertEquals(expN50, obsN50)
-
-    def test_N50SpecialValues(self):
-        lValues = [1, 100, 2, 3]
-        iStat = Stat(lValues)
-        expN50 = 100
-        obsN50 = iStat.N50()
-        self.assertEquals(expN50, obsN50)
-        
-if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
--- a/commons/core/test/Test_LoggerFactory.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-import unittest
-import logging
-from commons.core.LoggerFactory import LoggerFactory
-
-class Test_LoggerFactory( unittest.TestCase ):
-    
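-    # LoggerFactory.setLevel() maps a verbosity integer to a logging level:
-    # 4 -> DEBUG, 3 -> INFO, 2 -> WARNING, 1 -> ERROR, 0 -> logger disabled.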
-    def test_logger_debug(self):
-        iLogger = LoggerFactory.createLogger("test")
-        isMethodExecuted = True
-        try:
-            iLogger.debug("message")
-        except:
-            isMethodExecuted = False
-        self.assertTrue(isMethodExecuted)
-    
-    def test_logger_info(self):
-        iLogger = LoggerFactory.createLogger("test")
-        isMethodExecuted = True
-        try:
-            iLogger.info("message")
-        except:
-            isMethodExecuted = False
-        self.assertTrue(isMethodExecuted)
-    
-    def test_logger_warning(self):
-        iLogger = LoggerFactory.createLogger("test")
-        isMethodExecuted = True
-        try:
-            iLogger.warning("message")
-        except:
-            isMethodExecuted = False
-        self.assertTrue(isMethodExecuted)
-    
-    def test_logger_error(self):
-        iLogger = LoggerFactory.createLogger("test")
-        isMethodExecuted = True
-        try:
-            iLogger.error("message")
-        except:
-            isMethodExecuted = False
-        self.assertTrue(isMethodExecuted)
-    
-    def test_logger_level_debug(self):
-        iLogger = LoggerFactory.createLogger("test")
-        LoggerFactory.setLevel(iLogger, 4)
-        expLevel = logging.DEBUG
-        obsLevel = iLogger.getEffectiveLevel()
-        self.assertEquals(expLevel, obsLevel)
-    
-    def test_logger_level_info(self):
-        iLogger = LoggerFactory.createLogger("test")
-        LoggerFactory.setLevel(iLogger, 3)
-        expLevel = logging.INFO
-        obsLevel = iLogger.getEffectiveLevel()
-        self.assertEquals(expLevel, obsLevel)
-    
-    def test_logger_level_warning(self):
-        iLogger = LoggerFactory.createLogger("test")
-        LoggerFactory.setLevel(iLogger, 2)
-        expLevel = logging.WARNING
-        obsLevel = iLogger.getEffectiveLevel()
-        self.assertEquals(expLevel, obsLevel)
-    
-    def test_logger_level_error(self):
-        iLogger = LoggerFactory.createLogger("test")
-        LoggerFactory.setLevel(iLogger, 1)
-        expLevel = logging.ERROR
-        obsLevel = iLogger.getEffectiveLevel()
-        self.assertEquals(expLevel, obsLevel)
-    
-    def test_logger_default_level(self):
-        iLogger = LoggerFactory.createLogger("test")
-        expLevel = logging.ERROR
-        obsLevel = iLogger.getEffectiveLevel()
-        self.assertEquals(expLevel, obsLevel)
-    
-    def test_logger_quiet(self):
-        iLogger = LoggerFactory.createLogger("test")
-        LoggerFactory.setLevel(iLogger, 0)
-        self.assertTrue(iLogger.disabled)
-        
-    def test_logger_noduplicate_handler(self):
-        iLogger = LoggerFactory.createLogger("test")
-        iLogger2 = LoggerFactory.createLogger("test")
-        
-        expNbHandlers = 1
-        obsNbHandlers = len(iLogger2.handlers)
-        self.assertEquals(expNbHandlers, obsNbHandlers)
-        
-test_suite = unittest.TestSuite()
-test_suite.addTest( unittest.makeSuite( Test_LoggerFactory ) )
-if __name__ == "__main__":
-    unittest.TextTestRunner(verbosity=2).run( test_suite )
\ No newline at end of file
--- a/commons/core/tree/Tree.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,122 +0,0 @@
-import re
-
-class Tree:
-
-    def __init__( self, inFileName="" ):
-        self.tree = None
-        self.inFileName = inFileName
-        if self.inFileName != "":
-            self.loadTree()
-            
-    def loadTree( self, verbose=0 ):
-        inF = open( self.inFileName, "r" )
-        lines = inF.readlines()
-        inF.close()
-        line = "".join(lines).replace("\n","")
-        self.tree = self.parseTree( line )
-        if verbose > 0:
-            print "nb of leaves: %i" % ( self.getNbOfLeaves( self.tree ) )
-        
-    def parseTree( self, sTree ):
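-        # Recursively parse a Newick string into nested dicts with keys "name",
-        # "left", "right" and "length": a leaf is "name:length", an internal node
-        # is "(left,right)[:length]" and gets the name "internal".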
-        if "," not in sTree:
-            name, length = sTree.split(":")
-            return self.makeLeaf( name, float(length) )
-        
-        distPattern = re.compile(r'(?P<tree>\(.+\))\:(?P<length>[e\-\d\.]+)$')
-        m = distPattern.search( sTree )
-        length = 0
-        if m:
-            if m.group('length'): length = float( m.group('length') )
-            sTree = m.group('tree')
-        if length == "": length = 0
-        
-        lhs, rhs = self.parseSubTree( sTree )
-        
-        return { "name": "internal",
-                       "left": self.parseTree( lhs ),
-                       "right": self.parseTree( rhs ),
-                       "length": length }
-        
-    def makeLeaf( self, name, length ):
-        return { "left":None, "right":None, "name":name, "length":length }
-    
-    def parseSubTree( self, sTree ):
-        """
-        Parse a newick-formatted string of type 'a,b' into [a,b]
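-        e.g. '(seq1:0.0023,seq2:0.0017)' gives ['seq1:0.0023', 'seq2:0.0017'];
-        only the top-level comma is used as separator, nested parentheses are kept intact.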
-        """
-        chars = list( sTree[1:-1] )
-        count = 0
-        isLhs = True
-        leftS = ""
-        rightS = ""
-        for c in chars:
-            if c == "(":
-                count += 1
-            elif c == ")":
-                count -= 1
-            elif (c == ",") and (count == 0) and (isLhs) :
-                isLhs = False
-                continue
-            if isLhs: leftS += c
-            else: rightS += c
-        return [ leftS, rightS ]
-    
-    def toNewick( self, tree ):
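-        # Serialise the nested-dict tree back into a Newick string: internal nodes
-        # become "(left,right)" and a ":length" suffix is added when length is set.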
-        newString = ""
-        if tree["name"] is not "internal":
-            newString += tree["name"]
-        else:
-            newString += "("
-            newString += self.toNewick( tree["left"] )
-            newString += ","
-            newString += self.toNewick( tree["right"] )
-            newString += ")"
-        if tree["length"]:
-            newString += ":"
-            newString += "%f" % ( tree["length"] )
-        return newString
-    
-    def saveTree( self, outFileName ):
-        outF = open( outFileName, "w" )
-        outF.write( self.toNewick( self.tree ) )
-        outF.close()
-        
-    def replaceHeaderViaPrefixSearch( self, tree, dNew2Init ):
-        if dNew2Init.has_key( tree["name"] ):
-            tree["name"] = dNew2Init[ tree["name"] ].replace(" ","_").replace("::","-").replace(",","-")
-        if tree["left"] != None:
-            self.replaceHeaderViaPrefixSearch( tree["left"], dNew2Init )
-        if tree["right"] != None:
-            self.replaceHeaderViaPrefixSearch( tree["right"], dNew2Init )
-            
-    def retrieveInitialSequenceHeaders( self, dNew2Init, outFileName  ):
-        tree = self.tree
-        self.replaceHeaderViaPrefixSearch( tree, dNew2Init )
-        self.tree = tree
-        self.saveTree( outFileName )
-
-    def getNbOfChildNodes( self, tree, nbNodes ):
-        if tree["left"] is not None:
-            nbNodes += 1
-            nbNodes = self.getNbOfChildNodes( tree["left"], nbNodes )
-        if tree["right"] is not None:
-            nbNodes += 1
-            nbNodes = self.getNbOfChildNodes( tree["right"], nbNodes )
-        return nbNodes
-    
-    def getNbOfNodes( self ):
-        nbNodes = 0
-        return self.getNbOfChildNodes( self.tree, nbNodes )
-    
-    def getNbOfChildLeaves( self, tree, nbLeaves ):
-        if tree["name"] != "internal":
-            nbLeaves += 1
-        if tree["left"] is not None:
-            nbLeaves = self.getNbOfChildLeaves( tree["left"], nbLeaves )
-        if tree["right"] is not None:
-            nbLeaves = self.getNbOfChildLeaves( tree["right"], nbLeaves )
-        return nbLeaves
-    
-    def getNbOfLeaves( self ):
-        nbLeaves = 0
-        return self.getNbOfChildLeaves( self.tree, nbLeaves )
--- a/commons/core/tree/test/Test_Tree.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,90 +0,0 @@
-import unittest
-import os
-import time
-from commons.core.tree.Tree import Tree
-from commons.core.utils.FileUtils import FileUtils
-
-
-class Test_Tree( unittest.TestCase ):
-    
-    def setUp( self ):
-        self._tree = Tree()
-        self._uniqId = "%s_%s" % ( time.strftime("%Y%m%d%H%M%S") , os.getpid() )
-        
-        
-    def test_parseTree_oneLeaf( self ):
-        inString = "seq1:0.0023"
-        obs = self._tree.parseTree( inString )
-        exp = { "left":None, "right":None, "name":"seq1", "length":0.0023 }
-        self.assertEqual( obs, exp )
-        
-        
-    def test_parseTree_twoLeaves( self ):
-        inString = "(seq1:0.0023,seq2:0.0017)"
-        obs = self._tree.parseTree( inString )
-        exp = {'length':0, 'right':{'length':0.0016999999999999999, 'right':None, 'name':'seq2', 'left':None}, 'name':'internal', 'left':{'length':0.0023, 'right':None, 'name':'seq1', 'left':None}}
-        self.assertEqual( obs, exp )
-        
-##     def test_parseTree_threeLeaves( self ):
-##         inString = "(seq1:0.0023,(seq2:0.0017,seq3:0.0009))"
-##         obs = self._tree.parseTree( inString )
-##         print obs
-##         exp = {'length':0, 'right':{'length':0.0016999999999999999, 'right':None, 'name':'seq2', 'left':None}, 'name':'internal', 'left':{'length':0.0023, 'right':None, 'name':'seq1', 'left':None}}
-##         self.assertEqual( obs, exp )
-        
-        
-    def test_parseSubTree( self ):
-        inString = "(seq1:0.0023,seq2:0.0017)"
-        lExp = [ "seq1:0.0023", "seq2:0.0017" ]
-        lObs = self._tree.parseSubTree( inString )
-        self.assertEqual( lObs, lExp )
-        
-        
-    def test_saveTree( self ):
-        inFileName = "dummyInFile_%s" % ( self._uniqId )
-        inF = open( inFileName, "w" )
-        inF.write( "(seq4:0.012511,(seq3:0.005340,seq2:0.002201))" )
-        inF.close()
-        self._tree = Tree( inFileName )
-        obsFileName = "dummyObsFile_%s" % ( self._uniqId )
-        self._tree.saveTree( obsFileName )
-        self.assertTrue( FileUtils.are2FilesIdentical( obsFileName, inFileName ) )
-        for f in [ inFileName, obsFileName ]:
-            os.remove( f )
-            
-            
-    def test_retrieveInitialSequenceHeaders( self ):
-        inString = "(seq4:0.012511,(seq3:0.005340,seq2:0.002201))"
-        self._tree.tree = self._tree.parseTree( inString )
-        dNew2Init = { "seq2":"consensus524::215 dmel_chr4 142..765", "seq3":"DmelChr4-B-G387-MAP16", "seq4":"1360|1cl-3gr" }
-        expFileName = "dummyExpFile_%s"  % ( self._uniqId )
-        expF = open( expFileName, "w" )
-        expF.write( "(1360|1cl-3gr:0.012511,(DmelChr4-B-G387-MAP16:0.005340,consensus524-215_dmel_chr4_142..765:0.002201))" )
-        expF.close()
-        obsFileName = "dummyObsFile_%s"  % ( self._uniqId )
-        self._tree.retrieveInitialSequenceHeaders( dNew2Init, obsFileName )
-        self.assertTrue( FileUtils.are2FilesIdentical( obsFileName, expFileName ) )
-        for f in [ expFileName, obsFileName ]:
-            os.remove( f )
-            
-            
-    def test_getNbOfLeaves( self ):
-        inString = "(seq4:0.012511,(seq3:0.005340,seq2:0.002201))"
-        self._tree.tree = self._tree.parseTree( inString )
-        exp = 3
-        obs = self._tree.getNbOfLeaves()
-        self.assertEqual( obs, exp )
-        
-        
-    def test_getNbOfNodes( self ):
-        inString = "(seq4:0.012511,(seq3:0.005340,seq2:0.002201))"
-        self._tree.tree = self._tree.parseTree( inString )
-        exp = 4
-        obs = self._tree.getNbOfNodes()
-        self.assertEqual( obs, exp )
-        
-        
-test_suite = unittest.TestSuite()
-test_suite.addTest( unittest.makeSuite( Test_Tree ) )
-if __name__ == "__main__":
-    unittest.TextTestRunner(verbosity=2).run( test_suite )
--- a/commons/core/tree/test/treeTestSuite.py	Tue Apr 30 14:34:10 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,16 +0,0 @@
-import unittest
-import sys
-import Test_Tree
-
-
-
-def main():
-
-        commonsTestSuite = unittest.TestSuite() 
-        commonsTestSuite.addTest(unittest.makeSuite(Test_Tree.Test_Tree,'test'))
-        runner = unittest.TextTestRunner(sys.stderr, 2, 2)
-        runner.run(commonsTestSuite)
-
-
-if __name__ == '__main__':
-    main()