# for emacs: -*- mode: sh; -*-

# Drosophila grimshawi --
#
# Agencourt's 1 Aug 2005 assembly
#
# NOTE: this doc may have genePred loads that fail to include
# the bin column.  Please correct that for the next build by adding
# a bin column when you make any of these tables:
#
# mysql> SELECT tableName, type FROM trackDb WHERE type LIKE "%Pred%";
# +-------------+---------------------------------+
# | tableName   | type                            |
# +-------------+---------------------------------+
# | xenoRefGene | genePred xenoRefPep xenoRefMrna |
# | genscan     | genePred genscanPep             |
# +-------------+---------------------------------+


# DOWNLOAD SEQUENCE (DONE 8/4/05 angie)
    ssh kkstore02
    mkdir /cluster/store11/droGri1
    cd /cluster/data
    ln -s /cluster/store11/droGri1 droGri1
    cd /cluster/data/droGri1
    mkdir jkStuff bed
    mkdir downloads
    cd downloads
    wget http://rana.lbl.gov/drosophila/assemblies/dgri_agencourt_arachne_01aug05.tar.gz
    tar xvzf dgri_agencourt_arachne_01aug05.tar.gz
    cd agencourt_arachne_01aug05
    faSize scaffolds.fa
#226113271 bases (11750600 N's 214362671 real 214362671 upper 0 lower) in 25052 sequences in 1 files
#Total size: mean 9025.8 sd 186055.6 min 196 (scaffold_22917) max 14170260 (scaffold_25013) median 2278
#N count: mean 469.0 sd 5266.4
#U count: mean 8556.7 sd 183411.7
#L count: mean 0.0 sd 0.0


# DOWNLOAD MITOCHONDRION GENOME SEQUENCE (OR NOT) (DONE 8/4/05 angie)
    # go to http://www.ncbi.nih.gov/ and search Nucleotide for
    # "drosophila grimshawi mitochondrion" -- nada.


# PARTITION SCAFFOLDS FOR REPEATMASKER RUN (DONE 8/4/05 angie)
    # Max scaffold size is ~14M, so chop up large scaffolds into ~500kb chunks
    # and glom the tiny scaffolds up into ~500k collections (looks like
    # some almost-500k pieces are glommed together --> a few almost-1M chunks,
    # but that's OK).
    ssh kkstore02
    cd /cluster/data/droGri1
    mv downloads/agencourt_arachne_01aug05/scaffolds.fa .
    mkdir scaffoldsSplit
    faSplit size scaffolds.fa 500000 -oneFile scaffoldsSplit \
      -lift=jkStuff/scaffoldsSplit.lft
    mkdir chunks500k
    faSplit about scaffoldsSplit.fa 500000 chunks500k/chunk_
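
    # Optional sanity check (output not recorded here, just a sketch): the
    # chunk totals should add up to the same base and sequence counts that
    # faSize reported above (226113271 bases in 25052 sequences); file names
    # assume the chunks500k/chunk_*.fa pattern written by faSplit about.
    faSize chunks500k/chunk_*.fa | head -1
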
# CREATING DATABASE (DONE 8/4/05 angie)
    # Create the database.
    ssh hgwdev
    # Make sure there is at least 5 gig free for the database
    df -h /var/lib/mysql
#/dev/sdc1             1.8T  969G  691G  59% /var/lib/mysql
    hgsql '' -e 'create database droGri1'
    # Copy all the data from the table "grp"
    # in an existing database to the new database
    hgsql droGri1 -e 'create table grp (PRIMARY KEY(NAME)) select * from dm2.grp'


# RUN REPEAT MASKER (DONE 8/4/05 angie)
    cat /cluster/bluearc/RepeatMasker/Libraries/version
#RepBase Update 9.11, RM database version 20050112

    # make the run directory, output directory, and job list
    ssh kkstore02
    cd /cluster/data/droGri1
    cat << '_EOF_' > jkStuff/RMDrosophila
#!/bin/csh -fe

cd $1
/bin/mkdir -p /tmp/droGri1/$2
/bin/cp ../chunks500k/$2 /tmp/droGri1/$2/
pushd /tmp/droGri1/$2
/cluster/bluearc/RepeatMasker/RepeatMasker -s -spec drosophila $2
popd
/bin/cp /tmp/droGri1/$2/$2.out ./
/bin/rm -fr /tmp/droGri1/$2/*
/bin/rmdir --ignore-fail-on-non-empty /tmp/droGri1/$2
/bin/rmdir --ignore-fail-on-non-empty /tmp/droGri1
'_EOF_'
    # << this line makes emacs coloring happy
    chmod +x jkStuff/RMDrosophila
    mkdir RMRun RMOut
    cp /dev/null RMRun/RMJobs
    foreach f ( chunks500k/*.fa )
      set chunk = $f:t
      echo ../jkStuff/RMDrosophila \
           /cluster/data/droGri1/RMOut $chunk \
           '{'check in line+ /cluster/data/droGri1/$f'}' \
           '{'check out line+ /cluster/data/droGri1/RMOut/$chunk.out'}' \
        >> RMRun/RMJobs
    end

    # In case this is the first time that the latest RepeatMasker version has
    # been run on drosophila, do a little test run to make it unpack the
    # drosophila lib files into the installation dir if necessary, before
    # kicking off the cluster run.
    mkdir /tmp/RMtest
    cd /tmp/RMtest
    cp /cluster/data/droGri1/chunks500k/chunk_259.fa .
    /cluster/bluearc/RepeatMasker/RepeatMasker -qq -species drosophila \
      chunk_259.fa
    cd ..; rm -r RMtest

    # do the run
    ssh kk9
    cd /cluster/data/droGri1/RMRun
    para make RMJobs
    para time
#Completed: 426 of 426 jobs
#Average job time:                3423s      57.05m     0.95h    0.04d
#Longest finished job:            7504s     125.07m     2.08h    0.09d
#Submission to last job:         17509s     291.82m     4.86h    0.20d

    # Lift up the split-scaffold .out's to scaffold .out's
    ssh kkstore02
    cd /cluster/data/droGri1
    foreach f (RMOut/*.fa.out)
      liftUp $f:r:r.scaf.out jkStuff/scaffoldsSplit.lft warn $f > /dev/null
    end
    # Make a consolidated scaffold .out file too:
    head -3 RMOut/chunk_00.fa.out > RMOut/scaffolds.fa.out
    foreach f (RMOut/chunk*.scaf.out)
      tail +4 $f >> RMOut/scaffolds.fa.out
    end

    # Load the .out files into the database with:
    ssh hgwdev
    hgLoadOut droGri1 /cluster/data/droGri1/RMOut/scaffolds.fa.out
    # hgLoadOut made a "scaffolds_rmsk" table even with -table=rmsk,
    # but we want an unsplit table with no prefix:
    hgsql droGri1 -e 'rename table scaffolds_rmsk to rmsk'
    # Fix up the indices too:
    hgsql droGri1 -e 'drop index bin       on rmsk; \
                      drop index genoStart on rmsk; \
                      drop index genoEnd   on rmsk; \
                      create index bin       on rmsk (genoName(12), bin); \
                      create index genoStart on rmsk (genoName(12), genoStart);'


# EXTRACT AGP FROM ASSEMBLY.LINKS FILE (DONE 1/23/06 angie)
    ssh hgwdev
    cd /cluster/data/droGri1
    # assembly.links includes negative gap values which are supposed to
    # indicate overlap between contigs, but when scaffold sequences were
    # generated, Mike Eisen chose to use +25 instead of the negative values.
    # So replace negative gap values with 25 for consistency with the
    # scaffold sequences, then translate to AGP:
    perl -wpe 'next if (/^#/);  @w = split; \
               $w[6] = 25 if ($w[6] < 0);  $w[7] = 25 if ($w[7] < 0); \
               $_ = join("\t", @w) . "\n";' \
      downloads/agencourt_arachne_01aug05/assembly.links \
    | ~/kent/src/utils/arachneLinksToAgp.pl \
      > scaffolds.agp
    nice checkAgpAndFa scaffolds.agp scaffolds.fa | tail
    hgGoldGapGl -noGl droGri1 scaffolds.agp


# EXTRACTING GAP INFO FROM BLOCKS OF NS (DONE 8/4/05 angie)
    ssh kkstore02
    mkdir /cluster/data/droGri1/bed/fakeAgp
    cd /cluster/data/droGri1/bed/fakeAgp
    faGapSizes ../../scaffolds.fa \
        -niceSizes=5,10,20,25,30,40,50,100,250,500,1000,10000,100000
    # A disproportionately large number of gaps are exactly 25bp long, so
    # hgFakeAgp's default -minContigGap of 25 will be fine.
    hgFakeAgp ../../scaffolds.fa fake.agp
    ssh hgwdev
    hgLoadGap -unsplit droGri1 /cluster/data/droGri1/bed/fakeAgp/fake.agp


# SIMPLE REPEATS (TRF) (DONE 8/4/05 angie)
    ssh kolossus
    mkdir /cluster/data/droGri1/bed/simpleRepeat
    cd /cluster/data/droGri1/bed/simpleRepeat
    nice trfBig -trf=/cluster/bin/i386/trf \
      ../../scaffolds.fa \
      /dev/null -bedAt=simpleRepeat.bed -tempDir=/tmp \
    |& egrep -v '^(Removed|Tandem|Copyright|Loading|Allocating|Initializing|Computing|Scanning|Freeing)' \
    > trf.log &
    # check on this with: tail -f trf.log
    # Load this into the database like so:
    ssh hgwdev
    hgLoadBed droGri1 simpleRepeat \
      /cluster/data/droGri1/bed/simpleRepeat/simpleRepeat.bed \
      -sqlTable=$HOME/kent/src/hg/lib/simpleRepeat.sql


# FILTER SIMPLE REPEATS (TRF) INTO MASK (DONE 8/4/05 angie)
    # make a filtered version of the trf output:
    # keep trf's with period <= 12:
    ssh kkstore02
    cd /cluster/data/droGri1/bed/simpleRepeat
    awk '{if ($5 <= 12) print;}' simpleRepeat.bed > trfMask.bed


# MASK FA USING REPEATMASKER AND FILTERED TRF FILES (DONE 8/4/05 angie)
    ssh kkstore02
    cd /cluster/data/droGri1
    maskOutFa -soft scaffolds.fa bed/simpleRepeat/trfMask.bed scaffolds.fa
    maskOutFa -softAdd scaffolds.fa RMOut/scaffolds.fa.out scaffolds.fa
    # Now clean up the unmasked chunks to avoid confusion later.
    rm -r chunks500k


# STORE SEQUENCE AND ASSEMBLY INFORMATION (DONE 8/4/05 angie)
    # Translate to 2bit
    ssh kkstore02
    cd /cluster/data/droGri1
    faToTwoBit scaffolds.fa droGri1.2bit
    # Make chromInfo.tab.
    mkdir bed/chromInfo
    twoBitInfo droGri1.2bit stdout \
    | awk '{printf "%s\t%s\t/gbdb/droGri1/droGri1.2bit\n", $1, $2;}' \
    > bed/chromInfo/chromInfo.tab
    # Make a symbolic link from /gbdb/droGri1/ to the 2bit.
    ssh hgwdev
    mkdir -p /gbdb/droGri1
    ln -s /cluster/data/droGri1/droGri1.2bit /gbdb/droGri1/
    # Load chromInfo table.
    hgsql droGri1 < $HOME/kent/src/hg/lib/chromInfo.sql
    hgsql droGri1 -e 'load data local infile \
      "/cluster/data/droGri1/bed/chromInfo/chromInfo.tab" into table chromInfo'
    # Make chrom.sizes from chromInfo contents and check scaffold count.
    hgsql droGri1 -N -e 'select chrom,size from chromInfo' \
      > /cluster/data/droGri1/chrom.sizes
    wc -l /cluster/data/droGri1/chrom.sizes
# 25052 /cluster/data/droGri1/chrom.sizes
# MAKE HGCENTRALTEST ENTRY AND TRACKDB TABLE (DONE 8/5/05 angie)
    # Warning: genome and organism fields must correspond
    # with defaultDb values
    echo 'INSERT INTO dbDb \
        (name, description, nibPath, organism, \
         defaultPos, active, orderKey, genome, scientificName, \
         htmlPath, hgNearOk, hgPbOk, sourceName) values \
        ("droGri1", "Aug. 2005", "/gbdb/droGri1", "D. grimshawi", \
         "scaffold_24862:854980-869643", 1, 57, \
         "D. grimshawi", \
         "Drosophila grimshawi", "/gbdb/droGri1/html/description.html", \
         0, 0, "Agencourt 1 Aug 2005");' \
      | hgsql -h genome-testdb hgcentraltest
    echo 'INSERT INTO defaultDb (genome, name) values ("D. grimshawi", "droGri1");' \
      | hgsql -h genome-testdb hgcentraltest
    hgsql -h genome-testdb hgcentraltest \
      -e 'INSERT INTO genomeClade (genome, clade, priority) values \
          ("D. grimshawi", "insect", 77);'

    # Make trackDb table so browser knows what tracks to expect:
    ssh hgwdev
    cd ~/kent/src/hg/makeDb/trackDb
    cvsup
    # Edit trackDb/makefile to add droGri1 to the DBS variable.
    mkdir drosophila/droGri1
    # Create a simple drosophila/droGri1/description.html file.
    cvs add drosophila/droGri1
    cvs add drosophila/droGri1/description.html
    make update DBS=droGri1 ZOO_DBS=
    # go public on genome-test
    cvs ci makefile
    cvs ci drosophila/droGri1
    mkdir /gbdb/droGri1/html
    # in a clean, updated tree's kent/src/hg/makeDb/trackDb:
    make alpha
    # Also edit makeDb/schema/all.joiner, add new db.


# MAKE GCPERCENT (DONE 8/4/05 angie)
    ssh hgwdev
    mkdir /cluster/data/droGri1/bed/gc5Base
    cd /cluster/data/droGri1/bed/gc5Base
    hgGcPercent -wigOut -doGaps -file=stdout -win=5 -verbose=2 droGri1 \
      /cluster/data/droGri1 | wigEncode stdin gc5Base.wig gc5Base.wib
    mkdir /gbdb/droGri1/wib
    ln -s `pwd`/gc5Base.wib /gbdb/droGri1/wib
    hgLoadWiggle -pathPrefix=/gbdb/droGri1/wib droGri1 gc5Base gc5Base.wig


# PUT SEQUENCE ON /ISCRATCH FOR BLASTZ (DONE 8/4/05 angie)
    # First, agglomerate small scaffolds into chunks of ~500k median:
    ssh kkstore02
    cd /cluster/data/droGri1
    mkdir chunks
    faSplit about scaffolds.fa 500000 chunks/chunk_
    ssh kkr1u00
    mkdir /iscratch/i/droGri1
    rsync -av /cluster/data/droGri1/chunks /iscratch/i/droGri1/
    rsync -av /cluster/data/droGri1/droGri1.2bit /iscratch/i/droGri1/
    iSync
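
    # Optional spot-check that the /iscratch copies look complete before any
    # cluster runs use them (a sketch; counts not recorded here -- expect
    # 25052 sequences in the 2bit, matching chrom.sizes):
    ls /iscratch/i/droGri1/chunks/*.fa | wc -l
    twoBitInfo /iscratch/i/droGri1/droGri1.2bit stdout | wc -l
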
# PRODUCING GENSCAN PREDICTIONS (DONE 8/5/05 angie)
    ssh kkstore02
    # Make hard-masked scaffolds and split up for processing:
    cd /cluster/data/droGri1
    maskOutFa scaffolds.fa hard scaffolds.fa.masked
    mkdir chunksHardMasked
    faSplit about scaffolds.fa.masked 500000 chunksHardMasked/chunk_
    mkdir /cluster/data/droGri1/bed/genscan
    cd /cluster/data/droGri1/bed/genscan
    # Check out hg3rdParty/genscanlinux to get latest genscan:
    cvs co hg3rdParty/genscanlinux
    # Make 3 subdirectories for genscan to put its output files in
    mkdir gtf pep subopt
    ls -1S ../../chunksHardMasked/chunk*.fa > chunks.list
    cat << '_EOF_' > gsub
#LOOP
gsBig {check in line+ $(path1)} {check out line gtf/$(root1).gtf} -trans={check out line pep/$(root1).pep} -subopt={check out line subopt/$(root1).bed} -exe=hg3rdParty/genscanlinux/genscan -par=hg3rdParty/genscanlinux/HumanIso.smat -tmp=/tmp -window=2400000
#ENDLOOP
'_EOF_'
    # << this line keeps emacs coloring happy
    ssh kki
    cd /cluster/data/droGri1/bed/genscan
    gensub2 chunks.list single gsub jobList
    para make jobList
    para time
#Completed: 208 of 208 jobs
#Average job time:                  40s       0.67m     0.01h    0.00d
#Longest finished job:             446s       7.43m     0.12h    0.01d
#Submission to last job:           602s      10.03m     0.17h    0.01d
    # If there are crashes, diagnose with "para problems".
    # If a job crashes due to genscan running out of memory, re-run it
    # manually with "-window=1200000" instead of "-window=2400000".

    # Concatenate scaffold-level results:
    ssh kkstore02
    cd /cluster/data/droGri1/bed/genscan
    cat gtf/*.gtf > genscan.gtf
    cat subopt/*.bed > genscanSubopt.bed
    cat pep/*.pep > genscan.pep
    # Clean up:
    rm -r /cluster/data/droGri1/chunksHardMasked

    # Load into the database like so:
    ssh hgwdev
    cd /cluster/data/droGri1/bed/genscan
    ldHgGene -gtf droGri1 genscan genscan.gtf
    hgPepPred droGri1 generic genscanPep genscan.pep
    hgLoadBed droGri1 genscanSubopt genscanSubopt.bed


# MAKE DOWNLOADABLE FILES (DONE 8/5/05 angie)
    ssh kkstore02
    mkdir /cluster/data/droGri1/zips
    cd /cluster/data/droGri1
    gzip -c RMOut/scaffolds.fa.out > zips/scaffoldOut.gz
    gzip -c scaffolds.fa > zips/scaffoldFa.gz
    gzip -c scaffolds.fa.masked > zips/scaffoldFaMasked.gz
    gzip -c bed/simpleRepeat/trfMask.bed > zips/scaffoldTrf.gz
    ssh hgwdev
    mkdir /usr/local/apache/htdocs/goldenPath/droGri1
    cd /usr/local/apache/htdocs/goldenPath/droGri1
    mkdir bigZips database
    # Create README.txt files in bigZips/ and database/ to explain the files.
    cd bigZips
    ln -s /cluster/data/droGri1/zips/*.gz .
    nice md5sum *.gz > md5sum.txt


# MAKE 11.OOC FILE FOR BLAT (DONE 8/8/05 angie)
    # Use -repMatch=100 (based on size -- for human we use 1024, and
    # fly size is ~4.4% of human judging by gapless dm1 genome size from
    # featureBits -- we would use 45, but bump that up a bit to be more
    # conservative).
    ssh kkr1u00
    mkdir /cluster/bluearc/droGri1
    blat /cluster/data/droGri1/droGri1.2bit /dev/null /dev/null -tileSize=11 \
      -makeOoc=/cluster/bluearc/droGri1/11.ooc -repMatch=100
#Wrote 19750 overused 11-mers to /cluster/bluearc/droGri1/11.ooc
    cp -p /cluster/bluearc/droGri1/*.ooc /iscratch/i/droGri1/
    iSync
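
    # For the record, the -repMatch arithmetic spelled out (sizes are
    # approximate, just a sketch): scaling human's 1024 by the ~4.4% genome
    # size ratio gives ~45, which was then rounded up to 100 to be
    # conservative.
    awk 'BEGIN {printf "%.1f\n", 1024 * 0.044}'
# 45.1
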
# AUTO UPDATE GENBANK MRNA RUN (DONE 8/11/05 angie)
    ssh hgwdev
    # Update genbank config and source in CVS:
    cd ~/kent/src/hg/makeDb/genbank
    cvsup .
    # Edit etc/genbank.conf and add these lines (note scaffold-browser settings):
# droGri1 (D. grimshawi)
droGri1.genome = /iscratch/i/droGri1/droGri1.2bit
droGri1.mondoTwoBitParts = 100
droGri1.lift = no
droGri1.refseq.mrna.native.load = no
droGri1.refseq.mrna.xeno.load = yes
droGri1.refseq.mrna.xeno.pslReps = -minCover=0.15 -minAli=0.75 -nearTop=0.005
# GenBank has no D. grimshawi mRNAs nor ESTs at this point... that may change.
droGri1.genbank.mrna.native.load = no
droGri1.genbank.mrna.xeno.load = yes
droGri1.genbank.est.native.load = no
droGri1.genbank.est.xeno.load = no
droGri1.downloadDir = droGri1
droGri1.perChromTables = no
    cvs ci etc/genbank.conf
    # Since D. grimshawi is a new species for us, edit src/lib/gbGenome.c.
    # Pick some other browser species, & monkey-see monkey-do.
    cvs diff src/lib/gbGenome.c
    make
    cvs ci src/lib/gbGenome.c
    # Edit src/align/gbBlat to add /iscratch/i/droGri1/11.ooc
    cvs diff src/align/gbBlat
    make
    cvs ci src/align/gbBlat

    # Install to /cluster/data/genbank:
    make install-server

    ssh eieio
    cd /cluster/data/genbank
    # This is an -initial run, (xeno) RefSeq only:
    nice bin/gbAlignStep -srcDb=refseq -type=mrna -initial droGri1 &
    tail -f [its logfile]
    # Load results:
    ssh hgwdev
    cd /cluster/data/genbank
    nice bin/gbDbLoadStep -verbose=1 -drop -initialLoad droGri1
    featureBits droGri1 xenoRefGene
#11126253 bases of 214363807 (5.190%) in intersection
    # Clean up:
    rm -rf work/initial.droGri1

    # This is an -initial run, mRNA only:
    nice bin/gbAlignStep -srcDb=genbank -type=mrna -initial droGri1 &
    tail -f [its logfile]
    # Load results:
    ssh hgwdev
    cd /cluster/data/genbank
    nice bin/gbDbLoadStep -verbose=1 -drop -initialLoad droGri1
    featureBits droGri1 xenoMrna
#16072365 bases of 214363807 (7.498%) in intersection
    # Clean up:
    rm -rf work/initial.droGri1


# SWAP CHAINS FROM DM2, BUILD NETS ETC. (DONE 8/9/05 angie)
    mkdir /cluster/data/droGri1/bed/blastz.dm2.swap
    cd /cluster/data/droGri1/bed/blastz.dm2.swap
    doBlastzChainNet.pl -swap /cluster/data/dm2/bed/blastz.droGri1/DEF \
      >& do.log
    echo "check /cluster/data/droGri1/bed/blastz.dm2.swap/do.log" \
    | mail -s "check do.log" $USER
    # Add {chain,net}Dm2 to trackDb.ra if necessary.
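
    # Optional check that the swapped chains and nets loaded (a sketch;
    # numbers not recorded here -- table names assume the usual chainDm2Link
    # and netDm2 produced by doBlastzChainNet.pl):
    featureBits droGri1 chainDm2Link
    featureBits droGri1 netDm2
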
# MAKE Drosophila Proteins track (DONE braney 08-22-05)
    ssh kkstore02
    mkdir -p /cluster/data/droGri1/blastDb
    cd /cluster/data/droGri1/blastDb
    faSplit sequence ../scaffolds.fa 400 x
    for i in *.fa; do formatdb -i $i -p F 2> /dev/null; done
    rm *.fa *.log

    ssh pk
    mkdir -p /san/sanvol1/scratch/droGri1/blastDb
    cp /cluster/data/droGri1/blastDb/* /san/sanvol1/scratch/droGri1/blastDb

    mkdir -p /cluster/data/droGri1/bed/tblastn.dm2FB
    cd /cluster/data/droGri1/bed/tblastn.dm2FB
    ls -1S /san/sanvol1/scratch/droGri1/blastDb/*.nsq | sed "s/\.nsq//" > target.lst
    mkdir fbfa
    # calculate a reasonable number of jobs
    calc `wc /cluster/data/dm2/bed/blat.dm2FB/dm2FB.psl|awk "{print \\\$1}"`/\(80000/`wc target.lst | awk "{print \\\$1}"`\)
# 18929/(80000/285) = 67.434562
    split -l 67 /cluster/data/dm2/bed/blat.dm2FB/dm2FB.psl fbfa/fb
    cd fbfa
    for i in *; do pslxToFa $i $i.fa; rm $i; done
    cd ..
    ls -1S fbfa/*.fa > fb.lst
    mkdir -p /cluster/bluearc/droGri1/bed/tblastn.dm2FB/blastOut
    ln -s /cluster/bluearc/droGri1/bed/tblastn.dm2FB/blastOut
    for i in `cat fb.lst`; do mkdir blastOut/`basename $i .fa`; done
    tcsh
    cat << '_EOF_' > blastGsub
#LOOP
blastSome $(path1) {check in line $(path2)} {check out exists blastOut/$(root2)/q.$(root1).psl }
#ENDLOOP
'_EOF_'
    cat << '_EOF_' > blastSome
#!/bin/sh
BLASTMAT=/iscratch/i/blast/data
export BLASTMAT
g=`basename $2`
f=/tmp/`basename $3`.$g
for eVal in 0.01 0.001 0.0001 0.00001 0.000001 1E-09 1E-11
do
if /scratch/blast/blastall -M BLOSUM80 -m 0 -F no -e $eVal -p tblastn -d $1 -i $2 -o $f.8
then
        mv $f.8 $f.1
        break;
fi
done
if test -f $f.1
then
    if /cluster/bin/i386/blastToPsl $f.1 $f.2
    then
        liftUp -nosort -type=".psl" -pslQ -nohead $3.tmp /cluster/data/dm2/bed/blat.dm2FB/protein.lft warn $f.2
        mv $3.tmp $3
        rm -f $f.1 $f.2 $f.3 $f.4
        exit 0
    fi
fi
rm -f $f.1 $f.2 $3.tmp $f.3 $f.8 $f.4
exit 1
'_EOF_'
    chmod +x blastSome
    gensub2 target.lst fb.lst blastGsub blastSpec

    para create blastSpec
    para push
# Completed: 80655 of 80655 jobs
# CPU time in finished jobs:     814527s   13575.45m   226.26h    9.43d  0.026 y
# IO & Wait Time:                248647s    4144.12m    69.07h    2.88d  0.008 y
# Average job time:                  13s       0.22m     0.00h    0.00d
# Longest finished job:            1970s      32.83m     0.55h    0.02d
# Submission to last job:         18317s     305.28m     5.09h    0.21d

    cd /cluster/data/droGri1/bed/tblastn.dm2FB
    tcsh
    cat << '_EOF_' > chainGsub
#LOOP
chainSome $(path1)
#ENDLOOP
'_EOF_'

    cat << '_EOF_' > chainSome
(cd $1; cat q.*.psl | simpleChain -prot -outPsl -maxGap=25000 stdin ../c.`basename $1`.psl)
'_EOF_'
    chmod +x chainSome

    ls -1dS `pwd`/blastOut/fb?? > chain.lst
    gensub2 chain.lst single chainGsub chainSpec
    para create chainSpec
    para maxNode 20
    para push
# Completed: 283 of 283 jobs
# CPU time in finished jobs:       7849s     130.81m     2.18h    0.09d  0.000 y
# IO & Wait Time:                  7282s     121.37m     2.02h    0.08d  0.000 y
# Average job time:                  53s       0.89m     0.01h    0.00d
# Longest finished job:             507s       8.45m     0.14h    0.01d
# Submission to last job:           874s      14.57m     0.24h    0.01d

    cd /cluster/data/droGri1/bed/tblastn.dm2FB/blastOut
    for i in fb??
    do
        awk "(\$13 - \$12)/\$11 > 0.6 {print}" c.$i.psl > c60.$i.psl
        sort -rn c60.$i.psl | pslUniq stdin u.$i.psl
        awk "((\$1 / \$11) ) > 0.60 { print }" c60.$i.psl > m60.$i.psl
        echo $i
    done

    sort -T /tmp -k 14,14 -k 16,16n -k 17,17n u.*.psl m60* | uniq > /cluster/data/droGri1/bed/tblastn.dm2FB/blastDm2FB.psl
    cd ..
    wc blastDm2FB.psl
# 19919  418299 5263114 blastDm2FB.psl
    pslUniq blastDm2FB.psl stdout | wc
# 17978  377538 4946860
    cat fbfa/*fa | grep ">" | wc
# 82338  82338 1300520

    ssh hgwdev
    cd /cluster/data/droGri1/bed/tblastn.dm2FB
    hgLoadPsl droGri1 blastDm2FB.psl
    featureBits droGri1 blastDm2FB
# 18966535 bases of 214363807 (8.848%) in intersection

    exit # back to kkstore02
    rm -rf blastOut

# End tblastn
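
# For reference, the awk filters above key off standard PSL columns:
# $1 = matches, $11 = qSize, $12 = qStart, $13 = qEnd; so
# ($13 - $12)/$11 > 0.6 keeps alignments spanning >60% of the query protein,
# and ($1 / $11) > 0.60 keeps those where >60% of the query bases match.
# An optional validation pass on the final file (output not recorded here):
#   pslCheck /cluster/data/droGri1/bed/tblastn.dm2FB/blastDm2FB.psl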