From e0c50fc42e953dbe258feb9bc1d1d7dc10fafff0 Mon Sep 17 00:00:00 2001
From: SarahsWork
Date: Tue, 19 Feb 2013 11:30:13 -0500
Subject: [PATCH] working on topdown in pre.cluster

---
 Mothur.xcodeproj/project.pbxproj |   8 +-
 preclustercommand.cpp            | 143 +++++++++++++++++++----------
 preclustercommand.h              | 150 +++++++++++++++++++++--------
 3 files changed, 200 insertions(+), 101 deletions(-)

diff --git a/Mothur.xcodeproj/project.pbxproj b/Mothur.xcodeproj/project.pbxproj
index c66d967..f37c56d 100644
--- a/Mothur.xcodeproj/project.pbxproj
+++ b/Mothur.xcodeproj/project.pbxproj
@@ -1730,11 +1730,11 @@
 				A7E9B66512D37EC400DA6239 /* blastdb.hpp */,
 				A74D59A6159A1E3600043046 /* counttable.h */,
 				A74D59A3159A1E2000043046 /* counttable.cpp */,
-				A7E9B6CD12D37EC400DA6239 /* distancedb.cpp */,
 				A7E9B6BD12D37EC400DA6239 /* database.cpp */,
 				A7E9B6BE12D37EC400DA6239 /* database.hpp */,
 				A7E9B6BF12D37EC400DA6239 /* datavector.hpp */,
 				A7E9B6CE12D37EC400DA6239 /* distancedb.hpp */,
+				A7E9B6CD12D37EC400DA6239 /* distancedb.cpp */,
 				A7E9B6DE12D37EC400DA6239 /* fastamap.cpp */,
 				A7E9B6DF12D37EC400DA6239 /* fastamap.h */,
 				A7E9B6E812D37EC400DA6239 /* flowdata.h */,
@@ -2384,9 +2384,9 @@
 			GCC_ENABLE_SSE42_EXTENSIONS = NO;
 			GCC_OPTIMIZATION_LEVEL = 3;
 			GCC_PREPROCESSOR_DEFINITIONS = (
-				"MOTHUR_FILES=\"\\\"../release\\\"\"",
-				"VERSION=\"\\\"1.28.0\\\"\"",
-				"RELEASE_DATE=\"\\\"11/2/2012\\\"\"",
+				"MOTHUR_FILES=\"\\\"../../release\\\"\"",
+				"VERSION=\"\\\"1.29.2\\\"\"",
+				"RELEASE_DATE=\"\\\"2/12/2013\\\"\"",
 			);
 			"GCC_VERSION[arch=*]" = "";
 			GCC_WARN_ABOUT_MISSING_NEWLINE = YES;
diff --git a/preclustercommand.cpp b/preclustercommand.cpp
index a1d5f48..c067915 100644
--- a/preclustercommand.cpp
+++ b/preclustercommand.cpp
@@ -17,7 +17,7 @@ vector<string> PreClusterCommand::setParameters(){
 	CommandParameter pname("name", "InputTypes", "", "", "NameCount", "none", "none","name",false,false,true); parameters.push_back(pname);
 	CommandParameter pcount("count", "InputTypes", "", "", "NameCount-CountGroup", "none", "none","count",false,false,true); parameters.push_back(pcount);
 	CommandParameter pgroup("group", "InputTypes", "", "", "CountGroup", "none", "none","",false,false,true); parameters.push_back(pgroup);
-	CommandParameter pdiffs("diffs", "Number", "", "0", "", "", "","",false,false,true); parameters.push_back(pdiffs);
+	CommandParameter pdiffs("diffs", "Number", "", "1", "", "", "","",false,false,true); parameters.push_back(pdiffs);
 	CommandParameter pprocessors("processors", "Number", "", "1", "", "", "","",false,false,true); parameters.push_back(pprocessors);
 	CommandParameter ptopdown("topdown", "Boolean", "", "T", "", "", "","",false,false); parameters.push_back(ptopdown);
 
@@ -560,47 +560,88 @@ int PreClusterCommand::process(string newMapFile){
 	int count = 0;
 	int numSeqs = alignSeqs.size();
 
-	//think about running through twice...
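The loop removed below is the original single-strategy merge; the new code keeps it essentially unchanged as the topdown=T branch, where each sequence absorbs every later, rarer sequence that sits within diffs mismatches. As a reference for that behavior, here is a minimal standalone sketch of the greedy top-down merge. It is an illustration, not mothur's code: PreSeq, countDiffs, and topDownMerge are assumed names standing in for seqPNode, calcMisMatches, and the loop in process, the records are assumed to be pre-sorted by decreasing abundance, and the map-file output is omitted.

#include <cstddef>
#include <string>
#include <vector>

// Illustrative stand-in for mothur's per-sequence record.
struct PreSeq {
    std::string name;         // representative read name
    std::string names;        // comma-separated names of all reads collapsed into this record
    std::string aligned;      // aligned sequence
    int numIdentical;         // abundance of the record
    bool active;              // false once the record has been merged away or finished
};

// Count mismatches between two aligned sequences, stopping early once the
// count exceeds the allowed number of diffs.
static int countDiffs(const std::string& a, const std::string& b, int maxDiffs) {
    int mismatch = 0;
    for (std::size_t k = 0; k < a.length() && k < b.length(); k++) {
        if (a[k] != b[k]) { mismatch++; }
        if (mismatch > maxDiffs) { break; }   // too far apart to cluster
    }
    return mismatch;
}

// Greedy top-down merge: assuming seqs is sorted by decreasing abundance,
// each record absorbs every later (rarer) record within `diffs` mismatches.
// Returns how many records were merged away.
int topDownMerge(std::vector<PreSeq>& seqs, int diffs) {
    int merged = 0;
    for (std::size_t i = 0; i < seqs.size(); i++) {
        if (!seqs[i].active) { continue; }                // already absorbed earlier
        for (std::size_t j = i + 1; j < seqs.size(); j++) {
            if (!seqs[j].active) { continue; }
            if (countDiffs(seqs[i].aligned, seqs[j].aligned, diffs) <= diffs) {
                seqs[i].names += ',' + seqs[j].names;     // absorb the rarer read set
                seqs[i].numIdentical += seqs[j].numIdentical;
                seqs[j].active = false;
                seqs[j].numIdentical = 0;
                merged++;
            }
        }
        seqs[i].active = false;                           // this cluster is finished
    }
    return merged;
}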
-    for (int i = 0; i < numSeqs; i++) {
-        
-        //are you active
-        //		itActive = active.find(alignSeqs[i].seq.getName());
-        
-        if (alignSeqs[i].active) {  //this sequence has not been merged yet
-            
-            string chunk = alignSeqs[i].seq.getName() + "\t" + toString(alignSeqs[i].numIdentical) + "\t" + toString(0) + "\t" + alignSeqs[i].seq.getAligned() + "\n";
-            
-            //try to merge it with all smaller seqs
-            for (int j = i+1; j < numSeqs; j++) {
-                
-                if (m->control_pressed) { out.close(); return 0; }
-                
-                if (alignSeqs[j].active) {  //this sequence has not been merged yet
-                    //are you within "diff" bases
-                    int mismatch = calcMisMatches(alignSeqs[i].seq.getAligned(), alignSeqs[j].seq.getAligned());
-                    
-                    if (mismatch <= diffs) {
-                        //merge
-                        alignSeqs[i].names += ',' + alignSeqs[j].names;
-                        alignSeqs[i].numIdentical += alignSeqs[j].numIdentical;
-                        
-                        chunk += alignSeqs[j].seq.getName() + "\t" + toString(alignSeqs[j].numIdentical) + "\t" + toString(mismatch) + "\t" + alignSeqs[j].seq.getAligned() + "\n";
-                        
-                        alignSeqs[j].active = 0;
-                        alignSeqs[j].numIdentical = 0;
-                        count++;
-                    }
-                }//end if j active
-            }//end for loop j
-            
-            //remove from active list
-            alignSeqs[i].active = 0;
-            
-            out << "ideal_seq_" << (i+1) << '\t' << alignSeqs[i].numIdentical << endl << chunk << endl;;
-            
-        }//end if active i
-        if(i % 100 == 0) { m->mothurOut(toString(i) + "\t" + toString(numSeqs - count) + "\t" + toString(count)); m->mothurOutEndLine(); }
-    }
+    if (topdown) {
+        //think about running through twice...
+        for (int i = 0; i < numSeqs; i++) {
+            
+            if (alignSeqs[i].active) {  //this sequence has not been merged yet
+                
+                string chunk = alignSeqs[i].seq.getName() + "\t" + toString(alignSeqs[i].numIdentical) + "\t" + toString(0) + "\t" + alignSeqs[i].seq.getAligned() + "\n";
+                
+                //try to merge it with all smaller seqs
+                for (int j = i+1; j < numSeqs; j++) {
+                    
+                    if (m->control_pressed) { out.close(); return 0; }
+                    
+                    if (alignSeqs[j].active) {  //this sequence has not been merged yet
+                        //are you within "diff" bases
+                        int mismatch = calcMisMatches(alignSeqs[i].seq.getAligned(), alignSeqs[j].seq.getAligned());
+                        
+                        if (mismatch <= diffs) {
+                            //merge
+                            alignSeqs[i].names += ',' + alignSeqs[j].names;
+                            alignSeqs[i].numIdentical += alignSeqs[j].numIdentical;
+                            
+                            chunk += alignSeqs[j].seq.getName() + "\t" + toString(alignSeqs[j].numIdentical) + "\t" + toString(mismatch) + "\t" + alignSeqs[j].seq.getAligned() + "\n";
+                            
+                            alignSeqs[j].active = 0;
+                            alignSeqs[j].numIdentical = 0;
+                            count++;
+                        }
+                    }//end if j active
+                }//end for loop j
+                
+                //remove from active list
+                alignSeqs[i].active = 0;
+                
+                out << "ideal_seq_" << (i+1) << '\t' << alignSeqs[i].numIdentical << endl << chunk << endl;;
+                
+            }//end if active i
+            if(i % 100 == 0) { m->mothurOut(toString(i) + "\t" + toString(numSeqs - count) + "\t" + toString(count)); m->mothurOutEndLine(); }
+        }
+    }else {
+        map<int, string> mapFile;
+        map<int, int> originalCount;
+        map<int, int>::iterator itCount;
+        for (int i = 0; i < numSeqs; i++) { mapFile[i] = ""; originalCount[i] = alignSeqs[i].numIdentical; }
+        
+        //think about running through twice...
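The loop that follows is the new topdown=F path: rather than letting an abundant sequence sweep up everything after it, each sequence is folded into the first later record that started out more abundant and lies within diffs mismatches, and its map-file text is prepended to the absorber's chain so whole chains can keep moving upward. A minimal standalone sketch of that bottom-up merge is below; PreSeq, countDiffs, and absorbIntoMoreAbundant are illustrative names rather than mothur's, and break is used where the patch uses j += numSeqs to leave the inner search.

#include <cstddef>
#include <map>
#include <string>
#include <vector>

// Illustrative stand-in for mothur's per-sequence record.
struct PreSeq {
    std::string name;         // representative read name
    std::string names;        // comma-separated names of all reads collapsed into this record
    std::string aligned;      // aligned sequence
    int numIdentical;         // current (merged) abundance; 0 once absorbed
};

// Early-exit mismatch count between two aligned sequences.
static int countDiffs(const std::string& a, const std::string& b, int maxDiffs) {
    int mismatch = 0;
    for (std::size_t k = 0; k < a.length() && k < b.length(); k++) {
        if (a[k] != b[k] && ++mismatch > maxDiffs) { break; }
    }
    return mismatch;
}

// Bottom-up merge (topdown=F): each record is absorbed by the first later record
// that was MORE abundant before any merging and sits within `diffs` mismatches.
// chain[j] accumulates the map-file lines of everything record j has absorbed.
int absorbIntoMoreAbundant(std::vector<PreSeq>& seqs, int diffs,
                           std::map<int, std::string>& chain) {
    std::map<int, int> originalCount;          // pre-merge abundances drive the comparison
    for (std::size_t i = 0; i < seqs.size(); i++) {
        chain[i] = "";
        originalCount[i] = seqs[i].numIdentical;
    }

    int merged = 0;
    for (std::size_t i = 0; i < seqs.size(); i++) {
        for (std::size_t j = i + 1; j < seqs.size(); j++) {
            if (originalCount[j] <= originalCount[i]) { continue; }   // only merge "up"
            int mismatch = countDiffs(seqs[i].aligned, seqs[j].aligned, diffs);
            if (mismatch <= diffs) {
                seqs[j].names += ',' + seqs[i].names;
                seqs[j].numIdentical += seqs[i].numIdentical;
                // prepend i's own map-file line, plus whatever i had absorbed, onto j's chain
                chain[j] = seqs[i].name + "\t" + std::to_string(seqs[i].numIdentical) + "\t"
                         + std::to_string(mismatch) + "\t" + seqs[i].aligned + "\n" + chain[i];
                seqs[i].numIdentical = 0;      // record i has been merged away
                originalCount[i] = 0;          // and can never absorb anything itself
                chain[i] = "";
                merged++;
                break;                         // stop searching once merged (patch: j += numSeqs)
            }
        }
    }
    return merged;
}

In the patch the surviving records are then written out as ideal_seq_N blocks; a sketch of that output step appears after the end of the patch.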
+        for (int i = 0; i < numSeqs; i++) {
+            
+            //try to merge it into larger seqs
+            for (int j = i+1; j < numSeqs; j++) {
+                
+                if (m->control_pressed) { out.close(); return 0; }
+                
+                if (originalCount[j] > originalCount[i]) {  //this sequence is more abundant than I am
+                    //are you within "diff" bases
+                    int mismatch = calcMisMatches(alignSeqs[i].seq.getAligned(), alignSeqs[j].seq.getAligned());
+                    
+                    if (mismatch <= diffs) {
+                        //merge
+                        alignSeqs[j].names += ',' + alignSeqs[i].names;
+                        alignSeqs[j].numIdentical += alignSeqs[i].numIdentical;
+                        
+                        mapFile[j] = alignSeqs[i].seq.getName() + "\t" + toString(alignSeqs[i].numIdentical) + "\t" + toString(mismatch) + "\t" + alignSeqs[i].seq.getAligned() + "\n" + mapFile[i];
+                        alignSeqs[i].numIdentical = 0;
+                        originalCount.erase(i);
+                        mapFile[i] = "";
+                        count++;
+                        j+=numSeqs; //exit search, we merged this one in.
+                    }
+                }//end abundance check
+            }//end for loop j
+            
+            if(i % 100 == 0) { m->mothurOut(toString(i) + "\t" + toString(numSeqs - count) + "\t" + toString(count)); m->mothurOutEndLine(); }
+        }
+        
+        for (int i = 0; i < numSeqs; i++) {
+            if (alignSeqs[i].numIdentical != 0) {
+                out << "ideal_seq_" << (i+1) << '\t' << alignSeqs[i].numIdentical << endl << alignSeqs[i].seq.getName() + "\t" + toString(alignSeqs[i].numIdentical) + "\t" + toString(0) + "\t" + alignSeqs[i].seq.getAligned() + "\n" << mapFile[i] << endl;
+            }
+        }
+        
+    }
 	out.close();
 	if(numSeqs % 100 != 0) { m->mothurOut(toString(numSeqs) + "\t" + toString(numSeqs - count) + "\t" + toString(count)); m->mothurOutEndLine(); }
@@ -861,13 +902,19 @@ void PreClusterCommand::readNameFile(){
 		while (!in.eof()) {
 			in >> firstCol >> secondCol; m->gobble(in);
+            
+            for (int i = 0; i < firstCol.length(); i++) {
+                if (firstCol[i] == ':') { firstCol[i] = '_'; m->changedSeqNames = true; }
+            }
+            
+            int size = 1;
+            for (int i = 0; i < secondCol.length(); i++) {
+                if (secondCol[i] == ':') { secondCol[i] = '_'; m->changedSeqNames = true; }
+                else if(secondCol[i] == ','){ size++; }
+            }
+            
 			names[firstCol] = secondCol;
-			int size = 1;
-			
-			for(int i=0;itopdown) {
+            //think about running through twice...
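In the threaded worker that follows (the preclustercommand.h code that reads pDataArray), the calcMisMatches call is commented out and replaced by an inlined character loop that stops as soon as the count exceeds the allowed diffs, pinning the result to the alignment length so the later <= diffs test is guaranteed to fail. Below is a standalone sketch of that early-exit counter; countMismatchesCapped is an illustrative name, and the length parameter stands in for the precomputed alignment length the real loop uses.

#include <cstddef>
#include <string>

// Early-exit mismatch count over two aligned sequences.
// Once more than maxDiffs mismatches are seen, the count is pinned to `length`
// (the full alignment length) so a later `<= maxDiffs` test is guaranteed to
// fail, and scanning stops immediately.
int countMismatchesCapped(const std::string& a, const std::string& b,
                          int maxDiffs, int length) {
    int mismatch = 0;
    for (std::size_t k = 0; k < a.length() && k < b.length(); k++) {
        if (a[k] != b[k]) { mismatch++; }
        if (mismatch > maxDiffs) { mismatch = length; break; }   // too far apart to cluster
    }
    return mismatch;
}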
+            for (int i = 0; i < numSeqs; i++) {
+                
+                //are you active
+                //		itActive = active.find(alignSeqs[i].seq.getName());
+                
+                if (alignSeqs[i].active) {  //this sequence has not been merged yet
+                    
+                    string chunk = alignSeqs[i].seq.getName() + "\t" + toString(alignSeqs[i].numIdentical) + "\t" + toString(0) + "\t" + alignSeqs[i].seq.getAligned() + "\n";
-			//try to merge it with all smaller seqs
-			for (int j = i+1; j < numSeqs; j++) {
-				
-				if (pDataArray->m->control_pressed) { delete parser; return 0; }
-				
-				if (alignSeqs[j].active) {  //this sequence has not been merged yet
-					//are you within "diff" bases
-					//int mismatch = calcMisMatches(alignSeqs[i].seq.getAligned(), alignSeqs[j].seq.getAligned());
-					int mismatch = 0;
-					
-					for (int k = 0; k < alignSeqs[i].seq.getAligned().length(); k++) {
-						//do they match
-						if (alignSeqs[i].seq.getAligned()[k] != alignSeqs[j].seq.getAligned()[k]) { mismatch++; }
-						if (mismatch > pDataArray->diffs) { mismatch = length; break; } //to far to cluster
-					}
-					
-					if (mismatch <= pDataArray->diffs) {
-						//merge
-						alignSeqs[i].names += ',' + alignSeqs[j].names;
-						alignSeqs[i].numIdentical += alignSeqs[j].numIdentical;
-						
-						alignSeqs[j].active = 0;
-						alignSeqs[j].numIdentical = 0;
-						alignSeqs[j].diffs = mismatch;
-						count++;
-						chunk += alignSeqs[j].seq.getName() + "\t" + toString(alignSeqs[j].numIdentical) + "\t" + toString(mismatch) + "\t" + alignSeqs[j].seq.getAligned() + "\n";
-					}
-				}//end if j active
-			}//end for loop j
-			
-			//remove from active list
-			alignSeqs[i].active = 0;
-			
-			out << "ideal_seq_" << (i+1) << '\t' << alignSeqs[i].numIdentical << endl << chunk << endl;
-			
-		}//end if active i
-		if(i % 100 == 0) { pDataArray->m->mothurOut(toString(i) + "\t" + toString(numSeqs - count) + "\t" + toString(count)); pDataArray->m->mothurOutEndLine(); }
-	}
-	out.close();
-	if(numSeqs % 100 != 0) { pDataArray->m->mothurOut(toString(numSeqs) + "\t" + toString(numSeqs - count) + "\t" + toString(count)); pDataArray->m->mothurOutEndLine(); }
+                    //try to merge it with all smaller seqs
+                    for (int j = i+1; j < numSeqs; j++) {
+                        
+                        if (pDataArray->m->control_pressed) { delete parser; return 0; }
+                        
+                        if (alignSeqs[j].active) {  //this sequence has not been merged yet
+                            //are you within "diff" bases
+                            //int mismatch = calcMisMatches(alignSeqs[i].seq.getAligned(), alignSeqs[j].seq.getAligned());
+                            int mismatch = 0;
+                            
+                            for (int k = 0; k < alignSeqs[i].seq.getAligned().length(); k++) {
+                                //do they match
+                                if (alignSeqs[i].seq.getAligned()[k] != alignSeqs[j].seq.getAligned()[k]) { mismatch++; }
+                                if (mismatch > pDataArray->diffs) { mismatch = length; break; } //to far to cluster
+                            }
+                            
+                            if (mismatch <= pDataArray->diffs) {
+                                //merge
+                                alignSeqs[i].names += ',' + alignSeqs[j].names;
+                                alignSeqs[i].numIdentical += alignSeqs[j].numIdentical;
+                                
+                                alignSeqs[j].active = 0;
+                                alignSeqs[j].numIdentical = 0;
+                                alignSeqs[j].diffs = mismatch;
+                                count++;
+                                chunk += alignSeqs[j].seq.getName() + "\t" + toString(alignSeqs[j].numIdentical) + "\t" + toString(mismatch) + "\t" + alignSeqs[j].seq.getAligned() + "\n";
+                            }
+                        }//end if j active
+                    }//end for loop j
+                    
+                    //remove from active list
+                    alignSeqs[i].active = 0;
+                    
+                    out << "ideal_seq_" << (i+1) << '\t' << alignSeqs[i].numIdentical << endl << chunk << endl;
+                    
+                }//end if active i
+                if(i % 100 == 0) { pDataArray->m->mothurOut(toString(i) + "\t" + toString(numSeqs - count) + "\t" + toString(count)); pDataArray->m->mothurOutEndLine(); }
+            }
+            
+        }else {
+            map<int, string> mapFile;
+            map<int, int> originalCount;
+            map<int, int>::iterator itCount;
+            for (int i = 0; i < numSeqs; i++) { mapFile[i] = "";
originalCount[i] = alignSeqs[i].numIdentical; } + + //think about running through twice... + for (int i = 0; i < numSeqs; i++) { + + //try to merge it into larger seqs + for (int j = i+1; j < numSeqs; j++) { + + if (pDataArray->m->control_pressed) { out.close(); return 0; } + + if (originalCount[j] > originalCount[i]) { //this sequence is more abundant than I am + //are you within "diff" bases + //int mismatch = calcMisMatches(alignSeqs[i].seq.getAligned(), alignSeqs[j].seq.getAligned()); + int mismatch = 0; + + for (int k = 0; k < alignSeqs[i].seq.getAligned().length(); k++) { + //do they match + if (alignSeqs[i].seq.getAligned()[k] != alignSeqs[j].seq.getAligned()[k]) { mismatch++; } + if (mismatch > pDataArray->diffs) { mismatch = length; break; } //to far to cluster + } + + if (mismatch <= diffs) { + //merge + alignSeqs[j].names += ',' + alignSeqs[i].names; + alignSeqs[j].numIdentical += alignSeqs[i].numIdentical; + + mapFile[j] = alignSeqs[i].seq.getName() + "\t" + toString(alignSeqs[i].numIdentical) + "\t" + toString(mismatch) + "\t" + alignSeqs[i].seq.getAligned() + "\n" + mapFile[i]; + alignSeqs[i].numIdentical = 0; + originalCount.erase(i); + mapFile[i] = ""; + count++; + j+=numSeqs; //exit search, we merged this one in. + } + }//end abundance check + }//end for loop j + + if(i % 100 == 0) { pDataArray->m->mothurOut(toString(i) + "\t" + toString(numSeqs - count) + "\t" + toString(count)); pDataArray->m->mothurOutEndLine(); } + } + + for (int i = 0; i < numSeqs; i++) { + if (alignSeqs[i].numIdentical != 0) { + out << "ideal_seq_" << (i+1) << '\t' << alignSeqs[i].numIdentical << endl << alignSeqs[i].seq.getName() + "\t" + toString(alignSeqs[i].numIdentical) + "\t" + toString(0) + "\t" + alignSeqs[i].seq.getAligned() + "\n" << mapFile[i] << endl; + } + } + + } + out.close(); + if(numSeqs % 100 != 0) { pDataArray->m->mothurOut(toString(numSeqs) + "\t" + toString(numSeqs - count) + "\t" + toString(count)); pDataArray->m->mothurOutEndLine(); } //////////////////////////////////////////////////// if (pDataArray->m->control_pressed) { delete parser; return 0; } -- 2.39.2
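The readNameFile() hunk earlier in the patch folds name sanitizing into the loops that already walk the two name-file columns: any ':' in a name is rewritten to '_' (setting m->changedSeqNames), and the redundancy of the second column is taken as one plus its comma count. A minimal standalone sketch of that single pass, assuming plain std::string columns instead of mothur's member state; sanitizeName and sanitizeAndCount are illustrative names.

#include <cstddef>
#include <string>

// Sanitize a sequence name in place (':' -> '_') and report whether anything changed.
bool sanitizeName(std::string& name) {
    bool changed = false;
    for (std::size_t i = 0; i < name.length(); i++) {
        if (name[i] == ':') { name[i] = '_'; changed = true; }
    }
    return changed;
}

// Sanitize a comma-separated list of names and count how many reads it represents:
// one read plus one per comma, mirroring the size computation in readNameFile().
int sanitizeAndCount(std::string& nameList, bool& changed) {
    int size = 1;
    for (std::size_t i = 0; i < nameList.length(); i++) {
        if (nameList[i] == ':')      { nameList[i] = '_'; changed = true; }
        else if (nameList[i] == ',') { size++; }
    }
    return size;
}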
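In the new topdown=F path, only records that still carry reads after the merge (numIdentical != 0) receive an ideal_seq_N block in the pre.cluster map file: the header carries the merged abundance, the representative itself is listed at 0 diffs, and the chained entries of everything it absorbed follow. Below is a sketch of that final write, restating the illustrative PreSeq record and chain map from the merge sketch above so the snippet stands alone; it is not mothur's output code.

#include <cstddef>
#include <map>
#include <ostream>
#include <string>
#include <vector>

struct PreSeq {                  // illustrative stand-in, as in the merge sketch
    std::string name;
    std::string aligned;
    int numIdentical;            // merged abundance; 0 if the record was absorbed
};

// Write one map-file block per surviving cluster: a header line "ideal_seq_N <size>",
// the representative itself at 0 diffs, then the chained entries of absorbed reads.
void writeMapBlocks(std::ostream& out, const std::vector<PreSeq>& seqs,
                    const std::map<int, std::string>& chain) {
    for (std::size_t i = 0; i < seqs.size(); i++) {
        if (seqs[i].numIdentical == 0) { continue; }      // merged away, no block
        std::map<int, std::string>::const_iterator it = chain.find(static_cast<int>(i));
        std::string absorbed = (it == chain.end()) ? "" : it->second;
        out << "ideal_seq_" << (i + 1) << '\t' << seqs[i].numIdentical << '\n'
            << seqs[i].name << '\t' << seqs[i].numIdentical << "\t0\t" << seqs[i].aligned << '\n'
            << absorbed << '\n';
    }
}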