+#endif
+
+ //append output files
+ for(int i=0;i<processIDS.size();i++){
+ //newFName = m->getFullPathName(".\\" + newFName);
+ //newNName = m->getFullPathName(".\\" + newNName);
+
+ m->appendFiles((newFName + toString(processIDS[i]) + ".temp"), newFName);
+ m->mothurRemove((newFName + toString(processIDS[i]) + ".temp"));
+
+ m->appendFiles((newNName + toString(processIDS[i]) + ".temp"), newNName);
+ m->mothurRemove((newNName + toString(processIDS[i]) + ".temp"));
+ }
+
+ return num;
+
+ }
+ catch(exception& e) {
+ m->errorOut(e, "PreClusterCommand", "createProcessesGroups");
+ exit(1);
+ }
+}
+/**************************************************************************************************/
+/**
+ * Pre-clusters each group in groups[start..end).
+ * For every group it loads that group's sequences (and name map, if any),
+ * runs process() to merge near-identical sequences, records the per-group
+ * ".map" output file, and appends the surviving sequences via printData().
+ * Returns the sequence count loaded for the last group processed, or 0 if
+ * the user cancelled (m->control_pressed) or diffs exceeds the sequence length.
+ */
+int PreClusterCommand::driverGroups(string newFFile, string newNFile, string newMFile, int start, int end, vector<string> groups){
+	try {
+		
+		int numSeqs = 0;
+		
+		//precluster each group
+		for (int i = start; i < end; i++) {
+			
+			//BUGFIX: the previous code reused the 'start' parameter as the timer
+			//(start = time(NULL)), clobbering the loop's lower bound and truncating
+			//time_t into int; use a dedicated local instead.
+			time_t groupStartTime = time(NULL);
+			
+			if (m->control_pressed) {  return 0; }
+			
+			m->mothurOutEndLine(); m->mothurOut("Processing group " + groups[i] + ":"); m->mothurOutEndLine();
+			
+			map<string, string> thisNameMap;
+			vector<Sequence> thisSeqs;
+			//sequences come from whichever parser matches the grouping input
+			if (groupfile != "") {
+				thisSeqs = parser->getSeqs(groups[i]);
+			}else if (countfile != "") {
+				thisSeqs = cparser->getSeqs(groups[i]);
+			}
+			if (namefile != "") {  thisNameMap = parser->getNameMap(groups[i]); }
+			
+			//fill alignSeqs with this groups info.
+			numSeqs = loadSeqs(thisNameMap, thisSeqs, groups[i]);
+			
+			if (m->control_pressed) {   return 0; }
+			
+			if (diffs > length) { m->mothurOut("Error: diffs is greater than your sequence length."); m->mothurOutEndLine(); m->control_pressed = true; return 0;  }
+			
+			//process() returns how many sequences were merged away for this group
+			int count = process(newMFile+groups[i]+".map");
+			outputNames.push_back(newMFile+groups[i]+".map"); outputTypes["map"].push_back(newMFile+groups[i]+".map");
+			
+			if (m->control_pressed) {  return 0; }
+			
+			m->mothurOut("Total number of sequences before pre.cluster was " + toString(alignSeqs.size()) + "."); m->mothurOutEndLine();
+			m->mothurOut("pre.cluster removed " + toString(count) + " sequences."); m->mothurOutEndLine(); m->mothurOutEndLine();
+			printData(newFFile, newNFile, groups[i]);
+			
+			m->mothurOut("It took " + toString(time(NULL) - groupStartTime) + " secs to cluster " + toString(numSeqs) + " sequences."); m->mothurOutEndLine();
+			
+		}
+		
+		return numSeqs;
+	}
+	catch(exception& e) {
+		m->errorOut(e, "PreClusterCommand", "driverGroups");
+		exit(1);
+	}
+}
+/**************************************************************************************************/
+int PreClusterCommand::process(string newMapFile){
+ try {
+ ofstream out;
+ m->openOutputFile(newMapFile, out);
+
+ //sort seqs by number of identical seqs
+ if (topdown) { sort(alignSeqs.begin(), alignSeqs.end(), comparePriorityTopDown); }
+ else { sort(alignSeqs.begin(), alignSeqs.end(), comparePriorityDownTop); }
+
+ int count = 0;
+ int numSeqs = alignSeqs.size();
+
+ if (topdown) {
+ //think about running through twice...
+ for (int i = 0; i < numSeqs; i++) {
+
+ if (alignSeqs[i].active) { //this sequence has not been merged yet
+
+ string chunk = alignSeqs[i].seq.getName() + "\t" + toString(alignSeqs[i].numIdentical) + "\t" + toString(0) + "\t" + alignSeqs[i].seq.getAligned() + "\n";
+
+ //try to merge it with all smaller seqs
+ for (int j = i+1; j < numSeqs; j++) {
+
+ if (m->control_pressed) { out.close(); return 0; }
+
+ if (alignSeqs[j].active) { //this sequence has not been merged yet
+ //are you within "diff" bases
+ int mismatch = calcMisMatches(alignSeqs[i].seq.getAligned(), alignSeqs[j].seq.getAligned());
+
+ if (mismatch <= diffs) {
+ //merge
+ alignSeqs[i].names += ',' + alignSeqs[j].names;
+ alignSeqs[i].numIdentical += alignSeqs[j].numIdentical;
+
+ chunk += alignSeqs[j].seq.getName() + "\t" + toString(alignSeqs[j].numIdentical) + "\t" + toString(mismatch) + "\t" + alignSeqs[j].seq.getAligned() + "\n";
+
+ alignSeqs[j].active = 0;
+ alignSeqs[j].numIdentical = 0;
+ count++;
+ }
+ }//end if j active
+ }//end for loop j
+
+ //remove from active list
+ alignSeqs[i].active = 0;
+
+ out << "ideal_seq_" << (i+1) << '\t' << alignSeqs[i].numIdentical << endl << chunk << endl;;
+
+ }//end if active i
+ if(i % 100 == 0) { m->mothurOutJustToScreen(toString(i) + "\t" + toString(numSeqs - count) + "\t" + toString(count)+"\n"); }
+ }
+ }else {
+ map<int, string> mapFile;
+ map<int, int> originalCount;
+ map<int, int>::iterator itCount;
+ for (int i = 0; i < numSeqs; i++) { mapFile[i] = ""; originalCount[i] = alignSeqs[i].numIdentical; }
+
+ //think about running through twice...
+ for (int i = 0; i < numSeqs; i++) {
+
+ //try to merge it into larger seqs
+ for (int j = i+1; j < numSeqs; j++) {
+
+ if (m->control_pressed) { out.close(); return 0; }
+
+ if (originalCount[j] > originalCount[i]) { //this sequence is more abundant than I am
+ //are you within "diff" bases
+ int mismatch = calcMisMatches(alignSeqs[i].seq.getAligned(), alignSeqs[j].seq.getAligned());
+
+ if (mismatch <= diffs) {
+ //merge
+ alignSeqs[j].names += ',' + alignSeqs[i].names;
+ alignSeqs[j].numIdentical += alignSeqs[i].numIdentical;
+
+ mapFile[j] = alignSeqs[i].seq.getName() + "\t" + toString(alignSeqs[i].numIdentical) + "\t" + toString(mismatch) + "\t" + alignSeqs[i].seq.getAligned() + "\n" + mapFile[i];
+ alignSeqs[i].numIdentical = 0;
+ originalCount.erase(i);
+ mapFile[i] = "";
+ count++;
+ j+=numSeqs; //exit search, we merged this one in.
+ }
+ }//end abundance check
+ }//end for loop j
+
+ if(i % 100 == 0) { m->mothurOutJustToScreen(toString(i) + "\t" + toString(numSeqs - count) + "\t" + toString(count)+"\n"); }
+ }
+
+ for (int i = 0; i < numSeqs; i++) {
+ if (alignSeqs[i].numIdentical != 0) {
+ out << "ideal_seq_" << (i+1) << '\t' << alignSeqs[i].numIdentical << endl << alignSeqs[i].seq.getName() + "\t" + toString(alignSeqs[i].numIdentical) + "\t" + toString(0) + "\t" + alignSeqs[i].seq.getAligned() + "\n" << mapFile[i] << endl;
+ }
+ }
+
+ }
+ out.close();
+