+#ifdef USE_MPI
+ }
+ MPI_Barrier(MPI_COMM_WORLD); //make everyone wait
+#endif
+ }
+
+ m->mothurOut("It took " + toString(time(NULL) - start) + " secs to check " + toString(numSeqs) + " sequences."); m->mothurOutEndLine();
+ }
+
+ //set accnos file as new current accnosfile
+ string current = "";
+ itTypes = outputTypes.find("accnos");
+ if (itTypes != outputTypes.end()) {
+ if ((itTypes->second).size() != 0) { current = (itTypes->second)[0]; m->setAccnosFile(current); }
+ }
+
+ if (trim) {
+ itTypes = outputTypes.find("fasta");
+ if (itTypes != outputTypes.end()) {
+ if ((itTypes->second).size() != 0) { current = (itTypes->second)[0]; m->setFastaFile(current); }
+ }
+ }
+
+ itTypes = outputTypes.find("count");
+ if (itTypes != outputTypes.end()) {
+ if ((itTypes->second).size() != 0) { current = (itTypes->second)[0]; m->setCountTableFile(current); }
+ }
+
+ m->mothurOutEndLine();
+ m->mothurOut("Output File Names: "); m->mothurOutEndLine();
+ for (int i = 0; i < outputNames.size(); i++) { m->mothurOut(outputNames[i]); m->mothurOutEndLine(); }
+ m->mothurOutEndLine();
+
+ return 0;
+
+ }
+ catch(exception& e) {
+ m->errorOut(e, "ChimeraSlayerCommand", "execute");
+ exit(1);
+ }
+}
+//**********************************************************************************************************************
+//Distributes the per-group fasta files across the MPI ranks, runs the chimera
+//check on each file, and merges results into shared output files.
+//outputFileName/accnosFileName/trimFastaFileName - collective output files (opened with shared file pointers).
+//fileToPriority - per-file map of sequence name -> abundance priority; also defines the set of files to process.
+//fileGroup - maps each temp fasta filename to its group name.
+//countlist - count-style accnos output written when (hasCount && dups).
+//countfile - NOTE(review): currently unused in this function (countlist is the file opened) — confirm intent.
+//Returns 0; errors are reported via m->control_pressed / exceptions.
+int ChimeraSlayerCommand::MPIExecuteGroups(string outputFileName, string accnosFileName, string trimFastaFileName, map<string, map<string, int> >& fileToPriority, map<string, string>& fileGroup, string countlist, string countfile){
+	try {
+#ifdef USE_MPI
+		int pid;
+		int tag = 2001;
+
+		MPI_Status status;
+		MPI_Comm_rank(MPI_COMM_WORLD, &pid); //find out who we are
+		MPI_Comm_size(MPI_COMM_WORLD, &processors);
+
+		//put filenames in a vector, then pass each process a starting and ending point in the vector
+		//all processes already have the fileToPriority and fileGroup, they just need to know which files to process
+		map<string, map<string, int> >::iterator itFile;
+		vector<string> filenames;
+		for(itFile = fileToPriority.begin(); itFile != fileToPriority.end(); itFile++) { filenames.push_back(itFile->first); }
+
+		//integer division; the last process absorbs any remainder groups
+		int numGroupsPerProcessor = filenames.size() / processors;
+		int startIndex =  pid * numGroupsPerProcessor;
+		int endIndex = (pid+1) * numGroupsPerProcessor;
+		if(pid == (processors - 1)){	endIndex = filenames.size();	}
+
+		vector<unsigned long long> MPIPos;
+
+		MPI_File outMPI;
+		MPI_File outMPIAccnos;
+		MPI_File outMPIFasta;
+		MPI_File outMPICount;
+
+		int outMode=MPI_MODE_CREATE|MPI_MODE_WRONLY;
+		int inMode=MPI_MODE_RDONLY;
+
+		//MPI_File_open takes char*, so copy the filenames into fixed buffers
+		//NOTE(review): assumes paths are shorter than 1024 chars — strcpy does not bounds-check
+		char outFilename[1024];
+		strcpy(outFilename, outputFileName.c_str());
+
+		char outAccnosFilename[1024];
+		strcpy(outAccnosFilename, accnosFileName.c_str());
+
+		char outFastaFilename[1024];
+		strcpy(outFastaFilename, trimFastaFileName.c_str());
+
+		char outCountFilename[1024];
+		strcpy(outCountFilename, countlist.c_str());
+
+		//collective opens: every rank participates, writes interleave via the shared file pointer
+		MPI_File_open(MPI_COMM_WORLD, outFilename, outMode, MPI_INFO_NULL, &outMPI);
+		MPI_File_open(MPI_COMM_WORLD, outAccnosFilename, outMode, MPI_INFO_NULL, &outMPIAccnos);
+		if (trim) { MPI_File_open(MPI_COMM_WORLD, outFastaFilename, outMode, MPI_INFO_NULL, &outMPIFasta); }
+		if (hasCount && dups) { MPI_File_open(MPI_COMM_WORLD, outCountFilename, outMode, MPI_INFO_NULL, &outMPICount); }
+
+		if (m->control_pressed) {  MPI_File_close(&outMPI); if (trim) { MPI_File_close(&outMPIFasta); } MPI_File_close(&outMPIAccnos); if (hasCount && dups) { MPI_File_close(&outMPICount); } return 0;  }
+
+		//print headers
+		if (pid == 0) { //you are the root process
+			m->mothurOutEndLine();
+			m->mothurOut("Only reporting sequence supported by " + toString(minBS) + "% of bootstrapped results.");
+			m->mothurOutEndLine();
+
+			string outTemp = "Name\tLeftParent\tRightParent\tDivQLAQRB\tPerIDQLAQRB\tBootStrapA\tDivQLBQRA\tPerIDQLBQRA\tBootStrapB\tFlag\tLeftWindow\tRightWindow\n";
+
+			//print header
+			int length = outTemp.length();
+			char* buf2 = new char[length];
+			memcpy(buf2, outTemp.c_str(), length);
+
+			MPI_File_write_shared(outMPI, buf2, length, MPI_CHAR, &status);
+			delete [] buf2; //array form required for new[] allocations
+		}
+		MPI_Barrier(MPI_COMM_WORLD); //make everyone wait - just in case
+
+		for (int i = startIndex; i < endIndex; i++) {
+
+			int start = time(NULL);
+			int num = 0;
+			string thisFastaName = filenames[i];
+			map<string, int> thisPriority = fileToPriority[thisFastaName];
+
+			char inFileName[1024];
+			strcpy(inFileName, thisFastaName.c_str());
+			MPI_File inMPI;
+			//MPI_COMM_SELF: each rank opens its own input file independently
+			MPI_File_open(MPI_COMM_SELF, inFileName, inMode, MPI_INFO_NULL, &inMPI);  //comm, filename, mode, info, filepointer
+
+			MPIPos = m->setFilePosFasta(thisFastaName, num); //fills MPIPos, returns numSeqs
+
+			cout << endl << "Checking sequences from group: " << fileGroup[thisFastaName] << "." << endl;
+
+			set<string> cnames;
+			driverMPI(0, num, inMPI, outMPI, outMPIAccnos, outMPIFasta, cnames, MPIPos, thisFastaName, thisPriority, true);
+			numSeqs += num;
+
+			MPI_File_close(&inMPI);
+			m->mothurRemove(thisFastaName); //temp per-group fasta no longer needed
+
+			if (dups) {
+				if (cnames.size() != 0) {
+					if (hasCount) {
+						//count table input: write "name<TAB>group" lines for each chimera found
+						for (set<string>::iterator it = cnames.begin(); it != cnames.end(); it++) {
+							string outputString = (*it) + "\t" + fileGroup[thisFastaName] + "\n";
+							int length = outputString.length();
+							char* buf2 = new char[length];
+							memcpy(buf2, outputString.c_str(), length);
+							MPI_File_write_shared(outMPICount, buf2, length, MPI_CHAR, &status);
+							delete [] buf2; //array form required for new[] allocations
+						}
+					}else {
+						//name file input: expand each chimera name to all of its duplicate names for the accnos file
+						map<string, map<string, string> >::iterator itGroupNameMap = group2NameMap.find(fileGroup[thisFastaName]);
+						if (itGroupNameMap != group2NameMap.end()) {
+							map<string, string> thisnamemap = itGroupNameMap->second;
+							map<string, string>::iterator itN;
+							for (set<string>::iterator it = cnames.begin(); it != cnames.end(); it++) {
+								itN = thisnamemap.find(*it);
+								if (itN != thisnamemap.end()) {
+									vector<string> tempNames; m->splitAtComma(itN->second, tempNames);
+									for (int j = 0; j < tempNames.size(); j++) { //write to accnos file
+										string outputString = tempNames[j] + "\n";
+										int length = outputString.length();
+										char* buf2 = new char[length];
+										memcpy(buf2, outputString.c_str(), length);
+
+										MPI_File_write_shared(outMPIAccnos, buf2, length, MPI_CHAR, &status);
+										delete [] buf2; //array form required for new[] allocations
+									}
+
+								}else { m->mothurOut("[ERROR]: parsing cannot find " + *it + ".\n"); m->control_pressed = true; }
+							}
+						}else { m->mothurOut("[ERROR]: parsing cannot find " + fileGroup[thisFastaName] + ".\n"); m->control_pressed = true; }
+					}
+
+				}
+			}
+
+			cout << endl << "It took " << toString(time(NULL) - start) << " secs to check " + toString(num) + " sequences from group " << fileGroup[thisFastaName] << "." << endl;
+		}
+
+		//gather per-rank sequence counts on the root so it can report the total
+		if (pid == 0) {
+			for(int i = 1; i < processors; i++) {
+				int temp = 0;
+				MPI_Recv(&temp, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &status);
+				numSeqs += temp;
+			}
+		}else{ MPI_Send(&numSeqs, 1, MPI_INT, 0, tag, MPI_COMM_WORLD); }
+
+		MPI_File_close(&outMPI);
+		MPI_File_close(&outMPIAccnos);
+		if (trim) { MPI_File_close(&outMPIFasta); }
+		if (hasCount && dups) { MPI_File_close(&outMPICount); }
+
+		MPI_Barrier(MPI_COMM_WORLD); //make everyone wait - just in case
+#endif
+		return 0;
+
+	}catch(exception& e) {
+		m->errorOut(e, "ChimeraSlayerCommand", "MPIExecuteGroups");
+		exit(1);
+	}
+}
+//**********************************************************************************************************************
+int ChimeraSlayerCommand::MPIExecute(string inputFile, string outputFileName, string accnosFileName, string trimFastaFileName, map<string, int>& priority){
+ try {
+
+#ifdef USE_MPI
+ int pid, numSeqsPerProcessor;
+ int tag = 2001;
+ vector<unsigned long long> MPIPos;
+
+ MPI_Status status;
+ MPI_Comm_rank(MPI_COMM_WORLD, &pid); //find out who we are
+ MPI_Comm_size(MPI_COMM_WORLD, &processors);
+
+ MPI_File inMPI;
+ MPI_File outMPI;
+ MPI_File outMPIAccnos;
+ MPI_File outMPIFasta;
+
+ int outMode=MPI_MODE_CREATE|MPI_MODE_WRONLY;
+ int inMode=MPI_MODE_RDONLY;
+
+ char outFilename[1024];
+ strcpy(outFilename, outputFileName.c_str());
+
+ char outAccnosFilename[1024];
+ strcpy(outAccnosFilename, accnosFileName.c_str());
+
+ char outFastaFilename[1024];
+ strcpy(outFastaFilename, trimFastaFileName.c_str());
+
+ char inFileName[1024];
+ strcpy(inFileName, inputFile.c_str());
+
+ MPI_File_open(MPI_COMM_WORLD, inFileName, inMode, MPI_INFO_NULL, &inMPI); //comm, filename, mode, info, filepointer
+ MPI_File_open(MPI_COMM_WORLD, outFilename, outMode, MPI_INFO_NULL, &outMPI);
+ MPI_File_open(MPI_COMM_WORLD, outAccnosFilename, outMode, MPI_INFO_NULL, &outMPIAccnos);
+ if (trim) { MPI_File_open(MPI_COMM_WORLD, outFastaFilename, outMode, MPI_INFO_NULL, &outMPIFasta); }
+
+ if (m->control_pressed) { MPI_File_close(&inMPI); MPI_File_close(&outMPI); if (trim) { MPI_File_close(&outMPIFasta); } MPI_File_close(&outMPIAccnos); return 0; }
+
+ if (pid == 0) { //you are the root process
+ m->mothurOutEndLine();
+ m->mothurOut("Only reporting sequence supported by " + toString(minBS) + "% of bootstrapped results.");
+ m->mothurOutEndLine();
+
+ string outTemp = "Name\tLeftParent\tRightParent\tDivQLAQRB\tPerIDQLAQRB\tBootStrapA\tDivQLBQRA\tPerIDQLBQRA\tBootStrapB\tFlag\tLeftWindow\tRightWindow\n";
+
+ //print header
+ int length = outTemp.length();
+ char* buf2 = new char[length];
+ memcpy(buf2, outTemp.c_str(), length);
+
+ MPI_File_write_shared(outMPI, buf2, length, MPI_CHAR, &status);
+ delete buf2;
+
+ MPIPos = m->setFilePosFasta(inputFile, numSeqs); //fills MPIPos, returns numSeqs
+
+ if (templatefile != "self") { //if template=self we can only use 1 processor
+ //send file positions to all processes
+ for(int i = 1; i < processors; i++) {
+ MPI_Send(&numSeqs, 1, MPI_INT, i, tag, MPI_COMM_WORLD);
+ MPI_Send(&MPIPos[0], (numSeqs+1), MPI_LONG, i, tag, MPI_COMM_WORLD);
+ }
+ }
+ //figure out how many sequences you have to align
+ numSeqsPerProcessor = numSeqs / processors;
+ int startIndex = pid * numSeqsPerProcessor;
+ if(pid == (processors - 1)){ numSeqsPerProcessor = numSeqs - pid * numSeqsPerProcessor; }
+
+ if (templatefile == "self") { //if template=self we can only use 1 processor
+ startIndex = 0;
+ numSeqsPerProcessor = numSeqs;
+ }
+
+ //do your part
+ set<string> cnames;
+ driverMPI(startIndex, numSeqsPerProcessor, inMPI, outMPI, outMPIAccnos, outMPIFasta, cnames, MPIPos, inputFile, priority, false);
+
+ if (m->control_pressed) { MPI_File_close(&inMPI); MPI_File_close(&outMPI); if (trim) { MPI_File_close(&outMPIFasta); } MPI_File_close(&outMPIAccnos); return 0; }
+
+ }else{ //you are a child process
+ if (templatefile != "self") { //if template=self we can only use 1 processor
+ MPI_Recv(&numSeqs, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
+ MPIPos.resize(numSeqs+1);
+ MPI_Recv(&MPIPos[0], (numSeqs+1), MPI_LONG, 0, tag, MPI_COMM_WORLD, &status);