+	//
+	// MPI-parallel path for building the per-sequence summary.  Rank 0 writes the
+	// header and distributes the fasta byte-offset table; every rank summarises
+	// its slice (writing rows through a shared MPI file pointer) and rank 0 then
+	// gathers each worker's count vectors.  Non-MPI builds fork per-process
+	// workers (unix) or run single-threaded (other platforms).
+	// NOTE(review): this is a fragment of a larger function -- numSeqs,
+	// processors, fastafile, summaryFile, lines, processIDS and m are declared
+	// outside this view, and the trailing "if (pid == 0) {" closes past it.
+	//
+	vector<int> startPosition;
+	vector<int> endPosition;
+	vector<int> seqLength;
+	vector<int> ambigBases;
+	vector<int> longHomoPolymer;
+
+#ifdef USE_MPI
+	int pid, numSeqsPerProcessor;
+	int tag = 2001;
+	// Sentinel values a worker appends to the end of each vector it sends, so
+	// the root can tell which of the five count vectors a message carries.
+	int startTag = 1; int endTag = 2; int lengthTag = 3; int baseTag = 4; int lhomoTag = 5;
+	int outMode=MPI_MODE_CREATE|MPI_MODE_WRONLY;
+	vector<long> MPIPos;	// byte offset of each sequence in the fasta file (numSeqs+1 entries; last marks EOF)
+
+	MPI_Status status;
+	MPI_Status statusOut;
+	MPI_File inMPI;
+	MPI_File outMPI;
+	MPI_Comm_size(MPI_COMM_WORLD, &processors);
+	MPI_Comm_rank(MPI_COMM_WORLD, &pid);
+
+	// MPI_File_open wants a char*, so copy the std::string names into buffers.
+	char tempFileName[1024];
+	strcpy(tempFileName, fastafile.c_str());
+
+	char sumFileName[1024];
+	strcpy(sumFileName, summaryFile.c_str());
+
+	MPI_File_open(MPI_COMM_WORLD, tempFileName, MPI_MODE_RDONLY, MPI_INFO_NULL, &inMPI);  //comm, filename, mode, info, filepointer
+	MPI_File_open(MPI_COMM_WORLD, sumFileName, outMode, MPI_INFO_NULL, &outMPI);
+
+	// NOTE(review): on cancel the freshly created summary file is left behind;
+	// consider removing it here -- confirm against the command's cleanup style.
+	if (m->control_pressed) {  MPI_File_close(&inMPI);  MPI_File_close(&outMPI);  return 0;  }
+
+	if (pid == 0) { //you are the root process
+		//print header
+		string outputString = "seqname\tstart\tend\tnbases\tambigs\tpolymer\n";
+		int length = outputString.length();
+		char* buf2 = new char[length];
+		memcpy(buf2, outputString.c_str(), length);
+
+		MPI_File_write_shared(outMPI, buf2, length, MPI_CHAR, &statusOut);
+		delete [] buf2;	// was "delete buf2;" -- buf2 comes from new[], so array delete is required
+
+		MPIPos = setFilePosFasta(fastafile, numSeqs); //fills MPIPos, returns numSeqs
+
+		// Ship the sequence count and the numSeqs+1 offsets to every worker.
+		for(int i = 1; i < processors; i++) {
+			MPI_Send(&numSeqs, 1, MPI_INT, i, tag, MPI_COMM_WORLD);
+			MPI_Send(&MPIPos[0], (numSeqs+1), MPI_LONG, i, tag, MPI_COMM_WORLD);
+		}
+
+		//figure out how many sequences you have to do
+		numSeqsPerProcessor = numSeqs / processors;
+		int startIndex =  pid * numSeqsPerProcessor;
+		// the last rank absorbs the division remainder
+		if(pid == (processors - 1)){	numSeqsPerProcessor = numSeqs - pid * numSeqsPerProcessor; 	}
+
+		//do your part
+		MPICreateSummary(startIndex, numSeqsPerProcessor, startPosition, endPosition, seqLength, ambigBases, longHomoPolymer, inMPI, outMPI, MPIPos);
+
+	}else { //i am the child process
+
+		MPI_Recv(&numSeqs, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
+		MPIPos.resize(numSeqs+1);
+		MPI_Recv(&MPIPos[0], (numSeqs+1), MPI_LONG, 0, tag, MPI_COMM_WORLD, &status);
+
+		//figure out how many sequences you have to align
+		numSeqsPerProcessor = numSeqs / processors;
+		int startIndex =  pid * numSeqsPerProcessor;
+		if(pid == (processors - 1)){	numSeqsPerProcessor = numSeqs - pid * numSeqsPerProcessor; 	}
+
+		//do your part
+		MPICreateSummary(startIndex, numSeqsPerProcessor, startPosition, endPosition, seqLength, ambigBases, longHomoPolymer, inMPI, outMPI, MPIPos);
+	}
+
+	MPI_File_close(&inMPI);
+	MPI_File_close(&outMPI);
+	MPI_Barrier(MPI_COMM_WORLD); //make everyone wait - just in case
+
+	if (pid == 0) {
+		//get the info from the child processes
+		for(int i = 1; i < processors; i++) {
+			int size;
+			MPI_Recv(&size, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &status);
+
+			vector<int> temp; temp.resize(size+1);
+
+			// Each worker sends its five vectors with the same tag; MPI's
+			// non-overtaking rule keeps them ordered per sender, and the sentinel
+			// stored in the last element identifies which vector arrived.
+			for(int j = 0; j < 5; j++) {
+
+				MPI_Recv(&temp[0], (size+1), MPI_INT, i, 2001, MPI_COMM_WORLD, &status);
+				int receiveTag = temp[temp.size()-1];  //child process added a int to the end to indicate what count this is for
+
+				if (receiveTag == startTag) {
+					for (int k = 0; k < size; k++) {		startPosition.push_back(temp[k]);	}
+				}else if (receiveTag == endTag) {
+					for (int k = 0; k < size; k++) {		endPosition.push_back(temp[k]);	}
+				}else if (receiveTag == lengthTag) {
+					for (int k = 0; k < size; k++) {		seqLength.push_back(temp[k]);	}
+				}else if (receiveTag == baseTag) {
+					for (int k = 0; k < size; k++) {		ambigBases.push_back(temp[k]);	}
+				}else if (receiveTag == lhomoTag) {
+					for (int k = 0; k < size; k++) {		longHomoPolymer.push_back(temp[k]);	}
+				}
+			}
+		}
+
+	}else{
+
+		//send my counts
+		int size = startPosition.size();
+		MPI_Send(&size, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
+
+		// NOTE(review): appending the sentinel mutates the worker's vectors and
+		// is never undone -- harmless only as long as workers do not reuse them
+		// after this point (the sorts below run on rank 0 alone).
+		startPosition.push_back(startTag);
+		int ierr = MPI_Send(&(startPosition[0]), (size+1), MPI_INT, 0, 2001, MPI_COMM_WORLD);
+		endPosition.push_back(endTag);
+		ierr = MPI_Send (&(endPosition[0]), (size+1), MPI_INT, 0, 2001, MPI_COMM_WORLD);
+		seqLength.push_back(lengthTag);
+		ierr = MPI_Send(&(seqLength[0]), (size+1), MPI_INT, 0, 2001, MPI_COMM_WORLD);
+		ambigBases.push_back(baseTag);
+		ierr = MPI_Send(&(ambigBases[0]), (size+1), MPI_INT, 0, 2001, MPI_COMM_WORLD);
+		longHomoPolymer.push_back(lhomoTag);
+		ierr = MPI_Send(&(longHomoPolymer[0]), (size+1), MPI_INT, 0, 2001, MPI_COMM_WORLD);
+	}
+
+	MPI_Barrier(MPI_COMM_WORLD); //make everyone wait - just in case
+#else
+	// NOTE(review): only __APPLE__ is inside defined(); the rest rely on
+	// undefined macros evaluating to 0 in #if, which works but is fragile.
+	#if defined (__APPLE__) || (__MACH__) || (linux) || (__linux)
+		if(processors == 1){
+			ifstream inFASTA;
+			openInputFile(fastafile, inFASTA);
+			getNumSeqs(inFASTA, numSeqs);
+			inFASTA.close();
+
+			// raw new -- presumably owned and freed via lines elsewhere; verify
+			lines.push_back(new linePair(0, numSeqs));
+
+			driverCreateSummary(startPosition, endPosition, seqLength, ambigBases, longHomoPolymer, fastafile, summaryFile, lines[0]);
+		}else{
+			numSeqs = setLines(fastafile);
+			createProcessesCreateSummary(startPosition, endPosition, seqLength, ambigBases, longHomoPolymer, fastafile, summaryFile);
+
+			// first worker's temp file becomes the summary; the rest are appended
+			rename((summaryFile + toString(processIDS[0]) + ".temp").c_str(), summaryFile.c_str());
+			//append files
+			for(int i=1;i<processors;i++){
+				appendFiles((summaryFile + toString(processIDS[i]) + ".temp"), summaryFile);
+				remove((summaryFile + toString(processIDS[i]) + ".temp").c_str());
+			}
+		}
+
+		if (m->control_pressed) {  return 0; }
+	#else
+		ifstream inFASTA;
+		openInputFile(fastafile, inFASTA);
+		getNumSeqs(inFASTA, numSeqs);
+		inFASTA.close();
+
+		lines.push_back(new linePair(0, numSeqs));
+
+		driverCreateSummary(startPosition, endPosition, seqLength, ambigBases, longHomoPolymer, fastafile, summaryFile, lines[0]);
+		if (m->control_pressed) {  return 0; }
+	#endif
+#endif
+
+	#ifdef USE_MPI
+		if (pid == 0) {
+	#endif
+
+	// each column is sorted independently (pairing is deliberately discarded,
+	// consistent with reporting percentiles rather than per-sequence rows)
+	sort(startPosition.begin(), startPosition.end());
+	sort(endPosition.begin(), endPosition.end());