X-Git-Url: https://git.donarmstrong.com/?p=mothur.git;a=blobdiff_plain;f=bayesian.cpp;h=8278afb32e1d2c028a83ffdece17ed9910a78e20;hp=b1f2c4cf422656bb603f1691b67b79b57305490e;hb=d1c97b8c04bb75faca1e76ffad60b37a4d789d3d;hpb=5eca3348fe3962b8965236ca877ef6f52e8fb104

diff --git a/bayesian.cpp b/bayesian.cpp
index b1f2c4c..8278afb 100644
--- a/bayesian.cpp
+++ b/bayesian.cpp
@@ -12,12 +12,14 @@
 #include "phylosummary.h"
 #include "referencedb.h"
 /**************************************************************************************************/
-Bayesian::Bayesian(string tfile, string tempFile, string method, int ksize, int cutoff, int i, int tid) :
-Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
+Bayesian::Bayesian(string tfile, string tempFile, string method, int ksize, int cutoff, int i, int tid, bool f, bool sh) :
+Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 	try {
 		ReferenceDB* rdb = ReferenceDB::getInstance();
 		threadID = tid;
+		flip = f;
+		shortcuts = sh;
 		string baseName = tempFile;
 		if (baseName == "saved") { baseName = rdb->getSavedReference(); }
@@ -26,7 +28,7 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 		if (baseTName == "saved") { baseTName = rdb->getSavedTaxonomy(); }
 
 		/************calculate the probability that each word will be in a specific taxonomy*************/
-		string tfileroot = baseTName.substr(0,baseTName.find_last_of(".")+1);
+		string tfileroot = m->getFullPathName(baseTName.substr(0,baseTName.find_last_of(".")+1));
 		string tempfileroot = m->getRootName(m->getSimpleName(baseName));
 		string phyloTreeName = tfileroot + "tree.train";
 		string phyloTreeSumName = tfileroot + "tree.sum";
@@ -62,7 +64,7 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 			}
 			saveIn.close();
 		}
-		
+
 		if(probFileTest && probFileTest2 && phyloTreeTest && probFileTest3 && FilesGood){
 			if (tempFile == "saved") { m->mothurOutEndLine(); m->mothurOut("Using sequences from " + rdb->getSavedReference() + " that are saved in memory."); m->mothurOutEndLine(); }
@@ -78,13 +80,14 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 			if (tfile == "saved") {
 				m->mothurOutEndLine(); m->mothurOut("Using probabilities from " + rdb->getSavedTaxonomy() + " that are saved in memory... "); cout.flush();;
 				wordGenusProb = rdb->wordGenusProb;
+				WordPairDiffArr = rdb->WordPairDiffArr;
 			}else {
 				m->mothurOut("Reading template probabilities... "); cout.flush();
 				readProbFile(probFileTest, probFileTest2, probFileName, probFileName2);
 			}
 
 			//save probabilities
-			if (rdb->save) { rdb->wordGenusProb = wordGenusProb; }
+			if (rdb->save) { rdb->wordGenusProb = wordGenusProb; rdb->WordPairDiffArr = WordPairDiffArr; }
 		}else{
 
 			//create search database and names vector
@@ -108,11 +111,11 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 			//initialize probabilities
 			wordGenusProb.resize(numKmers);
-			//cout << numKmers << '\t' << genusNodes.size() << endl;
+			WordPairDiffArr.resize(numKmers);
+
 			for (int j = 0; j < wordGenusProb.size(); j++) {	wordGenusProb[j].resize(genusNodes.size());	}
-			//cout << numKmers << '\t' << genusNodes.size() << endl;
-			//ofstream out;
-			//ofstream out2;
+			ofstream out;
+			ofstream out2;
 
 			#ifdef USE_MPI
 				int pid;
@@ -122,25 +125,28 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 			#endif
 
-			//m->openOutputFile(probFileName, out);
+			if (shortcuts) {
+				m->openOutputFile(probFileName, out);
 
-			//output mothur version
-			//out << "#" << m->getVersion() << endl;
+				//output mothur version
+				out << "#" << m->getVersion() << endl;
 
-			//out << numKmers << endl;
+				out << numKmers << endl;
 
-			//m->openOutputFile(probFileName2, out2);
+				m->openOutputFile(probFileName2, out2);
 
-			//output mothur version
-			//out2 << "#" << m->getVersion() << endl;
+				//output mothur version
+				out2 << "#" << m->getVersion() << endl;
+			}
 
 			#ifdef USE_MPI
 			}
 			#endif
 
-			//for each word
 			for (int i = 0; i < numKmers; i++) {
+				//m->mothurOut("[DEBUG]: kmer = " + toString(i) + "\n");
+
 				if (m->control_pressed) { break; }
 
 				#ifdef USE_MPI
@@ -149,7 +155,7 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 					if (pid == 0) {
 				#endif
 
-				//out << i << '\t';
+				if (shortcuts) { out << i << '\t'; }
 
 				#ifdef USE_MPI
 				}
@@ -157,26 +163,26 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 				#endif
 
 				vector<int> seqsWithWordi = database->getSequencesWithKmer(i);
 
-				map<int, int> count;
-				for (int k = 0; k < genusNodes.size(); k++) { count[genusNodes[k]] = 0; }
-				//for each sequence with that word
+				vector<int> count; count.resize(genusNodes.size(), 0);
 				for (int j = 0; j < seqsWithWordi.size(); j++) {
-					int temp = phyloTree->getIndex(names[seqsWithWordi[j]]);
+					int temp = phyloTree->getGenusIndex(names[seqsWithWordi[j]]);
 					count[temp]++;  //increment count of seq in this genus who have this word
 				}
 
 				//probabilityInTemplate = (# of seqs with that word in template + 0.50) / (total number of seqs in template + 1);
 				float probabilityInTemplate = (seqsWithWordi.size() + 0.50) / (float) (names.size() + 1);
-
+				diffPair tempProb(log(probabilityInTemplate), 0.0);
+				WordPairDiffArr[i] = tempProb;
+
 				int numNotZero = 0;
 				for (int k = 0; k < genusNodes.size(); k++) {
 					//probabilityInThisTaxonomy = (# of seqs with that word in this taxonomy + probabilityInTemplate) / (total number of seqs in this taxonomy + 1);
-					wordGenusProb[i][k] = log((count[genusNodes[k]] + probabilityInTemplate) / (float) (genusTotals[k] + 1));
+					wordGenusProb[i][k] = log((count[k] + probabilityInTemplate) / (float) (genusTotals[k] + 1));
 
-					if (count[genusNodes[k]] != 0) {
+					if (count[k] != 0) {
 						#ifdef USE_MPI
 							int pid;
 							MPI_Comm_rank(MPI_COMM_WORLD, &pid); //find out who we are
@@ -184,7 +190,7 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 							if (pid == 0) {
 						#endif
 
-						//out << k << '\t' << wordGenusProb[i][k] << '\t';
+						if (shortcuts) { out << k << '\t' << wordGenusProb[i][k] << '\t' ; }
 
 						#ifdef USE_MPI
 						}
@@ -200,8 +206,10 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 					if (pid == 0) {
 				#endif
 
-				//out << endl;
-				//out2 << probabilityInTemplate << '\t' << numNotZero << endl;
+				if (shortcuts) {
+					out << endl;
+					out2 << probabilityInTemplate << '\t' << numNotZero << '\t' << log(probabilityInTemplate) << endl;
+				}
 
 				#ifdef USE_MPI
 				}
@@ -214,9 +222,10 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 				if (pid == 0) {
 			#endif
 
-			//out.close();
-			//out2.close();
-
+			if (shortcuts) {
+				out.close();
+				out2.close();
+			}
 			#ifdef USE_MPI
 			}
 			#endif
@@ -226,12 +235,19 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 			delete phyloTree;
 			phyloTree = new PhyloTree(phyloTreeTest, phyloTreeName);
-
+
 			//save probabilities
-			if (rdb->save) { rdb->wordGenusProb = wordGenusProb; }
+			if (rdb->save) { rdb->wordGenusProb = wordGenusProb; rdb->WordPairDiffArr = WordPairDiffArr; }
 		}
 		}
-
+
+		if (m->debug) { m->mothurOut("[DEBUG]: about to generateWordPairDiffArr\n"); }
+		generateWordPairDiffArr();
+		if (m->debug) { m->mothurOut("[DEBUG]: done generateWordPairDiffArr\n"); }
+
+		//save probabilities
+		if (rdb->save) { rdb->wordGenusProb = wordGenusProb; rdb->WordPairDiffArr = WordPairDiffArr; }
+
 		m->mothurOut("DONE."); m->mothurOutEndLine();
 		m->mothurOut("It took " + toString(time(NULL) - start) + " seconds to get probabilities. "); m->mothurOutEndLine();
 	}
@@ -243,9 +259,8 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 /**************************************************************************************************/
 Bayesian::~Bayesian() {
 	try {
-
-		delete phyloTree;
-		if (database != NULL) { delete database; }
+		if (phyloTree != NULL) { delete phyloTree; }
+		if (database != NULL) { delete database; }
 	}
 	catch(exception& e) {
 		m->errorOut(e, "Bayesian", "~Bayesian");
@@ -258,13 +273,13 @@ string Bayesian::getTaxonomy(Sequence* seq) {
 	try {
 		string tax = "";
 		Kmer kmer(kmerSize);
+		flipped = false;
 
 		//get words contained in query
 		//getKmerString returns a string where the index in the string is the kmer number
 		//and the character at that index can be converted to be the number of times that kmer was seen
-
 		string queryKmerString = kmer.getKmerString(seq->getUnaligned());
-
+
 		vector<int> queryKmers;
 		for (int i = 0; i < queryKmerString.length()-1; i++) {  // the -1 is to ignore any kmer with an N in it
 			if (queryKmerString[i] != '!') { //this kmer is in the query
@@ -272,7 +287,22 @@ string Bayesian::getTaxonomy(Sequence* seq) {
 			}
 		}
 
-		if (queryKmers.size() == 0) { m->mothurOut(seq->getName() + "is bad."); m->mothurOutEndLine(); return "bad seq"; }
+		//if user wants to test the reverse complement and it's reversed, use that instead
+		if (flip) {
+			if (isReversed(queryKmers)) {
+				flipped = true;
+				seq->reverseComplement();
+				queryKmerString = kmer.getKmerString(seq->getUnaligned());
+				queryKmers.clear();
+				for (int i = 0; i < queryKmerString.length()-1; i++) {  // the -1 is to ignore any kmer with an N in it
+					if (queryKmerString[i] != '!') { //this kmer is in the query
+						queryKmers.push_back(i);
+					}
+				}
+			}
+		}
+
+		if (queryKmers.size() == 0) { m->mothurOut(seq->getName() + " is bad. It has no kmers of length " + toString(kmerSize) + "."); m->mothurOutEndLine(); simpleTax = "unknown;"; return "unknown;"; }
 
 		int index = getMostProbableTaxonomy(queryKmers);
@@ -282,8 +312,12 @@ string Bayesian::getTaxonomy(Sequence* seq) {
 		//bootstrap - to set confidenceScore
 		int numToSelect = queryKmers.size() / 8;
+		if (m->debug) { m->mothurOut(seq->getName() + "\t"); }
+
 		tax = bootstrapResults(queryKmers, index, numToSelect);
-
+
+		if (m->debug) { m->mothurOut("\n"); }
+
 		return tax;
 	}
 	catch(exception& e) {
@@ -348,6 +382,7 @@ string Bayesian::bootstrapResults(vector<int> kmers, int tax, int numToSelect) {
 		int seqTaxIndex = tax;
 		TaxNode seqTax = phyloTree->get(tax);
+
 		while (seqTax.level != 0) { //while you are not at the root
 			itBoot2 = confidenceScores.find(seqTaxIndex); //is this a classification we already have a count on
@@ -357,16 +392,19 @@ string Bayesian::bootstrapResults(vector<int> kmers, int tax, int numToSelect) {
 				confidence = itBoot2->second;
 			}
 
+			if (m->debug) { m->mothurOut(seqTax.name + "(" + toString(((confidence/(float)iters) * 100)) + ");"); }
+
 			if (((confidence/(float)iters) * 100) >= confidenceThreshold) {
 				confidenceTax = seqTax.name + "(" + toString(((confidence/(float)iters) * 100)) + ");" + confidenceTax;
 				simpleTax = seqTax.name + ";" + simpleTax;
 			}
-
+
 			seqTaxIndex = seqTax.parent;
 			seqTax = phyloTree->get(seqTax.parent);
 		}
 
-		if (confidenceTax == "") { confidenceTax = "unclassified;"; simpleTax = "unclassified;"; }
+		if (confidenceTax == "") { confidenceTax = "unknown;"; simpleTax = "unknown;"; }
+
 		return confidenceTax;
 	}
@@ -412,6 +450,46 @@ int Bayesian::getMostProbableTaxonomy(vector<int> queryKmer) {
 		exit(1);
 	}
 }
+//********************************************************************************************************************
+//if it is more probable that the reverse complement kmers are in the template, then we assume the sequence is reversed.
+bool Bayesian::isReversed(vector<int>& queryKmers){
+	try{
+		bool reversed = false;
+		float prob = 0;
+		float reverseProb = 0;
+
+		for (int i = 0; i < queryKmers.size(); i++){
+			int kmer = queryKmers[i];
+			if (kmer >= 0){
+				prob += WordPairDiffArr[kmer].prob;
+				reverseProb += WordPairDiffArr[kmer].reverseProb;
+			}
+		}
+
+		if (reverseProb > prob){ reversed = true; }
+
+		return reversed;
+	}
+	catch(exception& e) {
+		m->errorOut(e, "Bayesian", "isReversed");
+		exit(1);
+	}
+}
+//********************************************************************************************************************
+int Bayesian::generateWordPairDiffArr(){
+	try{
+		Kmer kmer(kmerSize);
+		for (int i = 0; i < WordPairDiffArr.size(); i++) {
+			int reversedWord = kmer.getReverseKmerNumber(i);
+			WordPairDiffArr[i].reverseProb = WordPairDiffArr[reversedWord].prob;
+		}
+
+		return 0;
+	}catch(exception& e) {
+		m->errorOut(e, "Bayesian", "generateWordPairDiffArr");
+		exit(1);
+	}
+}
 /*************************************************************************************************
 map<string, int> Bayesian::parseTaxMap(string newTax) {
 	try{
@@ -439,7 +517,7 @@ map<string, int> Bayesian::parseTaxMap(string newTax) {
 		exit(1);
 	}
 }
-/**************************************************************************************************/
+**************************************************************************************************/
 void Bayesian::readProbFile(ifstream& in, ifstream& inNum, string inName, string inNumName) {
 	try{
@@ -515,7 +593,8 @@ void Bayesian::readProbFile(ifstream& in, ifstream& inNum, string inName, string
 			int kmer, name;  vector<int> numbers; numbers.resize(numKmers);
 			float prob;
-			vector<float> zeroCountProb; zeroCountProb.resize(numKmers);
+			vector<float> zeroCountProb; zeroCountProb.resize(numKmers);
+			WordPairDiffArr.resize(numKmers);
 
 			//read version
 			length = positions[1] - positions[0];
@@ -537,7 +616,10 @@ void Bayesian::readProbFile(ifstream& in, ifstream& inNum, string inName, string
 				delete buf4;
 
 				istringstream iss (tempBuf,istringstream::in);
-				iss >> zeroCountProb[i] >> numbers[i];
+				float probTemp;
+				iss >> zeroCountProb[i] >> numbers[i] >> probTemp;
+				WordPairDiffArr[i].prob = probTemp;
+
 			}
 
 			MPI_File_close(&inMPI);
@@ -585,13 +667,16 @@ void Bayesian::readProbFile(ifstream& in, ifstream& inNum, string inName, string
 		int kmer, name, count;  count = 0;
 		vector<int> num; num.resize(numKmers);
 		float prob;
-		vector<float> zeroCountProb; zeroCountProb.resize(numKmers);
+		vector<float> zeroCountProb; zeroCountProb.resize(numKmers);
+		WordPairDiffArr.resize(numKmers);
 
 		//read version
 		string line2 = m->getline(inNum); m->gobble(inNum);
+		float probTemp;
 
 		//cout << threadID << '\t' << line2 << '\t' << this << endl;
 		while (inNum) {
-			inNum >> zeroCountProb[count] >> num[count];
+			inNum >> zeroCountProb[count] >> num[count] >> probTemp;
+			WordPairDiffArr[count].prob = probTemp;
 			count++;
 			m->gobble(inNum);
 			//cout << threadID << '\t' << count << endl;
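For readers following the constructor changes above: the two commented formulas (probabilityInTemplate and probabilityInThisTaxonomy) are the whole of the per-kmer training step. The standalone C++ sketch below reruns that arithmetic on toy numbers; the variable names and counts here are illustrative only and are not taken from mothur.

// Standalone illustration of the word-probability formulas quoted in the diff above.
// All names and numbers here are hypothetical; only the arithmetic mirrors the code.
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    // Toy template: 100 reference sequences split across 3 genera.
    std::vector<int> genusTotals = {50, 30, 20};   // sequences per genus
    int totalSeqs = 100;

    // Suppose 12 template sequences contain kmer i: 8 in genus 0, 4 in genus 1, 0 in genus 2.
    std::vector<int> countWithKmer = {8, 4, 0};
    int seqsWithWordi = 12;

    // probabilityInTemplate = (# of seqs with that word in template + 0.50) / (total number of seqs in template + 1)
    float probabilityInTemplate = (seqsWithWordi + 0.50f) / (float)(totalSeqs + 1);

    for (size_t k = 0; k < genusTotals.size(); k++) {
        // probabilityInThisTaxonomy = (# of seqs with that word in this taxonomy + probabilityInTemplate) / (total seqs in this taxonomy + 1)
        float wordGenusProb = std::log((countWithKmer[k] + probabilityInTemplate) / (float)(genusTotals[k] + 1));
        std::printf("genus %zu: log P(word | genus) = %f\n", k, wordGenusProb);
    }
    return 0;
}

The +0.50 pseudocount keeps the genus-conditional probability nonzero even when a genus has no sequences containing the word, so the log never blows up.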
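The new flip / isReversed() / generateWordPairDiffArr() code decides a query's orientation by comparing the summed log template probabilities of its kmers against those of their reverse complements, precomputed into WordPairDiffArr. A minimal sketch of that idea follows; the base-4 encoding (A=0, C=1, G=2, T=3) and the reverseKmerNumber() helper are assumptions standing in for Kmer::getReverseKmerNumber(), not mothur's actual implementation.

// Sketch of the orientation test behind isReversed()/generateWordPairDiffArr().
#include <cmath>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for Kmer::getReverseKmerNumber(): reverse-complement a base-4 encoded kmer.
int reverseKmerNumber(int kmer, int kmerSize) {
    int rc = 0;
    for (int i = 0; i < kmerSize; i++) {
        int base = kmer % 4;        // rightmost base
        rc = rc * 4 + (3 - base);   // complement it and append, which also reverses the order
        kmer /= 4;
    }
    return rc;
}

int main() {
    int kmerSize = 4;
    int numKmers = 256;                           // 4^kmerSize
    // logProb[w] plays the role of WordPairDiffArr[w].prob: log P(word w appears in the template).
    std::vector<float> logProb(numKmers, std::log(0.001f));
    logProb[27]  = std::log(0.20f);               // pretend a few words are common in the template
    logProb[100] = std::log(0.15f);

    std::vector<int> queryKmers = {27, 100};      // kmers observed in the query
    float prob = 0, reverseProb = 0;
    for (int w : queryKmers) {
        prob        += logProb[w];
        reverseProb += logProb[reverseKmerNumber(w, kmerSize)];
    }
    std::printf("forward=%f reverse=%f -> %s\n", prob, reverseProb,
                (reverseProb > prob) ? "classify the reverse complement" : "keep as-is");
    return 0;
}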
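bootstrapResults() (see the @@ -357,16 +392,19 @@ hunk) walks from the assigned genus toward the root, keeps every taxon whose bootstrap support reaches confidenceThreshold, and now falls back to "unknown;" when nothing passes. The toy sketch below mimics only that thresholding and string assembly; the taxonomy path and vote counts are invented, and the bootstrap sampling itself is not shown.

// Sketch of the confidence-string assembly in bootstrapResults(); inputs are made up.
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

int main() {
    int iters = 100, confidenceThreshold = 80;
    // Bootstrap votes for each ancestor of the full-query assignment, genus first, root last.
    std::vector<std::pair<std::string, int>> votes = {
        {"Lactobacillus", 62}, {"Lactobacillaceae", 85}, {"Lactobacillales", 97},
        {"Bacilli", 99}, {"Firmicutes", 100}, {"Bacteria", 100}};

    std::string confidenceTax, simpleTax;
    for (auto& v : votes) {   // walk genus -> root, prepending, as the while loop in the diff does
        float pct = (v.second / (float)iters) * 100;
        if (pct >= confidenceThreshold) {
            confidenceTax = v.first + "(" + std::to_string((int)pct) + ");" + confidenceTax;
            simpleTax = v.first + ";" + simpleTax;
        }
    }
    if (confidenceTax.empty()) { confidenceTax = "unknown;"; simpleTax = "unknown;"; }
    std::printf("%s\n%s\n", confidenceTax.c_str(), simpleTax.c_str());
    return 0;
}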