X-Git-Url: https://git.donarmstrong.com/?p=mothur.git;a=blobdiff_plain;f=bayesian.cpp;h=8278afb32e1d2c028a83ffdece17ed9910a78e20;hp=80518666cfe5436280f58117fe506dc132bd2754;hb=d1c97b8c04bb75faca1e76ffad60b37a4d789d3d;hpb=8bc3e5b38c2317a1715f53be22fa96455868c281

diff --git a/bayesian.cpp b/bayesian.cpp
index 8051866..8278afb 100644
--- a/bayesian.cpp
+++ b/bayesian.cpp
@@ -10,15 +10,26 @@
 #include "bayesian.h"
 #include "kmer.hpp"
 #include "phylosummary.h"
-
+#include "referencedb.h"
 /**************************************************************************************************/
-Bayesian::Bayesian(string tfile, string tempFile, string method, int ksize, int cutoff, int i) :
-Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
+Bayesian::Bayesian(string tfile, string tempFile, string method, int ksize, int cutoff, int i, int tid, bool f, bool sh) :
+Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 	try {
-
+		ReferenceDB* rdb = ReferenceDB::getInstance();
+
+		threadID = tid;
+		flip = f;
+		shortcuts = sh;
+		string baseName = tempFile;
+
+		if (baseName == "saved") { baseName = rdb->getSavedReference(); }
+
+		string baseTName = tfile;
+		if (baseTName == "saved") { baseTName = rdb->getSavedTaxonomy(); }
+
 		/************calculate the probablity that each word will be in a specific taxonomy*************/
-		string tfileroot = tfile.substr(0,tfile.find_last_of(".")+1);
-		string tempfileroot = m->getRootName(m->getSimpleName(tempFile));
+		string tfileroot = m->getFullPathName(baseTName.substr(0,baseTName.find_last_of(".")+1));
+		string tempfileroot = m->getRootName(m->getSimpleName(baseName));
 		string phyloTreeName = tfileroot + "tree.train";
 		string phyloTreeSumName = tfileroot + "tree.sum";
 		string probFileName = tfileroot + tempfileroot + char('0'+ kmerSize) + "mer.prob";
@@ -40,7 +51,23 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 			FilesGood = checkReleaseDate(probFileTest, probFileTest2, phyloTreeTest, probFileTest3);
 		}

+		//if you want to save, but you dont need to calculate then just read
+		if (rdb->save && probFileTest && probFileTest2 && phyloTreeTest && probFileTest3 && FilesGood && (tempFile != "saved")) {
+			ifstream saveIn;
+			m->openInputFile(tempFile, saveIn);
+
+			while (!saveIn.eof()) {
+				Sequence temp(saveIn);
+				m->gobble(saveIn);
+
+				rdb->referenceSeqs.push_back(temp);
+			}
+			saveIn.close();
+		}
+
 		if(probFileTest && probFileTest2 && phyloTreeTest && probFileTest3 && FilesGood){
+			if (tempFile == "saved") { m->mothurOutEndLine(); m->mothurOut("Using sequences from " + rdb->getSavedReference() + " that are saved in memory."); m->mothurOutEndLine(); }
+
 			m->mothurOut("Reading template taxonomy... "); cout.flush();

 			phyloTree = new PhyloTree(phyloTreeTest, phyloTreeName);
@@ -49,17 +76,25 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 			genusNodes = phyloTree->getGenusNodes();
 			genusTotals = phyloTree->getGenusTotals();
-
-			m->mothurOut("Reading template probabilities... "); cout.flush();
-			readProbFile(probFileTest, probFileTest2, probFileName, probFileName2);
+			if (tfile == "saved") {
+				m->mothurOutEndLine(); m->mothurOut("Using probabilties from " + rdb->getSavedTaxonomy() + " that are saved in memory... "); cout.flush();;
+				wordGenusProb = rdb->wordGenusProb;
+				WordPairDiffArr = rdb->WordPairDiffArr;
+			}else {
+				m->mothurOut("Reading template probabilities... "); cout.flush();
+				readProbFile(probFileTest, probFileTest2, probFileName, probFileName2);
+			}
+
+			//save probabilities
+			if (rdb->save) { rdb->wordGenusProb = wordGenusProb; rdb->WordPairDiffArr = WordPairDiffArr; }
 		}else{

 			//create search database and names vector
 			generateDatabaseAndNames(tfile, tempFile, method, ksize, 0.0, 0.0, 0.0, 0.0);

 			//prevents errors caused by creating shortcut files if you had an error in the sanity check.
-			if (m->control_pressed) { remove(phyloTreeName.c_str()); remove(probFileName.c_str()); remove(probFileName2.c_str()); }
+			if (m->control_pressed) { m->mothurRemove(phyloTreeName); m->mothurRemove(probFileName); m->mothurRemove(probFileName2); }
 			else{
 				genusNodes = phyloTree->getGenusNodes();
 				genusTotals = phyloTree->getGenusTotals();
@@ -76,10 +111,10 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 				//initialze probabilities
 				wordGenusProb.resize(numKmers);
-			cout << numKmers << '\t' << genusNodes.size() << endl;
+				WordPairDiffArr.resize(numKmers);
+
 				for (int j = 0; j < wordGenusProb.size(); j++) { wordGenusProb[j].resize(genusNodes.size()); }
-			cout << numKmers << '\t' << genusNodes.size() << endl;
-				ofstream out;
+				ofstream out;
 				ofstream out2;

 			#ifdef USE_MPI
@@ -90,25 +125,28 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 			#endif

-			m->openOutputFile(probFileName, out);
+			if (shortcuts) {
+				m->openOutputFile(probFileName, out);

-			//output mothur version
-			out << "#" << m->getVersion() << endl;
+				//output mothur version
+				out << "#" << m->getVersion() << endl;

-			out << numKmers << endl;
+				out << numKmers << endl;

-			m->openOutputFile(probFileName2, out2);
+				m->openOutputFile(probFileName2, out2);

-			//output mothur version
-			out2 << "#" << m->getVersion() << endl;
+				//output mothur version
+				out2 << "#" << m->getVersion() << endl;
+			}

 			#ifdef USE_MPI
 				}
 			#endif

-
 			//for each word
 			for (int i = 0; i < numKmers; i++) {
+				//m->mothurOut("[DEBUG]: kmer = " + toString(i) + "\n");
+
 				if (m->control_pressed) { break; }

 				#ifdef USE_MPI
@@ -117,7 +155,7 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 					if (pid == 0) {
 				#endif

-				out << i << '\t';
+				if (shortcuts) { out << i << '\t'; }

 				#ifdef USE_MPI
 					}
@@ -125,23 +163,26 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {

 				vector seqsWithWordi = database->getSequencesWithKmer(i);

-				map count;
-				for (int k = 0; k < genusNodes.size(); k++) { count[genusNodes[k]] = 0; }
-				//for each sequence with that word
+				vector count; count.resize(genusNodes.size(), 0);
 				for (int j = 0; j < seqsWithWordi.size(); j++) {
-					int temp = phyloTree->getIndex(names[seqsWithWordi[j]]);
+					int temp = phyloTree->getGenusIndex(names[seqsWithWordi[j]]);
 					count[temp]++;  //increment count of seq in this genus who have this word
 				}

-				//probabilityInTemplate = (# of seqs with that word in template + 0.05) / (total number of seqs in template + 1);
+				//probabilityInTemplate = (# of seqs with that word in template + 0.50) / (total number of seqs in template + 1);
 				float probabilityInTemplate = (seqsWithWordi.size() + 0.50) / (float) (names.size() + 1);
-
+				diffPair tempProb(log(probabilityInTemplate), 0.0);
+				WordPairDiffArr[i] = tempProb;
+
 				int numNotZero = 0;
 				for (int k = 0; k < genusNodes.size(); k++) {
 					//probabilityInThisTaxonomy = (# of seqs with that word in this taxonomy + probabilityInTemplate) / (total number of seqs in this taxonomy + 1);
-					wordGenusProb[i][k] = log((count[genusNodes[k]] + probabilityInTemplate) / (float) (genusTotals[k] + 1));
-					if (count[genusNodes[k]] != 0) {
+
+
+					wordGenusProb[i][k] = log((count[k] + probabilityInTemplate) / (float) (genusTotals[k] + 1));
+
+					if (count[k] != 0) {
 						#ifdef USE_MPI
 							int pid;
 							MPI_Comm_rank(MPI_COMM_WORLD, &pid); //find out who we are
 							if (pid == 0) {
 						#endif

-						out << k << '\t' << wordGenusProb[i][k] << '\t';
+						if (shortcuts) { out << k << '\t' << wordGenusProb[i][k] << '\t' ; }

 						#ifdef USE_MPI
 							}
@@ -165,8 +206,10 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 					if (pid == 0) {
 				#endif

-				out << endl;
-				out2 << probabilityInTemplate << '\t' << numNotZero << endl;
+				if (shortcuts) {
+					out << endl;
+					out2 << probabilityInTemplate << '\t' << numNotZero << '\t' << log(probabilityInTemplate) << endl;
+				}

 				#ifdef USE_MPI
 					}
@@ -179,9 +222,10 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 				if (pid == 0) {
 			#endif

-			out.close();
-			out2.close();
-
+			if (shortcuts) {
+				out.close();
+				out2.close();
+			}
 			#ifdef USE_MPI
 				}
 			#endif
@@ -191,9 +235,19 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 				delete phyloTree;
 				phyloTree = new PhyloTree(phyloTreeTest, phyloTreeName);
+
+				//save probabilities
+				if (rdb->save) { rdb->wordGenusProb = wordGenusProb; rdb->WordPairDiffArr = WordPairDiffArr; }
 			}
 		}
-
+
+		if (m->debug) { m->mothurOut("[DEBUG]: about to generateWordPairDiffArr\n"); }
+		generateWordPairDiffArr();
+		if (m->debug) { m->mothurOut("[DEBUG]: done generateWordPairDiffArr\n"); }
+
+		//save probabilities
+		if (rdb->save) { rdb->wordGenusProb = wordGenusProb; rdb->WordPairDiffArr = WordPairDiffArr; }
+
 		m->mothurOut("DONE."); m->mothurOutEndLine();
 		m->mothurOut("It took " + toString(time(NULL) - start) + " seconds get probabilities. "); m->mothurOutEndLine();
 	}
@@ -205,8 +259,8 @@ Classify(), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
 /**************************************************************************************************/
Bayesian::~Bayesian() {
 	try {
-		delete phyloTree;
-		if (database != NULL) { delete database; }
+		if (phyloTree != NULL) { delete phyloTree; }
+		if (database != NULL) { delete database; }
 	}
 	catch(exception& e) {
 		m->errorOut(e, "Bayesian", "~Bayesian");
@@ -219,30 +273,51 @@ string Bayesian::getTaxonomy(Sequence* seq) {
 	try {
 		string tax = "";
 		Kmer kmer(kmerSize);
+		flipped = false;

 		//get words contained in query
 		//getKmerString returns a string where the index in the string is hte kmer number
 		//and the character at that index can be converted to be the number of times that kmer was seen
-
 		string queryKmerString = kmer.getKmerString(seq->getUnaligned());
-
+
 		vector queryKmers;
-		for (int i = 0; i < queryKmerString.length(); i++) {
+		for (int i = 0; i < queryKmerString.length()-1; i++) {  // the -1 is to ignore any kmer with an N in it
 			if (queryKmerString[i] != '!') {  //this kmer is in the query
 				queryKmers.push_back(i);
 			}
 		}

-		if (queryKmers.size() == 0) { m->mothurOut(seq->getName() + "is bad."); m->mothurOutEndLine(); return "bad seq"; }
+		//if user wants to test reverse compliment and its reversed use that instead
+		if (flip) {
+			if (isReversed(queryKmers)) {
+				flipped = true;
+				seq->reverseComplement();
+				queryKmerString = kmer.getKmerString(seq->getUnaligned());
+				queryKmers.clear();
+				for (int i = 0; i < queryKmerString.length()-1; i++) {  // the -1 is to ignore any kmer with an N in it
+					if (queryKmerString[i] != '!') {  //this kmer is in the query
+						queryKmers.push_back(i);
+					}
+				}
+			}
+		}
+
+		if (queryKmers.size() == 0) { m->mothurOut(seq->getName() + " is bad. It has no kmers of length " + toString(kmerSize) + "."); m->mothurOutEndLine(); simpleTax = "unknown;"; return "unknown;"; }
+
 		int index = getMostProbableTaxonomy(queryKmers);

 		if (m->control_pressed) { return tax; }
-//cout << seq->getName() << '\t' << index << endl;
+
 		//bootstrap - to set confidenceScore
 		int numToSelect = queryKmers.size() / 8;
+
+		if (m->debug) { m->mothurOut(seq->getName() + "\t"); }
+
 		tax = bootstrapResults(queryKmers, index, numToSelect);
-
+
+		if (m->debug) { m->mothurOut("\n"); }
+
 		return tax;
 	}
 	catch(exception& e) {
@@ -255,16 +330,26 @@ string Bayesian::bootstrapResults(vector kmers, int tax, int numToSelect) {
 	try {

 		map confidenceScores;
+
+		//initialize confidences to 0
+		int seqIndex = tax;
+		TaxNode seq = phyloTree->get(tax);
+		confidenceScores[tax] = 0;
+
+		while (seq.level != 0) { //while you are not at the root
+			seqIndex = seq.parent;
+			confidenceScores[seqIndex] = 0;
+			seq = phyloTree->get(seq.parent);
+		}

 		map::iterator itBoot;
 		map::iterator itBoot2;
 		map::iterator itConvert;
-
+
 		for (int i = 0; i < iters; i++) {
 			if (m->control_pressed) { return "control"; }

 			vector temp;
-
 			for (int j = 0; j < numToSelect; j++) {

 				int index = int(rand() % kmers.size());
@@ -274,17 +359,15 @@ string Bayesian::bootstrapResults(vector kmers, int tax, int numToSelect) {

 			//get taxonomy
 			int newTax = getMostProbableTaxonomy(temp);
+			//int newTax = 1;
 			TaxNode taxonomyTemp = phyloTree->get(newTax);
-
+
 			//add to confidence results
 			while (taxonomyTemp.level != 0) { //while you are not at the root
-
 				itBoot2 = confidenceScores.find(newTax); //is this a classification we already have a count on

-				if (itBoot2 == confidenceScores.end()) { //not already in confidence scores
-					confidenceScores[newTax] = 1;
-				}else{
-					confidenceScores[newTax]++;
+				if (itBoot2 != confidenceScores.end()) { //this is a classification we need a confidence for
+					(itBoot2->second)++;
 				}

 				newTax = taxonomyTemp.parent;
@@ -299,25 +382,29 @@ string Bayesian::bootstrapResults(vector kmers, int tax, int numToSelect) {

 		int seqTaxIndex = tax;
 		TaxNode seqTax = phyloTree->get(tax);
+
 		while (seqTax.level != 0) { //while you are not at the root
 			itBoot2 = confidenceScores.find(seqTaxIndex); //is this a classification we already have a count on

 			int confidence = 0;
 			if (itBoot2 != confidenceScores.end()) { //already in confidence scores
-				confidence = confidenceScores[seqTaxIndex];
+				confidence = itBoot2->second;
 			}

+			if (m->debug) { m->mothurOut(seqTax.name + "(" + toString(((confidence/(float)iters) * 100)) + ");"); }
+
 			if (((confidence/(float)iters) * 100) >= confidenceThreshold) {
 				confidenceTax = seqTax.name + "(" + toString(((confidence/(float)iters) * 100)) + ");" + confidenceTax;
 				simpleTax = seqTax.name + ";" + simpleTax;
 			}
-
 			seqTaxIndex = seqTax.parent;
 			seqTax = phyloTree->get(seqTax.parent);
 		}

-		if (confidenceTax == "") { confidenceTax = "unclassified;"; simpleTax = "unclassified;"; }
+		if (confidenceTax == "") { confidenceTax = "unknown;"; simpleTax = "unknown;"; }
+
 		return confidenceTax;

 	}
@@ -333,20 +420,29 @@ int Bayesian::getMostProbableTaxonomy(vector queryKmer) {
 	try {
 		double maxProbability = -1000000.0;

 		//find taxonomy with highest probability that this sequence is from it
+
+
+//		cout << genusNodes.size() << endl;
+
+
 		for (int k = 0; k < genusNodes.size(); k++) {
 			//for each taxonomy calc its probability
-			double prob = 1.0;
+
+			double prob = 0.0000;
 			for (int i = 0; i < queryKmer.size(); i++) {
 				prob += wordGenusProb[queryKmer[i]][k];
 			}
-
+
+//			cout << phyloTree->get(genusNodes[k]).name << '\t' << prob << endl;
+
 			//is this the taxonomy with the greatest probability?
 			if (prob > maxProbability) {
 				indexofGenus = genusNodes[k];
 				maxProbability = prob;
 			}
 		}
-
+
+
 		return indexofGenus;
 	}
 	catch(exception& e) {
@@ -354,6 +450,46 @@ int Bayesian::getMostProbableTaxonomy(vector queryKmer) {
 		exit(1);
 	}
 }
+//********************************************************************************************************************
+//if it is more probable that the reverse compliment kmers are in the template, then we assume the sequence is reversed.
+bool Bayesian::isReversed(vector& queryKmers){
+	try{
+		bool reversed = false;
+		float prob = 0;
+		float reverseProb = 0;
+
+		for (int i = 0; i < queryKmers.size(); i++){
+			int kmer = queryKmers[i];
+			if (kmer >= 0){
+				prob += WordPairDiffArr[kmer].prob;
+				reverseProb += WordPairDiffArr[kmer].reverseProb;
+			}
+		}
+
+		if (reverseProb > prob){ reversed = true; }
+
+		return reversed;
+	}
+	catch(exception& e) {
+		m->errorOut(e, "Bayesian", "isReversed");
+		exit(1);
+	}
+}
+//********************************************************************************************************************
+int Bayesian::generateWordPairDiffArr(){
+	try{
+		Kmer kmer(kmerSize);
+		for (int i = 0; i < WordPairDiffArr.size(); i++) {
+			int reversedWord = kmer.getReverseKmerNumber(i);
+			WordPairDiffArr[i].reverseProb = WordPairDiffArr[reversedWord].prob;
+		}
+
+		return 0;
+	}catch(exception& e) {
+		m->errorOut(e, "Bayesian", "generateWordPairDiffArr");
+		exit(1);
+	}
+}
 /*************************************************************************************************
 map Bayesian::parseTaxMap(string newTax) {
 	try{
@@ -381,15 +517,15 @@ map Bayesian::parseTaxMap(string newTax) {
 		exit(1);
 	}
 }
-/**************************************************************************************************/
+**************************************************************************************************/
 void Bayesian::readProbFile(ifstream& in, ifstream& inNum, string inName, string inNumName) {
 	try{

#ifdef USE_MPI
 		int pid, num, num2, processors;
-		vector positions;
-		vector positions2;
+		vector positions;
+		vector positions2;

 		MPI_Status status;
 		MPI_File inMPI;
@@ -457,7 +593,8 @@ void Bayesian::readProbFile(ifstream& in, ifstream& inNum, string inName, string
 			int kmer, name;
 			vector numbers; numbers.resize(numKmers);
 			float prob;
-			vector zeroCountProb; zeroCountProb.resize(numKmers);
+			vector zeroCountProb; zeroCountProb.resize(numKmers);
+			WordPairDiffArr.resize(numKmers);

 			//read version
 			length = positions[1] - positions[0];
@@ -479,7 +616,10 @@ void Bayesian::readProbFile(ifstream& in, ifstream& inNum, string inName, string
 				delete buf4;

 				istringstream iss (tempBuf,istringstream::in);
-				iss >> zeroCountProb[i] >> numbers[i];
+				float probTemp;
+				iss >> zeroCountProb[i] >> numbers[i] >> probTemp;
+				WordPairDiffArr[i].prob = probTemp;
+
 			}

 			MPI_File_close(&inMPI);
@@ -518,7 +658,7 @@ void Bayesian::readProbFile(ifstream& in, ifstream& inNum, string inName, string
 		string line = m->getline(in); m->gobble(in);

 		in >> numKmers; m->gobble(in);
-
+		//cout << threadID << '\t' << line << '\t' << numKmers << &in << '\t' << &inNum << '\t' << genusNodes.size() << endl;
 		//initialze probabilities
 		wordGenusProb.resize(numKmers);
@@ -527,26 +667,32 @@ void Bayesian::readProbFile(ifstream& in, ifstream& inNum, string inName, string
 		int kmer, name, count;  count = 0;
 		vector num; num.resize(numKmers);
 		float prob;
-		vector zeroCountProb; zeroCountProb.resize(numKmers);
+		vector zeroCountProb; zeroCountProb.resize(numKmers);
+		WordPairDiffArr.resize(numKmers);

 		//read version
 		string line2 = m->getline(inNum); m->gobble(inNum);
-
+		float probTemp;
+		//cout << threadID << '\t' << line2 << '\t' << this << endl;
 		while (inNum) {
-			inNum >> zeroCountProb[count] >> num[count];
+			inNum >> zeroCountProb[count] >> num[count] >> probTemp;
+			WordPairDiffArr[count].prob = probTemp;
 			count++;
 			m->gobble(inNum);
+			//cout << threadID << '\t' << count << endl;
 		}
 		inNum.close();
-
+		//cout << threadID << '\t' << "here1 " << &wordGenusProb << '\t' << &num << endl; //
+		//cout << threadID << '\t' << &genusTotals << '\t' << endl;
+		//cout << threadID << '\t' << genusNodes.size() << endl;
 		while(in) {
 			in >> kmer;
-
+			//cout << threadID << '\t' << kmer << endl;
 			//set them all to zero value
 			for (int i = 0; i < genusNodes.size(); i++) { wordGenusProb[kmer][i] = log(zeroCountProb[kmer] / (float) (genusTotals[i]+1)); }
-
+			//cout << threadID << '\t' << num[kmer] << "here" << endl;
 			//get probs for nonzero values
 			for (int i = 0; i < num[kmer]; i++) {
 				in >> name >> prob;
@@ -556,7 +702,7 @@ void Bayesian::readProbFile(ifstream& in, ifstream& inNum, string inName, string
 			m->gobble(in);
 		}
 		in.close();
-
+		//cout << threadID << '\t' << "here" << endl;
#endif
 	}
 	catch(exception& e) {
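
The constructor above turns raw kmer counts into smoothed log probabilities: probabilityInTemplate = (# of template sequences containing the word + 0.50) / (total template sequences + 1), and wordGenusProb[word][genus] = log((count of the word in that genus + probabilityInTemplate) / (sequences in that genus + 1)). Below is a minimal, self-contained sketch of that arithmetic for a single word over a toy template; the names (toyGenusCounts, numTemplateSeqs, seqsWithWord) and the numbers are illustrative only, not mothur data structures.

// Illustrative only: smoothed per-genus log probability for one word (kmer),
// following the formulas quoted in the constructor above. All data is made up.
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    std::vector<int> toyGenusCounts = { 12, 0, 3 };   // sequences in each genus that contain the word
    std::vector<int> genusTotals    = { 20, 15, 10 }; // total sequences assigned to each genus
    int numTemplateSeqs = 45;                         // total sequences in the template
    int seqsWithWord    = 12 + 0 + 3;                 // sequences anywhere in the template with the word

    // probabilityInTemplate = (# of seqs with that word in template + 0.50) / (total number of seqs in template + 1)
    float probabilityInTemplate = (seqsWithWord + 0.50f) / (float)(numTemplateSeqs + 1);

    // wordGenusProb = log((# of seqs with that word in this genus + probabilityInTemplate) / (total seqs in this genus + 1))
    for (size_t k = 0; k < toyGenusCounts.size(); k++) {
        float p = std::log((toyGenusCounts[k] + probabilityInTemplate) / (float)(genusTotals[k] + 1));
        std::printf("genus %zu: log P(word | genus) = %.4f\n", k, p);
    }
    // The pseudocount keeps a genus that never saw the word from getting log(0) = -infinity,
    // which would otherwise wipe out its summed score during classification.
    return 0;
}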
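
For classification, getMostProbableTaxonomy() sums wordGenusProb over the query's kmers and keeps the genus with the largest total, and bootstrapResults() repeats that vote iters times on random subsamples of roughly 1/8 of the kmers, turning agreement into the confidence percentage compared against confidenceThreshold. The sketch below compresses both ideas to genus level only (the real code also walks the PhyloTree lineage and accumulates a confidence per rank); all names and toy probabilities here are illustrative rather than mothur's API.

// Illustrative only: log-probability vote plus a genus-level bootstrap confidence.
#include <cstdio>
#include <cstdlib>
#include <vector>

static int mostProbableGenus(const std::vector<int>& queryKmers,
                             const std::vector<std::vector<float>>& wordGenusProb) {
    int best = -1;
    double maxProbability = -1000000.0;
    for (size_t k = 0; k < wordGenusProb[0].size(); k++) {
        double prob = 0.0;                  // sum of log probabilities == log of the product
        for (int kmer : queryKmers) { prob += wordGenusProb[kmer][k]; }
        if (prob > maxProbability) { best = (int)k; maxProbability = prob; }
    }
    return best;
}

int main() {
    // Two kmers, three genera of toy log probabilities.
    std::vector<std::vector<float>> wordGenusProb = { { -1.0f, -4.0f, -3.0f },
                                                      { -0.5f, -5.0f, -2.5f } };
    std::vector<int> queryKmers = { 0, 1, 0, 1, 0, 1, 0, 1 };

    int assigned = mostProbableGenus(queryKmers, wordGenusProb);

    // Bootstrap: resample 1/8 of the query kmers (with replacement) iters times and count how
    // often the winning genus is recovered; the percentage becomes the reported confidence.
    int iters = 100, hits = 0;
    int numToSelect = (int)queryKmers.size() / 8;
    for (int i = 0; i < iters; i++) {
        std::vector<int> temp;
        for (int j = 0; j < numToSelect; j++) { temp.push_back(queryKmers[rand() % queryKmers.size()]); }
        if (mostProbableGenus(temp, wordGenusProb) == assigned) { hits++; }
    }
    std::printf("genus %d, confidence %.0f%%\n", assigned, (hits / (float)iters) * 100);
    return 0;
}

Because the per-kmer scores are log probabilities, the patch's change of the accumulator's starting value from 1.0 to 0.0 uses the correct identity for a sum of logs; the sketch starts at 0.0 for the same reason.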
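
The new flip handling works from WordPairDiffArr: for each kmer the constructor stores log(probabilityInTemplate) in .prob, generateWordPairDiffArr() copies into .reverseProb the .prob of that kmer's reverse complement (via Kmer::getReverseKmerNumber), and isReversed() sums both fields over the query's kmers, reporting the query as reversed when the reverse-complement total is larger, at which point getTaxonomy() reverse-complements the sequence and rebuilds its kmer list. Below is a self-contained sketch of that comparison; the base encoding (A=0, C=1, G=2, T=3, first base most significant), the helper reverseKmerNumber, and all toy values are assumptions for illustration -- mothur's Kmer class defines the real mapping.

// Illustrative only: decide whether a query looks reverse-complemented relative to the template.
#include <cstdio>
#include <vector>

struct diffPair { float prob; float reverseProb; };

// Reverse complement of a kmer number under the assumed A=0,C=1,G=2,T=3 encoding.
static int reverseKmerNumber(int kmerNumber, int kmerSize) {
    int rev = 0;
    for (int i = 0; i < kmerSize; i++) {
        int base = kmerNumber % 4;      // least-significant (last) base of what remains
        kmerNumber /= 4;
        rev = rev * 4 + (3 - base);     // complement and append, which also reverses the order
    }
    return rev;
}

static bool isReversed(const std::vector<int>& queryKmers, const std::vector<diffPair>& wordPairDiffArr) {
    float prob = 0, reverseProb = 0;
    for (int kmer : queryKmers) {
        prob        += wordPairDiffArr[kmer].prob;
        reverseProb += wordPairDiffArr[kmer].reverseProb;
    }
    return reverseProb > prob;          // the reverse strand looks more like the template
}

int main() {
    int kmerSize = 3;
    int numKmers = 1 << (2 * kmerSize); // 4^kmerSize possible words
    // Pretend log template probabilities: a couple of "forward" words are common, the rest rare.
    std::vector<diffPair> wordPairDiffArr(numKmers, diffPair{ -8.0f, 0.0f });
    wordPairDiffArr[5].prob = -1.0f;    // arbitrary toy values
    wordPairDiffArr[9].prob = -1.5f;
    // generateWordPairDiffArr(): reverseProb of a word is the prob of its reverse complement.
    for (int i = 0; i < numKmers; i++) {
        wordPairDiffArr[i].reverseProb = wordPairDiffArr[reverseKmerNumber(i, kmerSize)].prob;
    }
    // A query made of the reverse complements of the common words should be flagged as flipped.
    std::vector<int> queryKmers = { reverseKmerNumber(5, kmerSize), reverseKmerNumber(9, kmerSize) };
    std::printf("flip query? %s\n", isReversed(queryKmers, wordPairDiffArr) ? "yes" : "no");
    return 0;
}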