5 * Created by westcott on 11/3/09.
6 * Copyright 2009 Schloss Lab. All rights reserved.
13 /**************************************************************************************************/
14 Bayesian::Bayesian(string tfile, string tempFile, string method, int ksize, int cutoff, int i) :
15 Classify(tfile, tempFile, method, ksize, 0.0, 0.0, 0.0, 0.0), kmerSize(ksize), confidenceThreshold(cutoff), iters(i) {
// Builds the per-kmer, per-genus log-probability table used by the naive-Bayes
// classifier. If cached <template>.<k>mer.prob / .numNonZero files from a
// previous run exist, they are read back in via readProbFile; otherwise the
// probabilities are computed from the template database and written out so the
// next run can skip the computation.
// NOTE(review): the surrounding try block and several interior lines are not
// visible in this chunk — confirm structure against the full file.
18 numKmers = database->getMaxKmer() + 1;
20 //initialize probabilities
21 wordGenusProb.resize(numKmers);
23 genusNodes = phyloTree->getGenusNodes();
// one log-probability slot per (kmer, genus) pair
25 for (int j = 0; j < wordGenusProb.size(); j++) { wordGenusProb[j].resize(genusNodes.size()); }
27 //reset counts because we are on a new word
// genusTotals[j] = number of template sequences assigned to genus j
28 for (int j = 0; j < genusNodes.size(); j++) {
29 TaxNode temp = phyloTree->get(genusNodes[j]);
30 genusTotals.push_back(temp.accessions.size());
34 /************calculate the probability that each word will be in a specific taxonomy*************/
// cache file names are derived from the template file name plus the kmer size,
// e.g. template.8mer.prob and template.8mer.numNonZero
// (char('0'+kmerSize) assumes kmerSize is a single digit — TODO confirm)
36 string probFileName = tempFile.substr(0,tempFile.find_last_of(".")+1) + char('0'+ kmerSize) + "mer.prob";
37 ifstream probFileTest(probFileName.c_str());
40 string probFileName2 = tempFile.substr(0,tempFile.find_last_of(".")+1) + char('0'+ kmerSize) + "mer.numNonZero";
41 ifstream probFileTest2(probFileName2.c_str());
43 int start = time(NULL);
// both cache files open successfully -> reuse them instead of recomputing
45 if(probFileTest && probFileTest2){
46 mothurOut("Reading template probabilities... "); cout.flush();
47 readProbFile(probFileTest, probFileTest2);
49 mothurOut("Calculating template probabilities... "); cout.flush();
52 openOutputFile(probFileName, out);
55 openOutputFile(probFileName2, out2);
// for every possible kmer i, count how many sequences of each genus contain it
58 for (int i = 0; i < numKmers; i++) {
62 vector<int> seqsWithWordi = database->getSequencesWithKmer(i);
65 for (int k = 0; k < genusNodes.size(); k++) { count[genusNodes[k]] = 0; }
67 //for each sequence with that word
68 for (int j = 0; j < seqsWithWordi.size(); j++) {
69 int temp = phyloTree->getIndex(names[seqsWithWordi[j]]);
70 count[temp]++; //increment count of seq in this genus who have this word
// word prior, reused as the pseudocount in the per-genus formula below
// (comment previously said 0.05; the code — and the RDP method — uses 0.50)
73 //probabilityInTemplate = (# of seqs with that word in template + 0.50) / (total number of seqs in template + 1);
74 float probabilityInTemplate = (seqsWithWordi.size() + 0.50) / (float) (names.size() + 1);
77 for (int k = 0; k < genusNodes.size(); k++) {
78 //probabilityInThisTaxonomy = (# of seqs with that word in this taxonomy + probabilityInTemplate) / (total number of seqs in this taxonomy + 1);
79 wordGenusProb[i][k] = log((count[genusNodes[k]] + probabilityInTemplate) / (float) (genusTotals[k] + 1));
// only non-zero-count entries are written to the .prob cache; zero-count
// probabilities are reconstructed in readProbFile from probabilityInTemplate
80 if (count[genusNodes[k]] != 0) { out << k << '\t' << wordGenusProb[i][k] << '\t'; numNotZero++; }
83 out2 << probabilityInTemplate << '\t' << numNotZero << endl;
91 mothurOut("DONE."); mothurOutEndLine();
92 mothurOut("It took " + toString(time(NULL) - start) + " seconds get probabilities. "); mothurOutEndLine();
// NOTE(review): wrong function name in the error report — this is the
// constructor, not getTaxonomy; should presumably be errorOut(e, "Bayesian", "Bayesian").
95 errorOut(e, "Bayesian", "getTaxonomy");
99 /**************************************************************************************************/
100 string Bayesian::getTaxonomy(Sequence* seq) {
// Classifies one query sequence: collects the kmers present in the query's
// unaligned sequence, finds the most probable genus over all of them, then
// bootstraps (iters trials of queryKmers.size()/8 kmers each) to attach a
// confidence score to every taxonomic level of the assignment.
// Returns the confidence-annotated taxonomy string built by bootstrapResults.
105 //get words contained in query
106 //getKmerString returns a string where the index in the string is the kmer number
107 //and the character at that index can be converted to be the number of times that kmer was seen
108 string queryKmerString = kmer.getKmerString(seq->getUnaligned());
109 vector<int> queryKmers;
110 for (int i = 0; i < queryKmerString.length(); i++) {
111 if (queryKmerString[i] != '!') { //this kmer is in the query
112 queryKmers.push_back(i);
// best-scoring genus for the full kmer set (an index into phyloTree)
116 int index = getMostProbableTaxonomy(queryKmers);
118 //bootstrap - to set confidenceScore
// each bootstrap trial resamples one eighth of the query's kmers
119 int numToSelect = queryKmers.size() / 8;
120 tax = bootstrapResults(queryKmers, index, numToSelect);
124 catch(exception& e) {
125 errorOut(e, "Bayesian", "getTaxonomy");
129 /**************************************************************************************************/
130 string Bayesian::bootstrapResults(vector<int> kmers, int tax, int numToSelect) {
// Estimates per-level confidence for assignment 'tax': repeats iters times,
// each time classifying a random subsample (with replacement) of numToSelect
// kmers and incrementing a counter for every taxon name, at every level, on
// the resulting lineage. Returns the lineage of 'tax' rendered as
// "name(count);..." keeping only levels whose count meets confidenceThreshold.
133 //taxConfidenceScore.clear(); //clear out previous seqs scores
135 vector< map<string, int> > confidenceScores; //you need the added vector level of confusion to account for the level that name is seen since they can be the same
136 //map of classification to confidence for all areas seen
137 //ie. Bacteria;Alphaproteobacteria;Rhizobiales;Azorhizobium_et_rel.;Methylobacterium_et_rel.;Bosea;
138 //ie. Bacteria -> 100, Alphaproteobacteria -> 100, Rhizobiales -> 87, Azorhizobium_et_rel. -> 78, Methylobacterium_et_rel. -> 70, Bosea -> 50
139 confidenceScores.resize(100); //if you have more than 100 levels of classification...
141 map<string, int>::iterator itBoot;
142 map<string, int>::iterator itBoot2;
143 map<int, int>::iterator itConvert;
145 for (int i = 0; i < iters; i++) {
// draw numToSelect kmers uniformly at random, with replacement
// NOTE(review): rand() % size is slightly biased and no seeding is visible
// here — presumably seeded elsewhere; confirm if reproducibility matters
148 for (int j = 0; j < numToSelect; j++) {
149 int index = int(rand() % kmers.size());
152 temp.push_back(kmers[index]);
// classify the subsample, then credit every ancestor of the resulting genus
156 int newTax = getMostProbableTaxonomy(temp);
157 TaxNode taxonomy = phyloTree->get(newTax);
159 //add to confidence results
160 while (taxonomy.level != 0) { //while you are not at the root
162 itBoot2 = confidenceScores[taxonomy.level].find(taxonomy.name); //is this a classification we already have a count on
164 if (itBoot2 == confidenceScores[taxonomy.level].end()) { //not already in confidence scores
165 confidenceScores[taxonomy.level][taxonomy.name] = 1;
167 confidenceScores[taxonomy.level][taxonomy.name]++;
170 taxonomy = phyloTree->get(taxonomy.parent);
// walk the assigned lineage from genus up to (but not including) the root,
// prepending "name(count);" for every level that clears the threshold
174 string confidenceTax = "";
176 TaxNode seqTax = phyloTree->get(tax);
178 while (seqTax.level != 0) { //while you are not at the root
180 itBoot2 = confidenceScores[seqTax.level].find(seqTax.name); //is this a classification we already have a count on
183 if (itBoot2 != confidenceScores[seqTax.level].end()) { //found in confidence scores
184 confidence = confidenceScores[seqTax.level][seqTax.name];
187 if (confidence >= confidenceThreshold) {
188 confidenceTax = seqTax.name + "(" + toString(confidence) + ");" + confidenceTax;
189 simpleTax = seqTax.name + ";" + simpleTax;
192 seqTax = phyloTree->get(seqTax.parent);
195 return confidenceTax;
198 catch(exception& e) {
199 errorOut(e, "Bayesian", "bootstrapResults");
203 /**************************************************************************************************/
204 int Bayesian::getMostProbableTaxonomy(vector<int> queryKmer) {
// Naive-Bayes scoring: for each candidate genus, sums the precomputed
// log-probabilities (wordGenusProb) of the query's kmers and returns the
// phyloTree index of the genus with the highest total log-probability.
208 double maxProbability = -1000000.0;
209 //find taxonomy with highest probability that this sequence is from it
210 for (int k = 0; k < genusNodes.size(); k++) {
212 //for each taxonomy calc its probability
// sum of logs == log of the product of per-kmer probabilities
214 for (int i = 0; i < queryKmer.size(); i++) {
215 prob += wordGenusProb[queryKmer[i]][k];
218 //is this the taxonomy with the greatest probability?
219 if (prob > maxProbability) {
220 indexofGenus = genusNodes[k];
221 maxProbability = prob;
227 catch(exception& e) {
228 errorOut(e, "Bayesian", "getMostProbableTaxonomy");
232 /*************************************************************************************************
233 map<string, int> Bayesian::parseTaxMap(string newTax) {
236 map<string, int> parsed;
238 newTax = newTax.substr(0, newTax.length()-1); //get rid of last ';'
242 while (newTax.find_first_of(';') != -1) {
243 individual = newTax.substr(0,newTax.find_first_of(';'));
244 newTax = newTax.substr(newTax.find_first_of(';')+1, newTax.length());
245 parsed[individual] = 1;
254 catch(exception& e) {
255 errorOut(e, "Bayesian", "parseTax");
259 /**************************************************************************************************/
260 void Bayesian::readProbFile(ifstream& in, ifstream& inNum) {
// Restores wordGenusProb from the cache files written by the constructor:
// 'inNum' holds, per kmer, the zero-count probability (the pseudocount) and
// the number of non-zero entries; 'in' holds the (genus index, log-prob)
// pairs for those non-zero entries.
263 int kmer, name, count; count = 0;
264 vector<int> num; num.resize(numKmers);
266 vector<float> zeroCountProb; zeroCountProb.resize(numKmers);
// read the per-kmer pseudocount and non-zero-entry count
// (the loop advancing 'count' over all kmers is outside this visible chunk —
// confirm against the full file)
269 inNum >> zeroCountProb[count] >> num[count];
278 //set them all to zero value
// first default every genus to the zero-count log-probability for this kmer...
279 for (int i = 0; i < genusNodes.size(); i++) {
280 wordGenusProb[kmer][i] = log(zeroCountProb[kmer] / (float) (genusTotals[i]+1));
283 //get probs for nonzero values
// ...then overwrite the entries that were explicitly stored in the .prob file
284 for (int i = 0; i < num[kmer]; i++) {
286 wordGenusProb[kmer][name] = prob;
293 catch(exception& e) {
294 errorOut(e, "Bayesian", "readProbFile");
298 /**************************************************************************************************/