X-Git-Url: https://git.donarmstrong.com/?a=blobdiff_plain;f=bam2bcf_indel.c;h=ab9e83ca8f712ae7d7975d67b94b10c702e64908;hb=9f118264ea012adc21a46d7c03eaad4f9ce7d4d4;hp=ab9b499b1afeb4acd7f180002f679fddb8f9eddc;hpb=29e8a5f37699e99ee2e838ee5efcbfbbc442e338;p=samtools.git

diff --git a/bam2bcf_indel.c b/bam2bcf_indel.c
index ab9b499..ab9e83c 100644
--- a/bam2bcf_indel.c
+++ b/bam2bcf_indel.c
@@ -3,15 +3,16 @@
 #include <string.h>
 #include "bam.h"
 #include "bam2bcf.h"
-#include "ksort.h"
 #include "kaln.h"
 #include "kprobaln.h"
 #include "khash.h"
 KHASH_SET_INIT_STR(rg)
 
+#include "ksort.h"
+KSORT_INIT_GENERIC(uint32_t)
+
 #define MINUS_CONST 0x10000000
 #define INDEL_WINDOW_SIZE 50
-#define MAX_SCORE 90
 
 void *bcf_call_add_rg(void *_hash, const char *hdtext, const char *list)
 {
@@ -65,7 +66,7 @@ static int tpos2qpos(const bam1_core_t *c, const uint32_t *cigar, int32_t tpos,
 	for (k = 0; k < c->n_cigar; ++k) {
 		int op = cigar[k] & BAM_CIGAR_MASK;
 		int l = cigar[k] >> BAM_CIGAR_SHIFT;
-		if (op == BAM_CMATCH) {
+		if (op == BAM_CMATCH || op == BAM_CEQUAL || op == BAM_CDIFF) {
 			if (c->pos > tpos) return y;
 			if (x + l > tpos) {
 				*_tpos = tpos;
@@ -111,9 +112,9 @@ static inline int est_indelreg(int pos, const char *ref, int l, char *ins4)
 int bcf_call_gap_prep(int n, int *n_plp, bam_pileup1_t **plp, int pos, bcf_callaux_t *bca, const char *ref,
 					  const void *rghash)
 {
-	extern void ks_introsort_uint32_t(int, uint32_t*);
-	int i, s, j, k, t, n_types, *types, max_rd_len, left, right, max_ins, *score, N, K, l_run, ref_type, n_alt;
-	char *inscns = 0, *ref2, *query;
+	int i, s, j, k, t, n_types, *types, max_rd_len, left, right, max_ins, *score1, *score2, max_ref2;
+	int N, K, l_run, ref_type, n_alt;
+	char *inscns = 0, *ref2, *query, **ref_sample;
 	khash_t(rg) *hash = (khash_t(rg)*)rghash;
 	if (ref == 0 || bca == 0) return -1;
 	// mark filtered reads
@@ -141,7 +142,7 @@ int bcf_call_gap_prep(int n, int *n_plp, bam_pileup1_t **plp, int pos, bcf_calla
 	if (s == n) return -1; // there is no indel at this position.
 	for (s = N = 0; s < n; ++s) N += n_plp[s]; // N is the total number of reads
 	{ // find out how many types of indels are present
-		int m;
+		int m, n_alt = 0, n_tot = 0;
 		uint32_t *aux;
 		aux = calloc(N + 1, 4);
 		m = max_rd_len = 0;
@@ -149,19 +150,35 @@ int bcf_call_gap_prep(int n, int *n_plp, bam_pileup1_t **plp, int pos, bcf_calla
 		for (s = 0; s < n; ++s) {
 			for (i = 0; i < n_plp[s]; ++i) {
 				const bam_pileup1_t *p = plp[s] + i;
-				if (p->indel != 0 && (rghash == 0 || p->aux == 0))
-					aux[m++] = MINUS_CONST + p->indel;
+				if (rghash == 0 || p->aux == 0) {
+					++n_tot;
+					if (p->indel != 0) {
+						++n_alt;
+						aux[m++] = MINUS_CONST + p->indel;
+					}
+				}
 				j = bam_cigar2qlen(&p->b->core, bam1_cigar(p->b));
 				if (j > max_rd_len) max_rd_len = j;
 			}
 		}
+		// To prevent long stretches of N's to be mistaken for indels (sometimes thousands of bases),
+		//  check the number of N's in the sequence and skip places where half or more reference bases are Ns.
+		int nN=0; for (i=pos; i-pos<max_rd_len && ref[i]; i++) if ( ref[i]=='N' ) nN++;
+		if ( nN*2>(i-pos) ) return -1;
+
 		ks_introsort(uint32_t, m, aux);
 		// squeeze out identical types
 		for (i = 1, n_types = 1; i < m; ++i)
 			if (aux[i] != aux[i-1]) ++n_types;
-		if (n_types == 1) { // no indels
+		if (n_types == 1 || (double)n_alt / n_tot < bca->min_frac || n_alt < bca->min_support) { // then skip
 			free(aux); return -1;
 		}
+		if (n_types >= 64) {
+			free(aux);
+			if (bam_verbose >= 2)
+				fprintf(stderr, "[%s] excessive INDEL alleles at position %d. Skip the position.\n", __func__, pos + 1);
+			return -1;
+		}
 		types = (int*)calloc(n_types, sizeof(int));
 		t = 0;
 		types[t++] = aux[0] - MINUS_CONST;
@@ -172,7 +189,6 @@ int bcf_call_gap_prep(int n, int *n_plp, bam_pileup1_t **plp, int pos, bcf_calla
 		for (t = 0; t < n_types; ++t)
 			if (types[t] == 0) break;
 		ref_type = t; // the index of the reference type (0)
-		assert(n_types < 64);
 	}
 	{ // calculate left and right boundary
 		left = pos > INDEL_WINDOW_SIZE? pos - INDEL_WINDOW_SIZE : 0;
@@ -183,6 +199,58 @@ int bcf_call_gap_prep(int n, int *n_plp, bam_pileup1_t **plp, int pos, bcf_calla
 			if (ref[i] == 0) break;
 		right = i;
 	}
+	/* The following block fixes a long-existing flaw in the INDEL
+	 * calling model: the interference of nearby SNPs. However, it also
+	 * reduces the power because sometimes, substitutions caused by
+	 * indels are not distinguishable from true mutations. Multiple
+	 * sequence realignment helps to increase the power.
+	 */
+	{ // construct per-sample consensus
+		int L = right - left + 1, max_i, max2_i;
+		uint32_t *cns, max, max2;
+		char *ref0, *r;
+		ref_sample = calloc(n, sizeof(void*));
+		cns = calloc(L, 4);
+		ref0 = calloc(L, 1);
+		for (i = 0; i < right - left; ++i)
+			ref0[i] = bam_nt16_table[(int)ref[i+left]];
+		for (s = 0; s < n; ++s) {
+			r = ref_sample[s] = calloc(L, 1);
+			memset(cns, 0, sizeof(int) * L);
+			// collect ref and non-ref counts
+			for (i = 0; i < n_plp[s]; ++i) {
+				bam_pileup1_t *p = plp[s] + i;
+				bam1_t *b = p->b;
+				uint32_t *cigar = bam1_cigar(b);
+				uint8_t *seq = bam1_seq(b);
+				int x = b->core.pos, y = 0;
+				for (k = 0; k < b->core.n_cigar; ++k) {
+					int op = cigar[k]&0xf;
+					int j, l = cigar[k]>>4;
+					if (op == BAM_CMATCH || op == BAM_CEQUAL || op == BAM_CDIFF) {
+						for (j = 0; j < l; ++j)
+							if (x + j >= left && x + j < right)
+								cns[x+j-left] += (bam1_seqi(seq, y+j) == ref0[x+j-left])? 1 : 0x10000;
+						x += l; y += l;
+					} else if (op == BAM_CDEL || op == BAM_CREF_SKIP) x += l;
+					else if (op == BAM_CINS || op == BAM_CSOFT_CLIP) y += l;
+				}
+			}
+			// determine the consensus
+			for (i = 0; i < right - left; ++i) r[i] = ref0[i];
+			max = max2 = 0; max_i = max2_i = -1;
+			for (i = 0; i < right - left; ++i) {
+				if (cns[i]>>16 >= max>>16) max2 = max, max2_i = max_i, max = cns[i], max_i = i;
+				else if (cns[i]>>16 >= max2>>16) max2 = cns[i], max2_i = i;
+			}
+			if ((double)(max&0xffff) / ((max&0xffff) + (max>>16)) >= 0.7) max_i = -1;
+			if ((double)(max2&0xffff) / ((max2&0xffff) + (max2>>16)) >= 0.7) max2_i = -1;
+			if (max_i >= 0) r[max_i] = 15;
+			if (max2_i >= 0) r[max2_i] = 15;
+//			for (i = 0; i < right - left; ++i) fputc("=ACMGRSVTWYHKDBN"[(int)r[i]], stderr); fputc('\n', stderr);
+		}
+		free(ref0); free(cns);
+	}
 	{ // the length of the homopolymer run around the current position
 		int c = bam_nt16_table[(int)ref[pos + 1]];
 		if (c == 15) l_run = 1;
@@ -230,40 +298,45 @@ int bcf_call_gap_prep(int n, int *n_plp, bam_pileup1_t **plp, int pos, bcf_calla
 		free(inscns_aux);
 	}
 	// compute the likelihood given each type of indel for each read
-	ref2 = calloc(right - left + max_ins + 2, 1);
+	max_ref2 = right - left + 2 + 2 * (max_ins > -types[0]? max_ins : -types[0]);
+	ref2 = calloc(max_ref2, 1);
 	query = calloc(right - left + max_rd_len + max_ins + 2, 1);
-	score = calloc(N * n_types, sizeof(int));
+	score1 = calloc(N * n_types, sizeof(int));
+	score2 = calloc(N * n_types, sizeof(int));
 	bca->indelreg = 0;
 	for (t = 0; t < n_types; ++t) {
 		int l, ir;
-		kpa_par_t ap = { 1e-4, 1e-2, 10 };
-		ap.bw = abs(types[t]) + 3;
+		kpa_par_t apf1 = { 1e-4, 1e-2, 10 }, apf2 = { 1e-6, 1e-3, 10 };
+		apf1.bw = apf2.bw = abs(types[t]) + 3;
 		// compute indelreg
 		if (types[t] == 0) ir = 0;
 		else if (types[t] > 0) ir = est_indelreg(pos, ref, types[t], &inscns[t*max_ins]);
 		else ir = est_indelreg(pos, ref, -types[t], 0);
 		if (ir > bca->indelreg) bca->indelreg = ir;
 //		fprintf(stderr, "%d, %d, %d\n", pos, types[t], ir);
-		// write ref2
-		for (k = 0, j = left; j <= pos; ++j)
-			ref2[k++] = bam_nt16_nt4_table[bam_nt16_table[(int)ref[j]]];
-		if (types[t] <= 0) j += -types[t];
-		else for (l = 0; l < types[t]; ++l)
-			ref2[k++] = inscns[t*max_ins + l];
-		if (types[0] < 0) { // mask deleted sequences to avoid a particular error in the model.
-			int jj, tmp = types[t] >= 0? -types[0] : -types[0] + types[t];
-			for (jj = 0; jj < tmp && j < right && ref[j]; ++jj, ++j)
-				ref2[k++] = 4;
-		}
-		for (; j < right && ref[j]; ++j)
-			ref2[k++] = bam_nt16_nt4_table[bam_nt16_table[(int)ref[j]]];
-		if (j < right) right = j;
-		// align each read to ref2
+		// realignment
 		for (s = K = 0; s < n; ++s) {
+			// write ref2
+			for (k = 0, j = left; j <= pos; ++j)
+				ref2[k++] = bam_nt16_nt4_table[(int)ref_sample[s][j-left]];
+			if (types[t] <= 0) j += -types[t];
+			else for (l = 0; l < types[t]; ++l)
+				ref2[k++] = inscns[t*max_ins + l];
+			for (; j < right && ref[j]; ++j)
+				ref2[k++] = bam_nt16_nt4_table[(int)ref_sample[s][j-left]];
+			for (; k < max_ref2; ++k) ref2[k] = 4;
+			if (j < right) right = j;
+			// align each read to ref2
 			for (i = 0; i < n_plp[s]; ++i, ++K) {
 				bam_pileup1_t *p = plp[s] + i;
-				int qbeg, qend, tbeg, tend, sc;
+				int qbeg, qend, tbeg, tend, sc, kk;
 				uint8_t *seq = bam1_seq(p->b);
+				uint32_t *cigar = bam1_cigar(p->b);
+				if (p->b->core.flag&4) continue; // unmapped reads
+				// FIXME: the following loop should be better moved outside; nonetheless, realignment should be much slower anyway.
+				for (kk = 0; kk < p->b->core.n_cigar; ++kk)
+					if ((cigar[kk]&BAM_CIGAR_MASK) == BAM_CREF_SKIP) break;
+				if (kk < p->b->core.n_cigar) continue;
 				// FIXME: the following skips soft clips, but using them may be more sensitive.
 				// determine the start and end of sequences for alignment
 				qbeg = tpos2qpos(&p->b->core, bam1_cigar(p->b), left, 0, &tbeg);
@@ -275,17 +348,29 @@ int bcf_call_gap_prep(int n, int *n_plp, bam_pileup1_t **plp, int pos, bcf_calla
 				// write the query sequence
 				for (l = qbeg; l < qend; ++l)
 					query[l - qbeg] = bam_nt16_nt4_table[bam1_seqi(seq, l)];
-				{ // do alignment; this is the bottleneck
+				{ // do realignment; this is the bottleneck
 					const uint8_t *qual = bam1_qual(p->b), *bq;
-					uint8_t *qq = 0;
+					uint8_t *qq;
 					qq = calloc(qend - qbeg, 1);
-					bq = (uint8_t*)bam_aux_get(p->b, "BQ");
-					if (bq) ++bq;
-					for (l = qbeg; l < qend; ++l)
-						qq[l - qbeg] = bq? qual[l] + (bq[l] - 33) : qual[l];
+					bq = (uint8_t*)bam_aux_get(p->b, "ZQ");
+					if (bq) ++bq; // skip type
+					for (l = qbeg; l < qend; ++l) {
+						qq[l - qbeg] = bq? qual[l] + (bq[l] - 64) : qual[l];
+						if (qq[l - qbeg] > 30) qq[l - qbeg] = 30;
+						if (qq[l - qbeg] < 7) qq[l - qbeg] = 7;
+					}
 					sc = kpa_glocal((uint8_t*)ref2 + tbeg - left, tend - tbeg + abs(types[t]),
-								   (uint8_t*)query, qend - qbeg, qq, &ap, 0, 0);
-					score[K*n_types + t] = sc;
+									(uint8_t*)query, qend - qbeg, qq, &apf1, 0, 0);
+					l = (int)(100. * sc / (qend - qbeg) + .499); // used for adjusting indelQ below
+					if (l > 255) l = 255;
+					score1[K*n_types + t] = score2[K*n_types + t] = sc<<8 | l;
+					if (sc > 5) {
+						sc = kpa_glocal((uint8_t*)ref2 + tbeg - left, tend - tbeg + abs(types[t]),
+										(uint8_t*)query, qend - qbeg, qq, &apf2, 0, 0);
+						l = (int)(100. * sc / (qend - qbeg) + .499);
+						if (l > 255) l = 255;
+						score2[K*n_types + t] = sc<<8 | l;
+					}
 					free(qq);
 				}
 /*
@@ -308,7 +393,7 @@ int bcf_call_gap_prep(int n, int *n_plp, bam_pileup1_t **plp, int pos, bcf_calla
 		for (s = K = 0; s < n; ++s) {
 			for (i = 0; i < n_plp[s]; ++i, ++K) {
 				bam_pileup1_t *p = plp[s] + i;
-				int *sct = &score[K*n_types], indelQ, seqQ;
+				int *sct = &score1[K*n_types], indelQ1, indelQ2, seqQ, indelQ;
 				for (t = 0; t < n_types; ++t) sc[t] = sct[t]<<6 | t;
 				for (t = 1; t < n_types; ++t) // insertion sort
 					for (j = t; j > 0 && sc[j] < sc[j-1]; --j)
@@ -320,18 +405,37 @@ int bcf_call_gap_prep(int n, int *n_plp, bam_pileup1_t **plp, int pos, bcf_calla
 				 * compromise for multi-allelic indels.
 				 */
 				if ((sc[0]&0x3f) == ref_type) {
-					indelQ = (sc[1]>>6) - (sc[0]>>6);
+					indelQ1 = (sc[1]>>14) - (sc[0]>>14);
 					seqQ = est_seqQ(bca, types[sc[1]&0x3f], l_run);
 				} else {
 					for (t = 0; t < n_types; ++t) // look for the reference type
 						if ((sc[t]&0x3f) == ref_type) break;
-					indelQ = (sc[t]>>6) - (sc[0]>>6);
+					indelQ1 = (sc[t]>>14) - (sc[0]>>14);
 					seqQ = est_seqQ(bca, types[sc[0]&0x3f], l_run);
 				}
-				if (sc[0]>>6 > MAX_SCORE) indelQ = 0; // too many mismatches; something bad possibly happened
-				p->aux = (sc[0]&0x3f)<<16 | seqQ<<8 | indelQ;
+				tmp = sc[0]>>6 & 0xff;
+				indelQ1 = tmp > 111? 0 : (int)((1. - tmp/111.) * indelQ1 + .499); // reduce indelQ
+				sct = &score2[K*n_types];
+				for (t = 0; t < n_types; ++t) sc[t] = sct[t]<<6 | t;
+				for (t = 1; t < n_types; ++t) // insertion sort
+					for (j = t; j > 0 && sc[j] < sc[j-1]; --j)
+						tmp = sc[j], sc[j] = sc[j-1], sc[j-1] = tmp;
+				if ((sc[0]&0x3f) == ref_type) {
+					indelQ2 = (sc[1]>>14) - (sc[0]>>14);
+				} else {
+					for (t = 0; t < n_types; ++t) // look for the reference type
+						if ((sc[t]&0x3f) == ref_type) break;
+					indelQ2 = (sc[t]>>14) - (sc[0]>>14);
+				}
+				tmp = sc[0]>>6 & 0xff;
+				indelQ2 = tmp > 111? 0 : (int)((1. - tmp/111.) * indelQ2 + .499);
+				// pick the smaller between indelQ1 and indelQ2
+				indelQ = indelQ1 < indelQ2? indelQ1 : indelQ2;
+				if (indelQ > 255) indelQ = 255;
+				if (seqQ > 255) seqQ = 255;
+				p->aux = (sc[0]&0x3f)<<16 | seqQ<<8 | indelQ; // use 22 bits in total
 				sumq[sc[0]&0x3f] += indelQ < seqQ? indelQ : seqQ;
-//				fprintf(stderr, "pos=%d read=%d:%d name=%s call=%d q=%d\n", pos, s, i, bam1_qname(p->b), types[sc[0]&0x3f], indelQ);
+//				fprintf(stderr, "pos=%d read=%d:%d name=%s call=%d indelQ=%d seqQ=%d\n", pos, s, i, bam1_qname(p->b), types[sc[0]&0x3f], indelQ, seqQ);
 			}
 		}
 		// determine bca->indel_types[] and bca->inscns
@@ -367,8 +471,10 @@ int bcf_call_gap_prep(int n, int *n_plp, bam_pileup1_t **plp, int pos, bcf_calla
 			}
 		}
 	}
-	free(score);
+	free(score1); free(score2);
 	// free
+	for (i = 0; i < n; ++i) free(ref_sample[i]);
+	free(ref_sample);
 	free(types); free(inscns);
 	return n_alt > 0? 0 : -1;
 }
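Note (illustrative sketch, not part of the patch): the two realignment passes above keep their results bit-packed. Each call stores kpa_glocal()'s score sc together with a per-base mismatch measure l (capped at 255) as sc<<8 | l in score1[]/score2[]; the per-read insertion sort then packs score<<6 | type, so the raw score is recovered with >>14 (8 + 6 low bits) and the type index with &0x3f, and p->aux finally packs type<<16 | seqQ<<8 | indelQ into 22 bits. The standalone program below only demonstrates that packing/unpacking with made-up values; the variable names mirror the patch but nothing here is taken from the samtools API.

#include <stdio.h>

int main(void)
{
	int sc = 37;   /* stand-in for a kpa_glocal() alignment score (lower is better) */
	int len = 25;  /* stand-in for qend - qbeg, the number of aligned query bases   */
	int type = 2;  /* indel type index; must stay below 64 to fit in 6 bits         */

	/* per-base mismatch measure, capped at 255, kept in the low 8 bits */
	int l = (int)(100. * sc / len + .499);
	if (l > 255) l = 255;
	int packed = sc << 8 | l;        /* what score1[]/score2[] hold per read/type */

	int key = packed << 6 | type;    /* what the per-read insertion sort compares */
	printf("score=%d l=%d type=%d\n", key >> 14, key >> 6 & 0xff, key & 0x3f);

	/* p->aux packs three fields into 22 bits: type<<16 | seqQ<<8 | indelQ */
	int seqQ = 40, indelQ = 25;
	int aux = type << 16 | seqQ << 8 | indelQ;
	printf("type=%d seqQ=%d indelQ=%d\n", aux >> 16 & 0x3f, aux >> 8 & 0xff, aux & 0xff);
	return 0;
}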