// NOTE(review): every line in this excerpt carries a stray leading number (an
// extraction artifact of the original file's line numbering); left untouched.
// Instantiate klib's generic introsort for uint32_t keys; used below to sort
// the encoded indel-type array.
12 KSORT_INIT_GENERIC(uint32_t)
// Bias added to signed indel lengths so they order correctly when sorted as
// unsigned 32-bit values (negative deletions sort below insertions).
14 #define MINUS_CONST 0x10000000
// Reference context (in bp) examined on each side of a candidate indel site.
15 #define INDEL_WINDOW_SIZE 50
// Build (or extend) a khash_t(rg) set of read-group IDs whose platform (PL tag)
// occurs as a substring of `list`, by scanning the @RG lines of the header text
// `hdtext`.  Returns the (possibly newly created) hash as void*.
// NOTE(review): this excerpt is elided — local declarations, the loop over
// successive @RG records, and cleanup fall between the visible lines.
17 void *bcf_call_add_rg(void *_hash, const char *hdtext, const char *list)
19 const char *s, *p, *q, *r, *t;
// No platform filter or no header: nothing to add.
21 if (list == 0 || hdtext == 0) return _hash;
22 if (_hash == 0) _hash = kh_init(rg);
23 hash = (khash_t(rg)*)_hash;
24 if ((s = strstr(hdtext, "@RG\t")) == 0) return hash;
26 t = strstr(s + 4, "@RG\t"); // the next @RG
// Locate the values of the ID and PL tags within the current record.
27 if ((p = strstr(s, "\tID:")) != 0) p += 4;
28 if ((q = strstr(s, "\tPL:")) != 0) q += 4;
// Both tags must exist and must precede the next @RG record (i.e. belong here).
29 if (p && q && (t == 0 || (p < t && q < t))) { // ID and PL are both present
// lp/lq: lengths of the ID and PL values, delimited by tab, newline or NUL.
32 for (r = p; *r && *r != '\t' && *r != '\n'; ++r); lp = r - p;
33 for (r = q; *r && *r != '\t' && *r != '\n'; ++r); lq = r - q;
// x holds first the PL value, then the ID; sized for the longer (+ NUL),
// zero-filled by calloc so the first copy is NUL-terminated.
34 x = calloc((lp > lq? lp : lq) + 1, 1);
35 for (r = q; *r && *r != '\t' && *r != '\n'; ++r) x[r-q] = *r;
// Keep this read group only if its platform string appears in `list`.
36 if (strstr(list, x)) { // insert ID to the hash table
// Reuse x for the ID value.  NOTE(review): if lq > lp a tail of the PL
// string would remain after the copied ID; presumably a terminator is
// written in the elided lines — confirm against the full source.
39 for (r = p; *r && *r != '\t' && *r != '\n'; ++r) x[r-p] = *r;
// Insert the ID as a key only if not already present (x then owned by hash).
41 k = kh_get(rg, hash, x);
42 if (k == kh_end(hash)) k = kh_put(rg, hash, x, &ret);
// Destroy a read-group hash created by bcf_call_add_rg: free every stored key
// (the keys are heap-allocated in bcf_call_add_rg).  The hash itself is
// presumably destroyed with kh_destroy() in the elided lines — confirm.
51 void bcf_call_del_rghash(void *_hash)
54 khash_t(rg) *hash = (khash_t(rg)*)_hash;
// NULL hash is a no-op (hash is optional throughout this module).
55 if (hash == 0) return;
// Walk all buckets; only occupied ones hold a key to free.
56 for (k = kh_begin(hash); k < kh_end(hash); ++k)
57 if (kh_exist(hash, k))
58 free((char*)kh_key(hash, k));
// Map a target (reference) coordinate `tpos` to the corresponding query (read)
// coordinate by walking the alignment's CIGAR.  `x` tracks the reference
// position, `y` the query position.  On a match segment containing tpos the
// query offset is returned directly; when tpos falls inside a deletion or
// reference skip, *_tpos receives the nearest aligned reference coordinate
// (left or right end of the gap, per `is_left`).
// NOTE(review): elided excerpt — the handling of last_y, the tail of the
// del/ref-skip branch and the function's fall-through return are not visible.
62 static int tpos2qpos(const bam1_core_t *c, const uint32_t *cigar, int32_t tpos, int is_left, int32_t *_tpos)
64 int k, x = c->pos, y = 0, last_y = 0;
66 for (k = 0; k < c->n_cigar; ++k) {
67 int op = cigar[k] & BAM_CIGAR_MASK;
68 int l = cigar[k] >> BAM_CIGAR_SHIFT;
// M/=/X consume both reference and query.
69 if (op == BAM_CMATCH || op == BAM_CEQUAL || op == BAM_CDIFF) {
// Target position precedes the alignment start: clamp to current query pos.
70 if (c->pos > tpos) return y;
73 return y + (tpos - x);
// I/S consume query only; D/N consume reference only.
77 } else if (op == BAM_CINS || op == BAM_CSOFT_CLIP) y += l;
78 else if (op == BAM_CDEL || op == BAM_CREF_SKIP) {
// tpos lies inside the gap: report the gap's left or right reference edge.
80 *_tpos = is_left? x : x + l;
89 // FIXME: check if the inserted sequence is consistent with the homopolymer run
90 // l is the relative gap length and l_run is the length of the homopolymer on the reference
// Estimate the sequencing-error quality of an indel of length l: the minimum of
// an affine gap cost (openQ to open, extQ per additional base) and a
// homopolymer cost scaling with |l|/l_run.  Runs shorter than 3bp incur no
// homopolymer penalty (sentinel 1000 makes the affine cost win).
91 static inline int est_seqQ(const bcf_callaux_t *bca, int l, int l_run)
94 q = bca->openQ + bca->extQ * (abs(l) - 1);
// +.499 rounds to nearest int.
95 qh = l_run >= 3? (int)(bca->tandemQ * (double)abs(l) / l_run + .499) : 1000;
96 return q < qh? q : qh;
// Estimate how far to the right of `pos` an l-bp indel could equivalently be
// placed (the length of the ambiguous/repeat region).  For an insertion, ins4
// holds the inserted bases in nt4 coding (0..4 indexing "ACGTN"); for a
// deletion ins4 is NULL and the reference is compared against itself shifted
// by the deletion period.  Scoring: +1 per matching base, -10 per mismatch;
// scanning stops when the score goes negative; max_i remembers the furthest
// position at the running maximum.  The return value (in the elided tail)
// is presumably max_i - pos — confirm against the full source.
99 static inline int est_indelreg(int pos, const char *ref, int l, char *ins4)
101 int i, j, max = 0, max_i = pos, score = 0;
// j%l cycles through the indel sequence with period l.
103 for (i = pos + 1, j = 0; ref[i]; ++i, ++j) {
104 if (ins4) score += (toupper(ref[i]) != "ACGTN"[(int)ins4[j%l]])? -10 : 1;
105 else score += (toupper(ref[i]) != toupper(ref[pos+1+j%l]))? -10 : 1;
106 if (score < 0) break;
107 if (max < score) max = score, max_i = i;
// Collect candidate indel alleles at `pos` from n samples of pileup reads and
// realign every read against each candidate to derive per-read indel quality
// (indelQ) and sequencing quality (seqQ), stored packed into p->aux.
// Returns 0 if at least one read supports a non-reference indel type, else -1.
// NOTE(review): this excerpt is heavily elided — many declarations, loop
// headers, closing braces and cleanup lines fall between the visible lines;
// comments below describe only what the visible code shows.
112 int bcf_call_gap_prep(int n, int *n_plp, bam_pileup1_t **plp, int pos, bcf_callaux_t *bca, const char *ref,
115 int i, s, j, k, t, n_types, *types, max_rd_len, left, right, max_ins, *score1, *score2, max_ref2;
116 int N, K, l_run, ref_type, n_alt;
117 char *inscns = 0, *ref2, *query, **ref_sample;
118 khash_t(rg) *hash = (khash_t(rg)*)rghash;
119 if (ref == 0 || bca == 0) return -1;
// --- Stage 1: mark reads whose RG is not in the (optional) read-group hash.
120 // mark filtered reads
123 for (s = N = 0; s < n; ++s) {
124 for (i = 0; i < n_plp[s]; ++i) {
125 bam_pileup1_t *p = plp[s] + i;
126 const uint8_t *rg = bam_aux_get(p->b, "RG");
127 p->aux = 1; // filtered by default
// rg+1 skips the aux-tag type byte to reach the RG string value.
129 khint_t k = kh_get(rg, hash, (const char*)(rg + 1));
130 if (k != kh_end(hash)) p->aux = 0, ++N; // not filtered
134 if (N == 0) return -1; // no reads left
// --- Stage 2: bail out early if no read carries an indel at this column.
136 // determine if there is a gap
137 for (s = N = 0; s < n; ++s) {
138 for (i = 0; i < n_plp[s]; ++i)
139 if (plp[s][i].indel != 0) break;
140 if (i < n_plp[s]) break;
142 if (s == n) return -1; // there is no indel at this position.
143 for (s = N = 0; s < n; ++s) N += n_plp[s]; // N is the total number of reads
// --- Stage 3: enumerate distinct indel lengths ("types") across all reads.
144 { // find out how many types of indels are present
145 bca->max_support = bca->max_frac = 0;
146 int m, n_alt = 0, n_tot = 0, indel_support_ok = 0;
// aux holds MINUS_CONST-biased indel lengths for unsigned sorting.
148 aux = calloc(N + 1, 4);
150 aux[m++] = MINUS_CONST; // zero indel is always a type
151 for (s = 0; s < n; ++s) {
153 for (i = 0; i < n_plp[s]; ++i) {
154 const bam_pileup1_t *p = plp[s] + i;
// Only unfiltered reads contribute (p->aux == 0 set in Stage 1).
155 if (rghash == 0 || p->aux == 0) {
159 aux[m++] = MINUS_CONST + p->indel;
// Track the longest read to bound the query buffer later.
162 j = bam_cigar2qlen(&p->b->core, bam1_cigar(p->b));
163 if (j > max_rd_len) max_rd_len = j;
// Per-sample support: na/nt presumably count indel/total reads in this sample.
165 float frac = (float)na/nt;
166 if ( !indel_support_ok && na >= bca->min_support && frac >= bca->min_frac )
167 indel_support_ok = 1;
168 if ( na > bca->max_support && frac > 0 ) bca->max_support = na, bca->max_frac = frac;
172 // To prevent long stretches of N's to be mistaken for indels (sometimes thousands of bases),
173 // check the number of N's in the sequence and skip places where half or more reference bases are Ns.
174 int nN=0; for (i=pos; i-pos<max_rd_len && ref[i]; i++) if ( ref[i]=='N' ) nN++;
175 if ( nN*2>i ) { free(aux); return -1; }
// Sort the biased lengths, then count distinct values.
177 ks_introsort(uint32_t, m, aux);
178 // squeeze out identical types
179 for (i = 1, n_types = 1; i < m; ++i)
180 if (aux[i] != aux[i-1]) ++n_types;
181 // Taking totals makes it hard to call rare indels
182 if ( !bca->per_sample_flt )
183 indel_support_ok = ( (float)n_alt / n_tot < bca->min_frac || n_alt < bca->min_support ) ? 0 : 1;
184 if ( n_types == 1 || !indel_support_ok ) { // then skip
185 free(aux); return -1;
189 if (bam_verbose >= 2)
190 fprintf(stderr, "[%s] excessive INDEL alleles at position %d. Skip the position.\n", __func__, pos + 1);
// Materialize the de-duplicated, un-biased type lengths (ascending order).
193 types = (int*)calloc(n_types, sizeof(int));
195 types[t++] = aux[0] - MINUS_CONST;
196 for (i = 1; i < m; ++i)
197 if (aux[i] != aux[i-1])
198 types[t++] = aux[i] - MINUS_CONST;
200 for (t = 0; t < n_types; ++t)
201 if (types[t] == 0) break;
202 ref_type = t; // the index of the reference type (0)
// --- Stage 4: reference window [left, right) around pos.
204 { // calculate left and right boundary
205 left = pos > INDEL_WINDOW_SIZE? pos - INDEL_WINDOW_SIZE : 0;
// Widen rightward by the largest deletion (types[0] is most negative).
206 right = pos + INDEL_WINDOW_SIZE;
207 if (types[0] < 0) right -= types[0];
208 // in case the alignments stand out the reference
209 for (i = pos; i < right; ++i)
210 if (ref[i] == 0) break;
213 /* The following block fixes a long-existing flaw in the INDEL
214 * calling model: the interference of nearby SNPs. However, it also
215 * reduces the power because sometimes, substitutions caused by
216 * indels are not distinguishable from true mutations. Multiple
217 * sequence realignment helps to increase the power.
// --- Stage 5: per-sample consensus reference over the window; positions with
// strong non-ref evidence are masked to ambiguity code 15 (N) so nearby SNPs
// do not bias the realignment.
219 { // construct per-sample consensus
220 int L = right - left + 1, max_i, max2_i;
221 uint32_t *cns, max, max2;
223 ref_sample = calloc(n, sizeof(void*));
// ref0: window reference in 4-bit nt16 coding.
226 for (i = 0; i < right - left; ++i)
227 ref0[i] = bam_nt16_table[(int)ref[i+left]];
228 for (s = 0; s < n; ++s) {
229 r = ref_sample[s] = calloc(L, 1);
230 memset(cns, 0, sizeof(int) * L);
231 // collect ref and non-ref counts
232 for (i = 0; i < n_plp[s]; ++i) {
233 bam_pileup1_t *p = plp[s] + i;
235 uint32_t *cigar = bam1_cigar(b);
236 uint8_t *seq = bam1_seq(b);
237 int x = b->core.pos, y = 0;
238 for (k = 0; k < b->core.n_cigar; ++k) {
239 int op = cigar[k]&0xf;
240 int j, l = cigar[k]>>4;
241 if (op == BAM_CMATCH || op == BAM_CEQUAL || op == BAM_CDIFF) {
242 for (j = 0; j < l; ++j)
243 if (x + j >= left && x + j < right)
// Low 16 bits of cns[] count ref matches, high 16 bits count mismatches.
244 cns[x+j-left] += (bam1_seqi(seq, y+j) == ref0[x+j-left])? 1 : 0x10000;
246 } else if (op == BAM_CDEL || op == BAM_CREF_SKIP) x += l;
247 else if (op == BAM_CINS || op == BAM_CSOFT_CLIP) y += l;
250 // determine the consensus
251 for (i = 0; i < right - left; ++i) r[i] = ref0[i];
// Find the two positions with the most mismatch evidence.
252 max = max2 = 0; max_i = max2_i = -1;
253 for (i = 0; i < right - left; ++i) {
254 if (cns[i]>>16 >= max>>16) max2 = max, max2_i = max_i, max = cns[i], max_i = i;
255 else if (cns[i]>>16 >= max2>>16) max2 = cns[i], max2_i = i;
// Keep the reference base if >=70% of covering reads agree with it.
257 if ((double)(max&0xffff) / ((max&0xffff) + (max>>16)) >= 0.7) max_i = -1;
258 if ((double)(max2&0xffff) / ((max2&0xffff) + (max2>>16)) >= 0.7) max2_i = -1;
// Otherwise mask the position as ambiguous (nt16 code 15 = N).
259 if (max_i >= 0) r[max_i] = 15;
260 if (max2_i >= 0) r[max2_i] = 15;
261 // for (i = 0; i < right - left; ++i) fputc("=ACMGRSVTWYHKDBN"[(int)r[i]], stderr); fputc('\n', stderr);
263 free(ref0); free(cns);
// --- Stage 6: l_run = homopolymer run length spanning pos (feeds est_seqQ).
265 { // the length of the homopolymer run around the current position
266 int c = bam_nt16_table[(int)ref[pos + 1]];
267 if (c == 15) l_run = 1;
269 for (i = pos + 2; ref[i]; ++i)
270 if (bam_nt16_table[(int)ref[i]] != c) break;
272 for (i = pos; i >= 0; --i)
273 if (bam_nt16_table[(int)ref[i]] != c) break;
// --- Stage 7: majority-vote consensus of the inserted bases for each
// insertion type (inscns, nt4 coding, 4 = ambiguous).
277 // construct the consensus sequence
278 max_ins = types[n_types - 1]; // max_ins is at least 0
280 int *inscns_aux = calloc(4 * n_types * max_ins, sizeof(int));
281 // count the number of occurrences of each base at each position for each type of insertion
282 for (t = 0; t < n_types; ++t) {
284 for (s = 0; s < n; ++s) {
285 for (i = 0; i < n_plp[s]; ++i) {
286 bam_pileup1_t *p = plp[s] + i;
287 if (p->indel == types[t]) {
288 uint8_t *seq = bam1_seq(p->b);
// Inserted bases sit at query offsets qpos+1..qpos+indel.
289 for (k = 1; k <= p->indel; ++k) {
290 int c = bam_nt16_nt4_table[bam1_seqi(seq, p->qpos + k)];
291 if (c < 4) ++inscns_aux[(t*max_ins+(k-1))*4 + c];
298 // use the majority rule to construct the consensus
299 inscns = calloc(n_types * max_ins, 1);
300 for (t = 0; t < n_types; ++t) {
301 for (j = 0; j < types[t]; ++j) {
302 int max = 0, max_k = -1, *ia = &inscns_aux[(t*max_ins+j)*4];
303 for (k = 0; k < 4; ++k)
305 max = ia[k], max_k = k;
306 inscns[t*max_ins + j] = max? max_k : 4;
// --- Stage 8: for every indel type, build the alternate reference (ref2) per
// sample and realign every read to it with kpa_glocal, under two parameter
// sets (score1/score2); scores are packed as sc<<8 | per-base-score.
311 // compute the likelihood given each type of indel for each read
312 max_ref2 = right - left + 2 + 2 * (max_ins > -types[0]? max_ins : -types[0]);
313 ref2 = calloc(max_ref2, 1);
314 query = calloc(right - left + max_rd_len + max_ins + 2, 1);
315 score1 = calloc(N * n_types, sizeof(int));
316 score2 = calloc(N * n_types, sizeof(int));
318 for (t = 0; t < n_types; ++t) {
// apf1 is permissive, apf2 stringent; band width scales with the indel size.
320 kpa_par_t apf1 = { 1e-4, 1e-2, 10 }, apf2 = { 1e-6, 1e-3, 10 };
321 apf1.bw = apf2.bw = abs(types[t]) + 3;
// Indel-region length for this type (insertion consensus vs. deletion).
323 if (types[t] == 0) ir = 0;
324 else if (types[t] > 0) ir = est_indelreg(pos, ref, types[t], &inscns[t*max_ins]);
325 else ir = est_indelreg(pos, ref, -types[t], 0);
326 if (ir > bca->indelreg) bca->indelreg = ir;
327 // fprintf(stderr, "%d, %d, %d\n", pos, types[t], ir);
329 for (s = K = 0; s < n; ++s) {
// ref2 = per-sample consensus up to pos, then the type's indel applied.
331 for (k = 0, j = left; j <= pos; ++j)
332 ref2[k++] = bam_nt16_nt4_table[(int)ref_sample[s][j-left]];
// Deletion: skip bases on the consensus; insertion: splice in inscns.
333 if (types[t] <= 0) j += -types[t];
334 else for (l = 0; l < types[t]; ++l)
335 ref2[k++] = inscns[t*max_ins + l];
336 for (; j < right && ref[j]; ++j)
337 ref2[k++] = bam_nt16_nt4_table[(int)ref_sample[s][j-left]];
// Pad with N (nt4 code 4) and clamp right to where the reference ended.
338 for (; k < max_ref2; ++k) ref2[k] = 4;
339 if (j < right) right = j;
340 // align each read to ref2
341 for (i = 0; i < n_plp[s]; ++i, ++K) {
342 bam_pileup1_t *p = plp[s] + i;
343 int qbeg, qend, tbeg, tend, sc, kk;
344 uint8_t *seq = bam1_seq(p->b);
345 uint32_t *cigar = bam1_cigar(p->b);
346 if (p->b->core.flag&4) continue; // unmapped reads
347 // FIXME: the following loop should be better moved outside; nonetheless, realignment should be much slower anyway.
// Skip spliced alignments (any N op); realignment cannot model them.
348 for (kk = 0; kk < p->b->core.n_cigar; ++kk)
349 if ((cigar[kk]&BAM_CIGAR_MASK) == BAM_CREF_SKIP) break;
350 if (kk < p->b->core.n_cigar) continue;
351 // FIXME: the following skips soft clips, but using them may be more sensitive.
352 // determine the start and end of sequences for alignment
353 qbeg = tpos2qpos(&p->b->core, bam1_cigar(p->b), left, 0, &tbeg);
354 qend = tpos2qpos(&p->b->core, bam1_cigar(p->b), right, 1, &tend);
357 tbeg = tbeg - l > left? tbeg - l : left;
359 // write the query sequence
360 for (l = qbeg; l < qend; ++l)
361 query[l - qbeg] = bam_nt16_nt4_table[bam1_seqi(seq, l)];
362 { // do realignment; this is the bottleneck
// qq: per-base qualities, BAQ-adjusted via the ZQ tag when present,
// clamped into [7, 30].
363 const uint8_t *qual = bam1_qual(p->b), *bq;
365 qq = calloc(qend - qbeg, 1);
366 bq = (uint8_t*)bam_aux_get(p->b, "ZQ");
367 if (bq) ++bq; // skip type
368 for (l = qbeg; l < qend; ++l) {
369 qq[l - qbeg] = bq? qual[l] + (bq[l] - 64) : qual[l];
370 if (qq[l - qbeg] > 30) qq[l - qbeg] = 30;
371 if (qq[l - qbeg] < 7) qq[l - qbeg] = 7;
// First (permissive) alignment; l is the score per 100 query bases.
373 sc = kpa_glocal((uint8_t*)ref2 + tbeg - left, tend - tbeg + abs(types[t]),
374 (uint8_t*)query, qend - qbeg, qq, &apf1, 0, 0);
375 l = (int)(100. * sc / (qend - qbeg) + .499); // used for adjusting indelQ below
376 if (l > 255) l = 255;
// Pack alignment score (high bits) with the capped per-base score (low 8).
377 score1[K*n_types + t] = score2[K*n_types + t] = sc<<8 | l;
// Second (stringent) alignment, stored in score2 only.
379 sc = kpa_glocal((uint8_t*)ref2 + tbeg - left, tend - tbeg + abs(types[t]),
380 (uint8_t*)query, qend - qbeg, qq, &apf2, 0, 0);
381 l = (int)(100. * sc / (qend - qbeg) + .499);
382 if (l > 255) l = 255;
383 score2[K*n_types + t] = sc<<8 | l;
// (Debug dump of the alignment window, presumably behind a flag.)
388 for (l = 0; l < tend - tbeg + abs(types[t]); ++l)
389 fputc("ACGTN"[(int)ref2[tbeg-left+l]], stderr);
391 for (l = 0; l < qend - qbeg; ++l) fputc("ACGTN"[(int)query[l]], stderr);
393 fprintf(stderr, "pos=%d type=%d read=%d:%d name=%s qbeg=%d tbeg=%d score=%d\n", pos, types[t], s, i, bam1_qname(p->b), qbeg, tbeg, sc);
398 free(ref2); free(query);
// --- Stage 9: per read, derive indelQ as the score gap between the best type
// and the reference type, under both parameter sets; keep the smaller.
// alloca is safe here: n_types was capped earlier ("excessive INDEL alleles").
401 sc = alloca(n_types * sizeof(int));
402 sumq = alloca(n_types * sizeof(int));
403 memset(sumq, 0, sizeof(int) * n_types);
404 for (s = K = 0; s < n; ++s) {
405 for (i = 0; i < n_plp[s]; ++i, ++K) {
406 bam_pileup1_t *p = plp[s] + i;
407 int *sct = &score1[K*n_types], indelQ1, indelQ2, seqQ, indelQ;
// sc[t] = score<<6 | type index, so sorting orders by score, ties by type.
408 for (t = 0; t < n_types; ++t) sc[t] = sct[t]<<6 | t;
409 for (t = 1; t < n_types; ++t) // insertion sort
410 for (j = t; j > 0 && sc[j] < sc[j-1]; --j)
411 tmp = sc[j], sc[j] = sc[j-1], sc[j-1] = tmp;
412 /* errmod_cal() assumes that if the call is wrong, the
413 * likelihoods of other events are equal. This is about
414 * right for substitutions, but is not desired for
415 * indels. To reuse errmod_cal(), I have to make
416 * compromise for multi-allelic indels.
// Best type is the reference: indelQ = margin of the runner-up over it.
418 if ((sc[0]&0x3f) == ref_type) {
419 indelQ1 = (sc[1]>>14) - (sc[0]>>14);
420 seqQ = est_seqQ(bca, types[sc[1]&0x3f], l_run);
// Otherwise: margin of the reference type over the best (non-ref) type.
422 for (t = 0; t < n_types; ++t) // look for the reference type
423 if ((sc[t]&0x3f) == ref_type) break;
424 indelQ1 = (sc[t]>>14) - (sc[0]>>14);
425 seqQ = est_seqQ(bca, types[sc[0]&0x3f], l_run);
// Down-weight indelQ when the best alignment itself is poor (per-base score).
427 tmp = sc[0]>>6 & 0xff;
428 indelQ1 = tmp > 111? 0 : (int)((1. - tmp/111.) * indelQ1 + .499); // reduce indelQ
// Repeat the same computation with the stringent-alignment scores.
429 sct = &score2[K*n_types];
430 for (t = 0; t < n_types; ++t) sc[t] = sct[t]<<6 | t;
431 for (t = 1; t < n_types; ++t) // insertion sort
432 for (j = t; j > 0 && sc[j] < sc[j-1]; --j)
433 tmp = sc[j], sc[j] = sc[j-1], sc[j-1] = tmp;
434 if ((sc[0]&0x3f) == ref_type) {
435 indelQ2 = (sc[1]>>14) - (sc[0]>>14);
437 for (t = 0; t < n_types; ++t) // look for the reference type
438 if ((sc[t]&0x3f) == ref_type) break;
439 indelQ2 = (sc[t]>>14) - (sc[0]>>14);
441 tmp = sc[0]>>6 & 0xff;
442 indelQ2 = tmp > 111? 0 : (int)((1. - tmp/111.) * indelQ2 + .499);
443 // pick the smaller between indelQ1 and indelQ2
444 indelQ = indelQ1 < indelQ2? indelQ1 : indelQ2;
445 if (indelQ > 255) indelQ = 255;
446 if (seqQ > 255) seqQ = 255;
// Pack per-read call: bits 16+ = type index, 8-15 = seqQ, 0-7 = indelQ.
447 p->aux = (sc[0]&0x3f)<<16 | seqQ<<8 | indelQ; // use 22 bits in total
448 sumq[sc[0]&0x3f] += indelQ < seqQ? indelQ : seqQ;
449 // fprintf(stderr, "pos=%d read=%d:%d name=%s call=%d indelQ=%d seqQ=%d\n", pos, s, i, bam1_qname(p->b), types[sc[0]&0x3f], indelQ, seqQ);
// --- Stage 10: keep the top 4 types by summed quality (reference forced
// first) as the output alleles in bca->indel_types / bca->inscns.
452 // determine bca->indel_types[] and bca->inscns
453 bca->maxins = max_ins;
454 bca->inscns = realloc(bca->inscns, bca->maxins * 4);
455 for (t = 0; t < n_types; ++t)
456 sumq[t] = sumq[t]<<6 | t;
457 for (t = 1; t < n_types; ++t) // insertion sort
458 for (j = t; j > 0 && sumq[j] > sumq[j-1]; --j)
459 tmp = sumq[j], sumq[j] = sumq[j-1], sumq[j-1] = tmp;
460 for (t = 0; t < n_types; ++t) // look for the reference type
461 if ((sumq[t]&0x3f) == ref_type) break;
462 if (t) { // then move the reference type to the first
464 for (; t > 0; --t) sumq[t] = sumq[t-1];
467 for (t = 0; t < 4; ++t) bca->indel_types[t] = B2B_INDEL_NULL;
468 for (t = 0; t < 4 && t < n_types; ++t) {
469 bca->indel_types[t] = types[sumq[t]&0x3f];
470 memcpy(&bca->inscns[t * bca->maxins], &inscns[(sumq[t]&0x3f) * max_ins], bca->maxins);
// --- Stage 11: rewrite each read's p->aux against the final 4-allele set
// (j==4 means the read's type was dropped) and count alt-supporting reads.
473 for (s = n_alt = 0; s < n; ++s) {
474 for (i = 0; i < n_plp[s]; ++i) {
475 bam_pileup1_t *p = plp[s] + i;
476 int x = types[p->aux>>16&0x3f];
477 for (j = 0; j < 4; ++j)
478 if (x == bca->indel_types[j]) break;
479 p->aux = j<<16 | (j == 4? 0 : (p->aux&0xffff));
480 if ((p->aux>>16&0x3f) > 0) ++n_alt;
481 // fprintf(stderr, "X pos=%d read=%d:%d name=%s call=%d type=%d q=%d seqQ=%d\n", pos, s, i, bam1_qname(p->b), p->aux>>16&63, bca->indel_types[p->aux>>16&63], p->aux&0xff, p->aux>>8&0xff);
485 free(score1); free(score2);
487 for (i = 0; i < n; ++i) free(ref_sample[i]);
489 free(types); free(inscns);
// 0 = indel candidate prepared; -1 = nothing to call here.
490 return n_alt > 0? 0 : -1;