KSORT_INIT_GENERIC(uint32_t)

#define MINUS_CONST 0x10000000
#define INDEL_WINDOW_SIZE 50
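
// Parse the @RG lines in the BAM header text and record the IDs of read groups
// whose PL (platform) field matches an entry in `list`. Returns the hash of
// accepted read-group IDs, creating it when _hash is NULL.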
void *bcf_call_add_rg(void *_hash, const char *hdtext, const char *list)
    const char *s, *p, *q, *r, *t;
    if (list == 0 || hdtext == 0) return _hash;
    if (_hash == 0) _hash = kh_init(rg);
    hash = (khash_t(rg)*)_hash;
    if ((s = strstr(hdtext, "@RG\t")) == 0) return hash;
        t = strstr(s + 4, "@RG\t"); // the next @RG
        if ((p = strstr(s, "\tID:")) != 0) p += 4;
        if ((q = strstr(s, "\tPL:")) != 0) q += 4;
        if (p && q && (t == 0 || (p < t && q < t))) { // ID and PL are both present
            for (r = p; *r && *r != '\t' && *r != '\n'; ++r); lp = r - p;
            for (r = q; *r && *r != '\t' && *r != '\n'; ++r); lq = r - q;
            x = calloc((lp > lq? lp : lq) + 1, 1);
            for (r = q; *r && *r != '\t' && *r != '\n'; ++r) x[r-q] = *r;
            if (strstr(list, x)) { // insert the ID into the hash table
                for (r = p; *r && *r != '\t' && *r != '\n'; ++r) x[r-p] = *r;
                k = kh_get(rg, hash, x);
                if (k == kh_end(hash)) k = kh_put(rg, hash, x, &ret);
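
// Release the read-group hash built by bcf_call_add_rg(), freeing the key
// strings it owns.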
void bcf_call_del_rghash(void *_hash)
    khash_t(rg) *hash = (khash_t(rg)*)_hash;
    if (hash == 0) return;
    for (k = kh_begin(hash); k < kh_end(hash); ++k)
        if (kh_exist(hash, k))
            free((char*)kh_key(hash, k));
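
// Map a reference (target) coordinate tpos to the corresponding query
// coordinate by walking the CIGAR. When tpos falls inside a deletion or a
// reference skip, *_tpos is set to the left or right edge of the gap
// (depending on is_left) and the query offset at that edge is returned.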
static int tpos2qpos(const bam1_core_t *c, const uint32_t *cigar, int32_t tpos, int is_left, int32_t *_tpos)
    int k, x = c->pos, y = 0, last_y = 0;
    for (k = 0; k < c->n_cigar; ++k) {
        int op = cigar[k] & BAM_CIGAR_MASK;
        int l = cigar[k] >> BAM_CIGAR_SHIFT;
        if (op == BAM_CMATCH || op == BAM_CEQUAL || op == BAM_CDIFF) {
            if (c->pos > tpos) return y;
                return y + (tpos - x);
        } else if (op == BAM_CINS || op == BAM_CSOFT_CLIP) y += l;
        else if (op == BAM_CDEL || op == BAM_CREF_SKIP) {
                *_tpos = is_left? x : x + l;
// FIXME: check if the inserted sequence is consistent with the homopolymer run
// l is the relative gap length and l_run is the length of the homopolymer on the reference
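// The returned quality is the smaller of an affine gap penalty
// (openQ + extQ * (|l| - 1)) and a tandem-repeat cap (tandemQ * |l| / l_run),
// the latter applied only when the homopolymer run is at least 3 bp long.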
static inline int est_seqQ(const bcf_callaux_t *bca, int l, int l_run)
    q = bca->openQ + bca->extQ * (abs(l) - 1);
    qh = l_run >= 3? (int)(bca->tandemQ * (double)abs(l) / l_run + .499) : 1000;
    return q < qh? q : qh;
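
// Estimate the length of the region over which the indel can be placed
// ambiguously: walk rightwards from pos, scoring +1 when the reference base
// matches the repeated inserted sequence (ins4, 0..4 coding for ACGTN) or,
// for deletions, the repeated reference unit, and -10 on a mismatch; the
// extent of the best-scoring prefix is reported.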
static inline int est_indelreg(int pos, const char *ref, int l, char *ins4)
    int i, j, max = 0, max_i = pos, score = 0;
    for (i = pos + 1, j = 0; ref[i]; ++i, ++j) {
        if (ins4) score += (toupper(ref[i]) != "ACGTN"[(int)ins4[j%l]])? -10 : 1;
        else score += (toupper(ref[i]) != toupper(ref[pos+1+j%l]))? -10 : 1;
        if (score < 0) break;
        if (max < score) max = score, max_i = i;
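
// Prepare indel calling at `pos`: enumerate the candidate indel types seen in
// the pileup, build per-sample consensus references and per-type insertion
// consensus sequences, realign every read against each candidate allele with
// kpa_glocal(), and store the per-read call (type index, seqQ, indelQ) packed
// in p->aux, with the selected alleles in bca->indel_types[] and bca->inscns.
// Returns 0 if at least one read supports a non-reference indel, -1 otherwise.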
int bcf_call_gap_prep(int n, int *n_plp, bam_pileup1_t **plp, int pos, bcf_callaux_t *bca, const char *ref,
                      const void *rghash)
    int i, s, j, k, t, n_types, *types, max_rd_len, left, right, max_ins, *score1, *score2, max_ref2;
    int N, K, l_run, ref_type, n_alt;
    char *inscns = 0, *ref2, *query, **ref_sample;
    khash_t(rg) *hash = (khash_t(rg)*)rghash;
    if (ref == 0 || bca == 0) return -1;
    // mark filtered reads
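    // p->aux is used here as a temporary filter flag (1 = filtered out, 0 = kept);
    // later in this function it is overwritten with the packed per-read indel call.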
        for (s = N = 0; s < n; ++s) {
            for (i = 0; i < n_plp[s]; ++i) {
                bam_pileup1_t *p = plp[s] + i;
                const uint8_t *rg = bam_aux_get(p->b, "RG");
                p->aux = 1; // filtered by default
                khint_t k = kh_get(rg, hash, (const char*)(rg + 1));
                if (k != kh_end(hash)) p->aux = 0, ++N; // not filtered
        if (N == 0) return -1; // no reads left
    // determine if there is a gap
    for (s = N = 0; s < n; ++s) {
        for (i = 0; i < n_plp[s]; ++i)
            if (plp[s][i].indel != 0) break;
        if (i < n_plp[s]) break;
    if (s == n) return -1; // there is no indel at this position.
    for (s = N = 0; s < n; ++s) N += n_plp[s]; // N is the total number of reads
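    // Indel lengths (negative for deletions) are offset by MINUS_CONST so they
    // can be collected and sorted as unsigned 32-bit integers; the offset is
    // subtracted again when types[] is filled below.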
    { // find out how many types of indels are present
        int m, n_alt = 0, n_tot = 0;
        aux = calloc(N + 1, 4);
        aux[m++] = MINUS_CONST; // zero indel is always a type
        for (s = 0; s < n; ++s) {
            for (i = 0; i < n_plp[s]; ++i) {
                const bam_pileup1_t *p = plp[s] + i;
                if (rghash == 0 || p->aux == 0) {
                    aux[m++] = MINUS_CONST + p->indel;
                j = bam_cigar2qlen(&p->b->core, bam1_cigar(p->b));
                if (j > max_rd_len) max_rd_len = j;
        // To prevent long stretches of N's from being mistaken for indels (sometimes thousands of bases),
        // count the N's in the reference across the read window and skip positions where half or more of the reference bases are N.
        int nN = 0; for (i = pos; i - pos < max_rd_len && ref[i]; i++) if (ref[i] == 'N') nN++;
        if (nN * 2 > (i - pos)) { free(aux); return -1; }
        ks_introsort(uint32_t, m, aux);
        // squeeze out identical types
        for (i = 1, n_types = 1; i < m; ++i)
            if (aux[i] != aux[i-1]) ++n_types;
        if (n_types == 1 || (double)n_alt / n_tot < bca->min_frac || n_alt < bca->min_support) { // then skip
            free(aux); return -1;
            if (bam_verbose >= 2)
                fprintf(stderr, "[%s] excessive INDEL alleles at position %d. Skip the position.\n", __func__, pos + 1);
        types = (int*)calloc(n_types, sizeof(int));
        types[t++] = aux[0] - MINUS_CONST;
        for (i = 1; i < m; ++i)
            if (aux[i] != aux[i-1])
                types[t++] = aux[i] - MINUS_CONST;
        for (t = 0; t < n_types; ++t)
            if (types[t] == 0) break;
        ref_type = t; // the index of the reference type (0)
    { // calculate left and right boundary
        left = pos > INDEL_WINDOW_SIZE? pos - INDEL_WINDOW_SIZE : 0;
        right = pos + INDEL_WINDOW_SIZE;
        if (types[0] < 0) right -= types[0];
        // in case the window extends beyond the end of the reference
        for (i = pos; i < right; ++i)
            if (ref[i] == 0) break;
    /* The following block fixes a long-existing flaw in the INDEL
     * calling model: the interference of nearby SNPs. However, it also
     * reduces the power because sometimes, substitutions caused by
     * indels are not distinguishable from true mutations. Multiple
     * sequence realignment helps to increase the power.
     */
    { // construct per-sample consensus
        int L = right - left + 1, max_i, max2_i;
        uint32_t *cns, max, max2;
        ref_sample = calloc(n, sizeof(void*));
        for (i = 0; i < right - left; ++i)
            ref0[i] = bam_nt16_table[(int)ref[i+left]];
        for (s = 0; s < n; ++s) {
            r = ref_sample[s] = calloc(L, 1);
            memset(cns, 0, sizeof(int) * L);
            // collect ref and non-ref counts
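            // cns[i] packs two 16-bit counters: low 16 bits = reads whose base
            // matches the reference at column i, high 16 bits = reads carrying
            // a different base.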
            for (i = 0; i < n_plp[s]; ++i) {
                bam_pileup1_t *p = plp[s] + i;
                uint32_t *cigar = bam1_cigar(b);
                uint8_t *seq = bam1_seq(b);
                int x = b->core.pos, y = 0;
                for (k = 0; k < b->core.n_cigar; ++k) {
                    int op = cigar[k]&0xf;
                    int j, l = cigar[k]>>4;
                    if (op == BAM_CMATCH || op == BAM_CEQUAL || op == BAM_CDIFF) {
                        for (j = 0; j < l; ++j)
                            if (x + j >= left && x + j < right)
                                cns[x+j-left] += (bam1_seqi(seq, y+j) == ref0[x+j-left])? 1 : 0x10000;
                    } else if (op == BAM_CDEL || op == BAM_CREF_SKIP) x += l;
                    else if (op == BAM_CINS || op == BAM_CSOFT_CLIP) y += l;
            // determine the consensus
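            // Start from the reference and mask the two columns with the most
            // non-reference bases as ambiguous (15 = N in nt16 coding), unless
            // at least 70% of the reads at that column agree with the reference.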
            for (i = 0; i < right - left; ++i) r[i] = ref0[i];
            max = max2 = 0; max_i = max2_i = -1;
            for (i = 0; i < right - left; ++i) {
                if (cns[i]>>16 >= max>>16) max2 = max, max2_i = max_i, max = cns[i], max_i = i;
                else if (cns[i]>>16 >= max2>>16) max2 = cns[i], max2_i = i;
            if ((double)(max&0xffff) / ((max&0xffff) + (max>>16)) >= 0.7) max_i = -1;
            if ((double)(max2&0xffff) / ((max2&0xffff) + (max2>>16)) >= 0.7) max2_i = -1;
            if (max_i >= 0) r[max_i] = 15;
            if (max2_i >= 0) r[max2_i] = 15;
            // for (i = 0; i < right - left; ++i) fputc("=ACMGRSVTWYHKDBN"[(int)r[i]], stderr); fputc('\n', stderr);
        free(ref0); free(cns);
    { // the length of the homopolymer run around the current position
        int c = bam_nt16_table[(int)ref[pos + 1]];
        if (c == 15) l_run = 1;
            for (i = pos + 2; ref[i]; ++i)
                if (bam_nt16_table[(int)ref[i]] != c) break;
            for (i = pos; i >= 0; --i)
                if (bam_nt16_table[(int)ref[i]] != c) break;
    // construct the consensus sequence
    max_ins = types[n_types - 1]; // max_ins is at least 0
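    // inscns is an n_types x max_ins matrix of consensus inserted bases in
    // 0..4 (ACGTN) coding; inscns_aux counts, for each insertion type and
    // offset, how many reads support each of the four bases.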
        int *inscns_aux = calloc(4 * n_types * max_ins, sizeof(int));
        // count the number of occurrences of each base at each position for each type of insertion
        for (t = 0; t < n_types; ++t) {
            for (s = 0; s < n; ++s) {
                for (i = 0; i < n_plp[s]; ++i) {
                    bam_pileup1_t *p = plp[s] + i;
                    if (p->indel == types[t]) {
                        uint8_t *seq = bam1_seq(p->b);
                        for (k = 1; k <= p->indel; ++k) {
                            int c = bam_nt16_nt4_table[bam1_seqi(seq, p->qpos + k)];
                            if (c < 4) ++inscns_aux[(t*max_ins+(k-1))*4 + c];
        // use the majority rule to construct the consensus
        inscns = calloc(n_types * max_ins, 1);
        for (t = 0; t < n_types; ++t) {
            for (j = 0; j < types[t]; ++j) {
                int max = 0, max_k = -1, *ia = &inscns_aux[(t*max_ins+j)*4];
                for (k = 0; k < 4; ++k)
                        max = ia[k], max_k = k;
                inscns[t*max_ins + j] = max? max_k : 4;

    // compute the likelihood given each type of indel for each read
    max_ref2 = right - left + 2 + 2 * (max_ins > -types[0]? max_ins : -types[0]);
    ref2 = calloc(max_ref2, 1);
    query = calloc(right - left + max_rd_len + max_ins + 2, 1);
    score1 = calloc(N * n_types, sizeof(int));
    score2 = calloc(N * n_types, sizeof(int));
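    // For every (read, type) pair, score1/score2 store the kpa_glocal()
    // alignment score packed as (raw score << 8 | length-normalized score
    // capped at 255); apf1 and apf2 are two parameter sets with different
    // gap-open/extension probabilities.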
    for (t = 0; t < n_types; ++t) {
        kpa_par_t apf1 = { 1e-4, 1e-2, 10 }, apf2 = { 1e-6, 1e-3, 10 };
        apf1.bw = apf2.bw = abs(types[t]) + 3;
        if (types[t] == 0) ir = 0;
        else if (types[t] > 0) ir = est_indelreg(pos, ref, types[t], &inscns[t*max_ins]);
        else ir = est_indelreg(pos, ref, -types[t], 0);
        if (ir > bca->indelreg) bca->indelreg = ir;
        // fprintf(stderr, "%d, %d, %d\n", pos, types[t], ir);
        for (s = K = 0; s < n; ++s) {
            for (k = 0, j = left; j <= pos; ++j)
                ref2[k++] = bam_nt16_nt4_table[(int)ref_sample[s][j-left]];
            if (types[t] <= 0) j += -types[t];
            else for (l = 0; l < types[t]; ++l)
                ref2[k++] = inscns[t*max_ins + l];
            for (; j < right && ref[j]; ++j)
                ref2[k++] = bam_nt16_nt4_table[(int)ref_sample[s][j-left]];
            for (; k < max_ref2; ++k) ref2[k] = 4;
            if (j < right) right = j;
            // align each read to ref2
            for (i = 0; i < n_plp[s]; ++i, ++K) {
                bam_pileup1_t *p = plp[s] + i;
                int qbeg, qend, tbeg, tend, sc, kk;
                uint8_t *seq = bam1_seq(p->b);
                uint32_t *cigar = bam1_cigar(p->b);
                if (p->b->core.flag&4) continue; // unmapped reads
                // FIXME: the following loop would be better moved outside; in any case, the realignment below dominates the run time.
                for (kk = 0; kk < p->b->core.n_cigar; ++kk)
                    if ((cigar[kk]&BAM_CIGAR_MASK) == BAM_CREF_SKIP) break;
                if (kk < p->b->core.n_cigar) continue;
                // FIXME: the following skips soft clips, but using them may be more sensitive.
                // determine the start and end of sequences for alignment
                qbeg = tpos2qpos(&p->b->core, bam1_cigar(p->b), left, 0, &tbeg);
                qend = tpos2qpos(&p->b->core, bam1_cigar(p->b), right, 1, &tend);
                    tbeg = tbeg - l > left? tbeg - l : left;
                // write the query sequence
                for (l = qbeg; l < qend; ++l)
                    query[l - qbeg] = bam_nt16_nt4_table[bam1_seqi(seq, l)];
                { // do realignment; this is the bottleneck
                    const uint8_t *qual = bam1_qual(p->b), *bq;
                    qq = calloc(qend - qbeg, 1);
                    bq = (uint8_t*)bam_aux_get(p->b, "ZQ");
                    if (bq) ++bq; // skip type
                    for (l = qbeg; l < qend; ++l) {
                        qq[l - qbeg] = bq? qual[l] + (bq[l] - 64) : qual[l];
                        if (qq[l - qbeg] > 30) qq[l - qbeg] = 30;
                        if (qq[l - qbeg] < 7) qq[l - qbeg] = 7;
                    sc = kpa_glocal((uint8_t*)ref2 + tbeg - left, tend - tbeg + abs(types[t]),
                                    (uint8_t*)query, qend - qbeg, qq, &apf1, 0, 0);
                    l = (int)(100. * sc / (qend - qbeg) + .499); // used for adjusting indelQ below
                    if (l > 255) l = 255;
                    score1[K*n_types + t] = score2[K*n_types + t] = sc<<8 | l;
                        sc = kpa_glocal((uint8_t*)ref2 + tbeg - left, tend - tbeg + abs(types[t]),
                                        (uint8_t*)query, qend - qbeg, qq, &apf2, 0, 0);
                        l = (int)(100. * sc / (qend - qbeg) + .499);
                        if (l > 255) l = 255;
                        score2[K*n_types + t] = sc<<8 | l;
                // for (l = 0; l < tend - tbeg + abs(types[t]); ++l)
                //     fputc("ACGTN"[(int)ref2[tbeg-left+l]], stderr);
                // for (l = 0; l < qend - qbeg; ++l) fputc("ACGTN"[(int)query[l]], stderr);
                // fprintf(stderr, "pos=%d type=%d read=%d:%d name=%s qbeg=%d tbeg=%d score=%d\n", pos, types[t], s, i, bam1_qname(p->b), qbeg, tbeg, sc);
    free(ref2); free(query);
        sc = alloca(n_types * sizeof(int));
        sumq = alloca(n_types * sizeof(int));
        memset(sumq, 0, sizeof(int) * n_types);
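        // sc[t] repacks a read's score for type t as (score<<6 | t): after the
        // insertion sort, sc[0]&0x3f is the index of the best type, sc[0]>>6&0xff
        // its length-normalized score and sc[0]>>14 its raw alignment score.
        // sumq[] accumulates min(indelQ, seqQ) per type to rank the alleles.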
        for (s = K = 0; s < n; ++s) {
            for (i = 0; i < n_plp[s]; ++i, ++K) {
                bam_pileup1_t *p = plp[s] + i;
                int *sct = &score1[K*n_types], indelQ1, indelQ2, seqQ, indelQ;
                for (t = 0; t < n_types; ++t) sc[t] = sct[t]<<6 | t;
                for (t = 1; t < n_types; ++t) // insertion sort
                    for (j = t; j > 0 && sc[j] < sc[j-1]; --j)
                        tmp = sc[j], sc[j] = sc[j-1], sc[j-1] = tmp;
                /* errmod_cal() assumes that if the call is wrong, the
                 * likelihoods of other events are equal. This is about
                 * right for substitutions, but is not desired for
                 * indels. To reuse errmod_cal(), I have to make a
                 * compromise for multi-allelic indels.
                 */
                if ((sc[0]&0x3f) == ref_type) {
                    indelQ1 = (sc[1]>>14) - (sc[0]>>14);
                    seqQ = est_seqQ(bca, types[sc[1]&0x3f], l_run);
                    for (t = 0; t < n_types; ++t) // look for the reference type
                        if ((sc[t]&0x3f) == ref_type) break;
                    indelQ1 = (sc[t]>>14) - (sc[0]>>14);
                    seqQ = est_seqQ(bca, types[sc[0]&0x3f], l_run);
                tmp = sc[0]>>6 & 0xff;
                indelQ1 = tmp > 111? 0 : (int)((1. - tmp/111.) * indelQ1 + .499); // reduce indelQ
                sct = &score2[K*n_types];
                for (t = 0; t < n_types; ++t) sc[t] = sct[t]<<6 | t;
                for (t = 1; t < n_types; ++t) // insertion sort
                    for (j = t; j > 0 && sc[j] < sc[j-1]; --j)
                        tmp = sc[j], sc[j] = sc[j-1], sc[j-1] = tmp;
                if ((sc[0]&0x3f) == ref_type) {
                    indelQ2 = (sc[1]>>14) - (sc[0]>>14);
                    for (t = 0; t < n_types; ++t) // look for the reference type
                        if ((sc[t]&0x3f) == ref_type) break;
                    indelQ2 = (sc[t]>>14) - (sc[0]>>14);
                tmp = sc[0]>>6 & 0xff;
                indelQ2 = tmp > 111? 0 : (int)((1. - tmp/111.) * indelQ2 + .499);
                // pick the smaller between indelQ1 and indelQ2
                indelQ = indelQ1 < indelQ2? indelQ1 : indelQ2;
                if (indelQ > 255) indelQ = 255;
                if (seqQ > 255) seqQ = 255;
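                // pack the per-read call: bits 16 and up = index of the called
                // type, bits 8-15 = seqQ, bits 0-7 = indelQ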
                p->aux = (sc[0]&0x3f)<<16 | seqQ<<8 | indelQ; // use 22 bits in total
                sumq[sc[0]&0x3f] += indelQ < seqQ? indelQ : seqQ;
                // fprintf(stderr, "pos=%d read=%d:%d name=%s call=%d indelQ=%d seqQ=%d\n", pos, s, i, bam1_qname(p->b), types[sc[0]&0x3f], indelQ, seqQ);
        // determine bca->indel_types[] and bca->inscns
        bca->maxins = max_ins;
        bca->inscns = realloc(bca->inscns, bca->maxins * 4);
        for (t = 0; t < n_types; ++t)
            sumq[t] = sumq[t]<<6 | t;
        for (t = 1; t < n_types; ++t) // insertion sort
            for (j = t; j > 0 && sumq[j] > sumq[j-1]; --j)
                tmp = sumq[j], sumq[j] = sumq[j-1], sumq[j-1] = tmp;
        for (t = 0; t < n_types; ++t) // look for the reference type
            if ((sumq[t]&0x3f) == ref_type) break;
        if (t) { // then move the reference type to the first
            for (; t > 0; --t) sumq[t] = sumq[t-1];
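        // keep at most 4 indel alleles: the reference type first, then the
        // remaining types by decreasing summed quality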
        for (t = 0; t < 4; ++t) bca->indel_types[t] = B2B_INDEL_NULL;
        for (t = 0; t < 4 && t < n_types; ++t) {
            bca->indel_types[t] = types[sumq[t]&0x3f];
            memcpy(&bca->inscns[t * bca->maxins], &inscns[(sumq[t]&0x3f) * max_ins], bca->maxins);
        for (s = n_alt = 0; s < n; ++s) {
            for (i = 0; i < n_plp[s]; ++i) {
                bam_pileup1_t *p = plp[s] + i;
                int x = types[p->aux>>16&0x3f];
                for (j = 0; j < 4; ++j)
                    if (x == bca->indel_types[j]) break;
                p->aux = j<<16 | (j == 4? 0 : (p->aux&0xffff));
                if ((p->aux>>16&0x3f) > 0) ++n_alt;
                // fprintf(stderr, "X pos=%d read=%d:%d name=%s call=%d type=%d q=%d seqQ=%d\n", pos, s, i, bam1_qname(p->b), p->aux>>16&63, bca->indel_types[p->aux>>16&63], p->aux&0xff, p->aux>>8&0xff);
    free(score1); free(score2);
    for (i = 0; i < n; ++i) free(ref_sample[i]);
    free(types); free(inscns);
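    // 0: an indel candidate was prepared at this position; -1: no read supports a non-reference indel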
    return n_alt > 0? 0 : -1;