// NOTE(review): this extract is elided — the embedded original line numbers jump,
// so struct definitions and several declarations between these lines are missing here.
// Instantiate a kstream reader over gzFile with a 16 KB buffer (klib kseq.h).
11 KSTREAM_INIT(gzFile, gzread, 16384)
// Penalty applied per mismatching site when evaluating a chimera flip point (see fragphase).
14 #define FLIP_PENALTY 2
// Bit flags stored in the global/phaseg_t `flag` field.
18 #define FLAG_FIX_CHIMERA 0x1
19 #define FLAG_LIST_EXCL 0x4
22 // configurations, initialized in the main function
23 int flag, k, min_baseQ, min_varLOD, max_depth;
24 // other global variables
// Per-fragment variant allele codes (0 = ambiguous/N, 1 = allele0, 2 = allele1);
// fixed-size buffer bounded by MAX_VARS (declared outside this extract).
35 int8_t seq[MAX_VARS]; // TODO: change to dynamic memory allocation!
// Bit-packed fragment state: vlen = number of covered het sites; single/flip/phase/phased flags.
37 uint32_t vlen:16, single:1, flip:1, phase:1, phased:1;
// Ordering predicate for fragments: sort by first covered variant position.
40 #define rseq_lt(a,b) ((a)->vpos < (b)->vpos)
// khash: a set of 64-bit keys (site list) and a map from read-name hash -> frag_t.
43 KHASH_SET_INIT_INT64(set64)
44 KHASH_MAP_INIT_INT64(64, frag_t)
46 typedef khash_t(64) nseq_t;
// ksort instantiation used to order fragment pointers by vpos (see rseq_lt above).
49 KSORT_INIT(rseq, frag_p, rseq_lt)
// Map 4-bit nt16 base codes to 2-bit A/C/G/T (4 = ambiguous).
51 static char nt16_nt4_table[] = { 4, 0, 1, 4, 2, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4 };
// X31 string hash (h = h*31 + c), used to key reads by name in the `64` hash.
53 static inline uint64_t X31_hash_string(const char *s)
56 if (h) for (++s ; *s; ++s) h = (h << 5) - h + *s;
// count1() — accumulate one fragment window of length l into the local haplotype
// counter `cnt`. Ambiguous bases (code 0) are enumerated over both alleles, each
// expansion contributing a fractional share (tail of loop elided from this extract).
60 static void count1(int l, const uint8_t *seq, int *cnt)
64 if (seq[l-1] == 0) return; // do nothing is the last base is ambiguous
65 for (i = n_ambi = 0; i < l; ++i) // collect ambiguous bases
66 if (seq[i] == 0) ++n_ambi;
67 if (l - n_ambi <= 1) return; // only one SNP
// Enumerate all 2^n_ambi assignments of the ambiguous positions.
68 for (x = 0; x < 1u<<n_ambi; ++x) { // count
69 for (i = j = 0, z = 0; i < l; ++i) {
// Unambiguous base contributes its fixed allele (0/1 after the -1 shift).
71 if (seq[i]) c = seq[i] - 1;
// count_all() — build, for every variant position, a 2^l counter of local
// haplotype windows observed across all non-singleton fragments in `hash`.
// Returns a vpos-element array of int[1<<l]; caller frees each row and the array.
82 static int **count_all(int l, int vpos, nseq_t *hash)
88 cnt = calloc(vpos, sizeof(void*));
89 for (i = 0; i < vpos; ++i) cnt[i] = calloc(1<<l, sizeof(int));
// Iterate every occupied bucket of the fragment hash.
90 for (k = 0; k < kh_end(hash); ++k) {
91 if (kh_exist(hash, k)) {
92 frag_t *f = &kh_val(hash, k);
93 if (f->vpos >= vpos || f->single) continue; // out of region; or singleton
94 if (f->vlen == 1) { // such reads should be flagged as deleted previously if everything is right
// Slide an l-wide window over the fragment; positions before the fragment
// start are filled with 0 (ambiguous) so early windows are left-padded.
98 for (j = 1; j < f->vlen; ++j) {
99 for (i = 0; i < l; ++i)
100 seq[i] = j < l - 1 - i? 0 : f->seq[j - (l - 1 - i)];
101 count1(l, seq, cnt[f->vpos + j]);
// dynaprog() — dynamic programming over local haplotype states to find the
// maximum-weight phasing path. States are the 2^(l-1) lexicographically smaller
// local haplotypes; w[i] are the window counts from count_all(). Returns the
// backtracked 0/1 phase path of length vpos (allocation of h/other locals elided).
110 static int8_t *dynaprog(int l, int vpos, int **w)
112 int *f[2], *curr, *prev, max, i;
114 uint32_t x, z = 1u<<(l-1), mask = (1u<<l) - 1;
115 f[0] = calloc(z, sizeof(int));
116 f[1] = calloc(z, sizeof(int));
117 b = calloc(vpos, sizeof(void*));
118 prev = f[0]; curr = f[1];
119 // fill the backtrack matrix
120 for (i = 0; i < vpos; ++i) {
121 int *wi = w[i], *tmp;
// One backtrack byte per state per position (0/1: which predecessor won).
123 bi = b[i] = calloc(z, 1);
124 /* In the following, x is the current state, which is the
125 * lexicographically smaller local haplotype. xc is the complement of
126 * x, or the larger local haplotype; y0 and y1 are the two predecessors
128 for (x = 0; x < z; ++x) { // x0 is the smaller
131 xc = ~x&mask; y0 = x>>1; y1 = xc>>1;
// Both transitions gain the same emission (wi[x] + wi[xc]); they differ
// only in which previous state they extend.
132 c0 = prev[y0] + wi[x] + wi[xc];
133 c1 = prev[y1] + wi[x] + wi[xc];
134 if (c0 > c1) bi[x] = 0, curr[x] = c0;
135 else bi[x] = 1, curr[x] = c1;
137 tmp = prev; prev = curr; curr = tmp; // swap
// Find the best terminal state, then backtrack; `which` tracks whether the
// path is currently on the complemented haplotype.
143 for (x = 0, max = 0, max_x = 0; x < z; ++x)
144 if (prev[x] > max) max = prev[x], max_x = x;
145 for (i = vpos - 1, x = max_x; i >= 0; --i) {
146 h[i] = which? (~x&1) : (x&1);
147 which = b[i][x]? !which : which;
148 x = b[i][x]? (~x&mask)>>1 : x>>1;
// Free the DP arrays; the returned path `h` is owned by the caller.
152 for (i = 0; i < vpos; ++i) free(b[i]);
153 free(f[0]); free(f[1]); free(b);
157 // phase each fragment
// fragphase() — assign each fragment to haplotype 0/1 by majority vote against
// `path`; optionally (flip != 0) detect and repair chimeric reads by flipping
// the head or tail at the best switch point. Returns per-site packed counts
// (support0 | errors0<<16 | support1<<32 | errors1<<48); caller frees.
158 static uint64_t *fragphase(int vpos, const int8_t *path, nseq_t *hash, int flip)
162 uint32_t *left, *rght, max;
163 left = rght = 0; max = 0;
164 pcnt = calloc(vpos, 8);
165 for (k = 0; k < kh_end(hash); ++k) {
166 if (kh_exist(hash, k)) {
168 frag_t *f = &kh_val(hash, k);
169 if (f->vpos >= vpos) continue;
// c[0]/c[1] = votes agreeing with haplotype 0 / haplotype 1.
172 for (i = 0; i < f->vlen; ++i) {
173 if (f->seq[i] == 0) continue;
174 ++c[f->seq[i] == path[f->vpos + i] + 1? 0 : 1];
176 f->phase = c[0] > c[1]? 0 : 1;
177 f->phased = c[0] == c[1]? 0 : 1;
// A fragment with >=3 votes on BOTH haplotypes is a chimera candidate.
180 if (flip && c[0] >= 3 && c[1] >= 3) {
181 int sum[2], m, mi, md;
182 if (f->vlen > max) { // enlarge the array
185 left = realloc(left, max * 4);
186 rght = realloc(rght, max * 4);
// Prefix (left) and suffix (rght) match/mismatch counts, packed as
// mismatches<<16 | matches.
188 for (i = 0, sum[0] = sum[1] = 0; i < f->vlen; ++i) { // get left counts
190 int c = f->phase? 2 - f->seq[i] : f->seq[i] - 1;
191 ++sum[c == path[f->vpos + i]? 0 : 1];
193 left[i] = sum[1]<<16 | sum[0];
195 for (i = f->vlen - 1, sum[0] = sum[1] = 0; i >= 0; --i) { // get right counts
197 int c = f->phase? 2 - f->seq[i] : f->seq[i] - 1;
198 ++sum[c == path[f->vpos + i]? 0 : 1];
200 rght[i] = sum[1]<<16 | sum[0];
202 // find the best flip point
// a[0]/a[1] score flipping the tail/head respectively; wrong-side sites
// after the flip are charged FLIP_PENALTY each.
203 for (i = m = 0, mi = -1, md = -1; i < f->vlen - 1; ++i) {
205 a[0] = (left[i]&0xffff) + (rght[i+1]>>16&0xffff) - (rght[i+1]&0xffff) * FLIP_PENALTY;
206 a[1] = (left[i]>>16&0xffff) + (rght[i+1]&0xffff) - (rght[i+1]>>16&0xffff) * FLIP_PENALTY;
208 if (a[0] > m) m = a[0], md = 0, mi = i;
210 if (a[1] > m) m = a[1], md = 1, mi = i;
// Flip only when the improvement beats FLIP_THRES on both haplotypes
// (FLIP_THRES is defined outside this extract).
213 if (m - c[0] >= FLIP_THRES && m - c[1] >= FLIP_THRES) { // then flip
215 if (md == 0) { // flip the tail
216 for (i = mi + 1; i < f->vlen; ++i)
217 if (f->seq[i] == 1) f->seq[i] = 2;
218 else if (f->seq[i] == 2) f->seq[i] = 1;
219 } else { // flip the head
220 for (i = 0; i <= mi; ++i)
221 if (f->seq[i] == 1) f->seq[i] = 2;
222 else if (f->seq[i] == 2) f->seq[i] = 1;
// Accumulate per-site support/error counters into pcnt, split by the
// fragment's assigned haplotype.
228 for (i = 0; i < f->vlen; ++i) {
230 if (f->seq[i] == 0) continue;
231 c = f->phase? 2 - f->seq[i] : f->seq[i] - 1;
232 if (c == path[f->vpos + i]) {
233 if (f->phase == 0) ++pcnt[f->vpos + i];
234 else pcnt[f->vpos + i] += 1ull<<32;
236 if (f->phase == 0) pcnt[f->vpos + i] += 1<<16;
237 else pcnt[f->vpos + i] += 1ull<<48;
243 free(left); free(rght);
// genmask() — scan the per-site error counters (from fragphase) and emit a list
// of regions with excessive phasing errors, packed as beg<<32 | end (indices into
// the variant array). *_n receives the region count; caller frees the list.
247 static uint64_t *genmask(int vpos, const uint64_t *pcnt, int *_n)
249 int i, max = 0, max_i = -1, m = 0, n = 0, beg = 0, score = 0;
251 for (i = 0; i < vpos; ++i) {
252 uint64_t x = pcnt[i];
// Unpack support0/err0/support1/err1 for this site.
253 int c[4], pre = score, s;
254 c[0] = x&0xffff; c[1] = x>>16&0xffff; c[2] = x>>32&0xffff; c[3] = x>>48&0xffff;
// Per-site score: negative when error-free (rewards long clean runs),
// otherwise grows with the error counts.
255 s = (c[1] + c[3] == 0)? -(c[0] + c[2]) : (c[1] + c[3] - 1);
256 if (c[3] > c[2]) s += c[3] - c[2];
257 if (c[1] > c[0]) s += c[1] - c[0];
// Kadane-style running score, clamped at zero.
259 if (score < 0) score = 0;
260 if (pre == 0 && score > 0) beg = i; // change from zero to non-zero
// Close a region when the score run ends (or at the last site) and the
// peak reached MASK_THRES (defined outside this extract).
261 if ((i == vpos - 1 || score == 0) && max >= MASK_THRES) {
264 list = realloc(list, m * 8);
266 list[n++] = (uint64_t)beg<<32 | max_i;
267 i = max_i; // reset i to max_i
269 } else if (score > max) max = score, max_i = i;
270 if (score == 0) max = 0;
276 // trim heading and tailing ambiguous bases; mark deleted and remove sequence
// clean_seqs() — trim leading/trailing ambiguous calls from each fragment,
// delete fragments left empty, and flag single-site fragments. The (elided)
// return value reports whether any fragment extends past `vpos` (see caller).
277 static int clean_seqs(int vpos, nseq_t *hash)
281 for (k = 0; k < kh_end(hash); ++k) {
282 if (kh_exist(hash, k)) {
283 frag_t *f = &kh_val(hash, k);
// Fragment starts beyond the current region: handled in the elided branch.
285 if (f->vpos >= vpos) {
// Find first (beg) and last (end) unambiguous site.
289 for (i = 0; i < f->vlen; ++i)
290 if (f->seq[i] != 0) break;
292 for (i = f->vlen - 1; i >= 0; --i)
293 if (f->seq[i] != 0) break;
295 if (end - beg <= 0) kh_del(64, hash, k);
// Shift the kept window to the front and adjust vpos/vlen accordingly.
297 if (beg != 0) memmove(f->seq, f->seq + beg, end - beg);
298 f->vpos += beg; f->vlen = end - beg;
299 f->single = f->vlen == 1? 1 : 0;
// dump_aln() — write buffered alignments ending before `min_pos` to the output
// BAMs, routing each read to haplotype 0/1, the chimera file (2), or randomly (3
// = unphased). Haplotype labels are randomized per call via is_flip.
306 static void dump_aln(phaseg_t *g, int min_pos, const nseq_t *hash)
309 is_flip = (drand48() < 0.5);
310 for (i = 0; i < g->n; ++i) {
// Look the read up by its name hash to recover its phasing state.
315 key = X31_hash_string(bam1_qname(b));
316 end = bam_calend(&b->core, bam1_cigar(b));
// Reads extending past min_pos stay buffered for the next phase set.
317 if (end > min_pos) break;
318 k = kh_get(64, hash, key);
319 if (k == kh_end(hash)) which = 3;
321 frag_t *f = &kh_val(hash, k);
322 if (f->phased && f->flip) which = 2;
323 else if (f->phased == 0) which = 3;
324 else { // phased and not flipped
// Tag the read with its phase as aux field ZP.
327 bam_aux_append(b, "ZP", 'A', 1, (uint8_t*)&c);
329 if (which < 2 && is_flip) which = 1 - which; // increase the randomness
331 if (which == 3) which = (drand48() < 0.5);
332 bam_write1(g->out[which], b);
// Compact the buffer: keep the reads not yet written.
336 memmove(g->b, g->b + i, (g->n - i) * sizeof(void*));
// phase() — phase one block of `vpos` het sites on chromosome `chr`: run the DP,
// assign fragments, mask error-prone regions, optionally fix chimeras, and print
// the PS/M?/FL/EV records. cns[i] packs position<<32 | consensus code. Also
// flushes phased alignments via dump_aln(). Many lines are elided in this extract.
340 static int phase(phaseg_t *g, const char *chr, int vpos, uint64_t *cns, nseq_t *hash)
342 int i, j, n_seqs = kh_size(hash), n_masked = 0, min_pos;
345 int8_t *path, *sitemask;
346 uint64_t *pcnt, *regmask;
348 if (vpos == 0) return 0;
349 i = clean_seqs(vpos, hash); // i is true if hash has an element with its vpos >= vpos
350 min_pos = i? cns[vpos]>>32 : 0x7fffffff;
// Degenerate single-site block (branch condition elided): print a trivial
// phase set and phase each fragment directly from its one allele.
352 printf("PS\t%s\t%d\t%d\n", chr, (int)(cns[0]>>32) + 1, (int)(cns[0]>>32) + 1);
353 printf("M0\t%s\t%d\t%d\t%c\t%c\t%d\t0\t0\t0\t0\n//\n", chr, (int)(cns[0]>>32) + 1, (int)(cns[0]>>32) + 1,
354 "ACGTX"[cns[0]&3], "ACGTX"[cns[0]>>16&3], g->vpos_shift + 1);
355 for (k = 0; k < kh_end(hash); ++k) {
356 if (kh_exist(hash, k)) {
357 frag_t *f = &kh_val(hash, k);
358 if (f->vpos) continue;
360 if (f->seq[0] == 0) f->phased = 0;
361 else f->phased = 1, f->phase = f->seq[0] - 1;
364 dump_aln(g, min_pos, hash);
// General case: DP phasing over the whole block.
371 printf("PS\t%s\t%d\t%d\n", chr, (int)(cns[0]>>32) + 1, (int)(cns[vpos-1]>>32) + 1);
372 sitemask = calloc(vpos, 1);
373 cnt = count_all(g->k, vpos, hash);
374 path = dynaprog(g->k, vpos, cnt);
375 for (i = 0; i < vpos; ++i) free(cnt[i]);
// First pass without chimera fixing, only to build the error mask.
377 pcnt = fragphase(vpos, path, hash, 0); // do not fix chimeras when masking
378 mask = genmask(vpos, pcnt, &n_masked);
379 regmask = calloc(n_masked, 8);
380 for (i = 0; i < n_masked; ++i) {
// Convert variant-index regions to reference coordinates.
381 regmask[i] = cns[mask[i]>>32]>>32<<32 | cns[(uint32_t)mask[i]]>>32;
382 for (j = mask[i]>>32; j <= (int32_t)mask[i]; ++j)
// Re-run fragment phasing with chimera fixing enabled when requested.
386 if (g->flag & FLAG_FIX_CHIMERA) {
388 pcnt = fragphase(vpos, path, hash, 1);
391 for (i = 0; i < n_masked; ++i)
392 printf("FL\t%s\t%d\t%d\n", chr, (int)(regmask[i]>>32) + 1, (int)regmask[i] + 1);
// One M? line per site: alleles ordered by the DP path, plus support/error counts.
393 for (i = 0; i < vpos; ++i) {
394 uint64_t x = pcnt[i];
396 c[0] = (cns[i]&0xffff)>>2 == 0? 4 : (cns[i]&3);
397 c[1] = (cns[i]>>16&0xffff)>>2 == 0? 4 : (cns[i]>>16&3);
398 printf("M%d\t%s\t%d\t%d\t%c\t%c\t%d\t%d\t%d\t%d\t%d\n", sitemask[i]+1, chr, (int)(cns[0]>>32), (int)(cns[i]>>32) + 1, "ACGTX"[c[path[i]]], "ACGTX"[c[1-path[i]]],
399 i + g->vpos_shift + 1, (int)(x&0xffff), (int)(x>>16&0xffff), (int)(x>>32&0xffff), (int)(x>>48&0xffff));
401 free(path); free(pcnt); free(regmask); free(sitemask);
// Dump supporting evidence (EV) as SAM-like lines, fragments sorted by vpos.
402 seqs = calloc(n_seqs, sizeof(void*));
403 for (k = 0, i = 0; k < kh_end(hash); ++k)
404 if (kh_exist(hash, k) && kh_val(hash, k).vpos < vpos && !kh_val(hash, k).single)
405 seqs[i++] = &kh_val(hash, k);
407 ks_introsort_rseq(n_seqs, seqs);
408 for (i = 0; i < n_seqs; ++i) {
410 printf("EV\t0\t%s\t%d\t40\t%dM\t*\t0\t0\t", chr, f->vpos + 1 + g->vpos_shift, f->vlen);
411 for (j = 0; j < f->vlen; ++j) {
412 uint32_t c = cns[f->vpos + j];
413 if (f->seq[j] == 0) putchar('N');
414 else putchar("ACGT"[f->seq[j] == 1? (c&3) : (c>>16&3)]);
416 printf("\t*\tYP:i:%d\tYF:i:%d\n", f->phase, f->flip);
// Advance the global het index offset and flush finished alignments.
421 g->vpos_shift += vpos;
422 dump_aln(g, min_pos, hash);
// update_vpos() — after a block of `vpos` sites has been phased, drop fragments
// fully contained in it and rebase the remaining fragments' vpos to the new origin.
426 static void update_vpos(int vpos, nseq_t *hash)
429 for (k = 0; k < kh_end(hash); ++k) {
430 if (kh_exist(hash, k)) {
431 frag_t *f = &kh_val(hash, k);
432 if (f->vpos < vpos) kh_del(64, hash, k); // TODO: if frag_t::seq is allocated dynamically, free it
433 else f->vpos -= vpos;
// shrink_hash() — placeholder: intended to rebuild/compact the fragment hash;
// currently (per the TODO) presumably returns its argument unchanged — body elided.
438 static nseq_t *shrink_hash(nseq_t *hash) // TODO: to implement
// readaln() — bam_plp_auto() read callback: pull the next alignment from g->fp
// and, when writing split BAMs (g->pre set), buffer a duplicate of every usable
// read in g->b for later routing by dump_aln().
443 static int readaln(void *data, bam1_t *b)
445 phaseg_t *g = (phaseg_t*)data;
447 ret = bam_read1(g->fp, b);
// Skip unmapped/secondary/QC-fail/duplicate reads when buffering.
448 if (!(b->core.flag & (BAM_FUNMAP|BAM_FSECONDARY|BAM_FQCFAIL|BAM_FDUP)) && g->pre) {
// Grow the buffer geometrically (doubling, starting at 16).
450 g->m = g->m? g->m<<1 : 16;
451 g->b = realloc(g->b, g->m * sizeof(void*));
453 g->b[g->n++] = bam_dup1(b);
// loadpos() — read a whitespace-separated "chrom pos" site list from `fn`
// ("-" = stdin, gzip allowed) into a set64 keyed by tid<<32 | (pos-1).
458 static khash_t(set64) *loadpos(const char *fn, bam_header_t *h)
464 khash_t(set64) *hash;
466 hash = kh_init(set64);
467 str = calloc(1, sizeof(kstring_t));
468 fp = strcmp(fn, "-")? gzopen(fn, "r") : gzdopen(fileno(stdin), "r");
470 while (ks_getuntil(ks, 0, str, &dret) >= 0) {
// Resolve the chromosome name to a target id via the BAM header.
471 int tid = bam_get_tid(h, str->s);
472 if (tid >= 0 && dret != '\n') {
473 if (ks_getuntil(ks, 0, str, &dret) >= 0) {
// Positions in the file are 1-based; store 0-based.
474 uint64_t x = (uint64_t)tid<<32 | (atoi(str->s) - 1);
475 kh_put(set64, hash, x, &ret);
// Skip any remaining columns on this line.
478 if (dret != '\n') while ((dret = ks_getc(ks)) > 0 && dret != '\n');
483 free(str->s); free(str);
// gl2cns() — pick the best diploid genotype from 16 genotype likelihoods (q[i<<2|j],
// lower = better) and pack the consensus: bit18 = het flag, bits16-17 = allele0,
// bits0-1 = allele1, bits2-15 = quality (second-best minus best, rounded).
487 static int gl2cns(float q[16])
491 min = min2 = 1e30; min_ij = -1;
// Scan the upper triangle (i <= j) for the best and second-best genotypes.
492 for (i = 0; i < 4; ++i) {
493 for (j = i; j < 4; ++j) {
494 if (q[i<<2|j] < min) min_ij = i<<2|j, min2 = min, min = q[i<<2|j];
495 else if (q[i<<2|j] < min2) min2 = q[i<<2|j];
// Homozygous best genotype yields 0 (not a het site).
498 return (min_ij>>2&3) == (min_ij&3)? 0 : 1<<18 | (min_ij>>2&3)<<16 | (min_ij&3) | (int)(min2 - min + .499) << 2;
// main_phase() — entry point for `samtools phase`: parse options, pileup the BAM,
// call het sites with errmod/gl2cns, collect per-read allele observations into
// the fragment hash, and phase chromosome blocks via phase(). Heavily elided here.
501 int main_phase(int argc, char *argv[])
503 extern void bam_init_header_hash(bam_header_t *header);
504 int c, tid, pos, vpos = 0, n, lasttid = -1, max_vpos = 0;
505 const bam_pileup1_t *plp;
512 khash_t(set64) *set = 0;
// Defaults: fix chimeras, LOD 40, block length 13, baseQ 13, depth cap 256.
516 memset(&g, 0, sizeof(phaseg_t));
517 g.flag = FLAG_FIX_CHIMERA;
518 g.min_varLOD = 40; g.k = 13; g.min_baseQ = 13; g.max_depth = 256;
519 while ((c = getopt(argc, argv, "Q:eFq:k:b:l:D:")) >= 0) {
521 case 'D': g.max_depth = atoi(optarg); break;
522 case 'q': g.min_varLOD = atoi(optarg); break;
523 case 'Q': g.min_baseQ = atoi(optarg); break;
524 case 'k': g.k = atoi(optarg); break;
525 case 'F': g.flag &= ~FLAG_FIX_CHIMERA; break;
526 case 'e': g.flag |= FLAG_LIST_EXCL; break;
527 case 'b': g.pre = strdup(optarg); break;
528 case 'l': fn_list = strdup(optarg); break;
// No input BAM: print usage and exit (return elided).
531 if (argc == optind) {
532 fprintf(stderr, "\n");
533 fprintf(stderr, "Usage: samtools phase [options] <in.bam>\n\n");
534 fprintf(stderr, "Options: -k INT block length [%d]\n", g.k);
535 fprintf(stderr, " -b STR prefix of BAMs to output [null]\n");
536 fprintf(stderr, " -q INT min het phred-LOD [%d]\n", g.min_varLOD);
537 fprintf(stderr, " -Q INT min base quality in het calling [%d]\n", g.min_baseQ);
538 fprintf(stderr, " -D INT max read depth [%d]\n", g.max_depth);
539 // fprintf(stderr, " -l FILE list of sites to phase [null]\n");
540 fprintf(stderr, " -F do not attempt to fix chimeras\n");
541 // fprintf(stderr, " -e do not discover SNPs (effective with -l)\n");
542 fprintf(stderr, "\n");
545 g.fp = strcmp(argv[optind], "-")? bam_open(argv[optind], "r") : bam_dopen(fileno(stdin), "r");
546 h = bam_header_read(g.fp);
547 if (fn_list) { // read the list of sites to phase
548 bam_init_header_hash(h);
549 set = loadpos(fn_list, h);
551 } else g.flag &= ~FLAG_LIST_EXCL;
552 if (g.pre) { // open BAMs to write
// Three outputs: <pre>.0.bam, <pre>.1.bam (haplotypes) and <pre>.chimera.bam.
553 char *s = malloc(strlen(g.pre) + 20);
554 strcpy(s, g.pre); strcat(s, ".0.bam"); g.out[0] = bam_open(s, "w");
555 strcpy(s, g.pre); strcat(s, ".1.bam"); g.out[1] = bam_open(s, "w");
556 strcpy(s, g.pre); strcat(s, ".chimera.bam"); g.out[2] = bam_open(s, "w");
557 for (c = 0; c <= 2; ++c) bam_header_write(g.out[c], h);
561 iter = bam_plp_init(readaln, &g);
// Error model for genotype-likelihood calculation (dependency coefficient 0.17).
564 em = errmod_init(1. - 0.83);
565 bases = calloc(g.max_depth, 2);
// Self-describing header of the text output format.
567 printf("CC\tDescriptions:\nCC\n");
568 printf("CC\t CC comments\n");
569 printf("CC\t PS start of a phase set\n");
570 printf("CC\t FL filtered region\n");
571 printf("CC\t M[012] markers; 0 for singletons, 1 for phased and 2 for filtered\n");
572 printf("CC\t EV supporting reads; SAM format\n");
573 printf("CC\t // end of a phase set\nCC\n");
574 printf("CC\tFormats of PS, FL and M[012] lines (1-based coordinates):\nCC\n");
575 printf("CC\t PS chr phaseSetStart phaseSetEnd\n");
576 printf("CC\t FL chr filterStart filterEnd\n");
577 printf("CC\t M? chr PS pos allele0 allele1 hetIndex #supports0 #errors0 #supp1 #err1\n");
// Main pileup loop: one iteration per reference position with coverage.
580 while ((plp = bam_plp_auto(iter, &tid, &pos, &n)) != 0) {
581 int i, k, c, tmp, dophase = 1, in_set = 0;
584 if (tid != lasttid) { // change of chromosome
// Flush everything accumulated on the previous chromosome.
587 seqs = shrink_hash(seqs);
588 phase(&g, h->target_name[lasttid], vpos, cns, seqs);
589 update_vpos(0x7fffffff, seqs);
594 if (set && kh_get(set64, set, (uint64_t)tid<<32 | pos) != kh_end(set)) in_set = 1;
595 if (n > g.max_depth) continue; // do not proceed if the depth is too high
596 // fill the bases array and check if there is a variant
597 for (i = k = 0; i < n; ++i) {
598 const bam_pileup1_t *p = plp + i;
601 if (p->is_del || p->is_refskip) continue;
602 baseQ = bam1_qual(p->b)[p->qpos];
603 if (baseQ < g.min_baseQ) continue;
604 seq = bam1_seq(p->b);
605 b = bam_nt16_nt4_table[bam1_seqi(seq, p->qpos)];
// Cap base quality by mapping quality.
607 q = baseQ < p->b->core.qual? baseQ : p->b->core.qual;
610 bases[k++] = q<<5 | (int)bam1_strand(p->b)<<4 | b;
612 if (k == 0) continue;
613 errmod_cal(em, k, 4, bases, q); // compute genotype likelihood
614 c = gl2cns(q); // get the consensus
615 // tell if to proceed
616 if (set && (g.flag&FLAG_LIST_EXCL) && !in_set) continue; // not in the list
617 if (!in_set && (c&0xffff)>>2 < g.min_varLOD) continue; // not a variant
// Record the het site; grow the cns array geometrically.
619 if (vpos == max_vpos) {
620 max_vpos = max_vpos? max_vpos<<1 : 128;
621 cns = realloc(cns, max_vpos * 8);
623 cns[vpos] = (uint64_t)pos<<32 | c;
// Record each covering read's allele at this site into its fragment.
624 for (i = 0; i < n; ++i) {
625 const bam_pileup1_t *p = plp + i;
628 uint8_t *seq = bam1_seq(p->b);
630 if (p->is_del || p->is_refskip) continue;
631 if (p->b->core.qual == 0) continue;
// Map the read base to allele code: 1 = allele0, 2 = allele1 (else ambiguous).
633 c = nt16_nt4_table[(int)bam1_seqi(seq, p->qpos)];
634 if (c == (cns[vpos]&3)) c = 1;
635 else if (c == (cns[vpos]>>16&3)) c = 2;
638 key = X31_hash_string(bam1_qname(p->b));
639 k = kh_put(64, seqs, key, &tmp);
640 f = &kh_val(seqs, k);
641 if (tmp == 0) { // present in the hash table
642 if (vpos - f->vpos + 1 < MAX_VARS) {
643 f->vlen = vpos - f->vpos + 1;
644 f->seq[f->vlen-1] = c;
645 f->end = bam_calend(&p->b->core, bam1_cigar(p->b));
// New fragment: initialize its window at this site.
649 memset(f->seq, 0, MAX_VARS);
650 f->beg = p->b->core.pos;
651 f->end = bam_calend(&p->b->core, bam1_cigar(p->b));
652 f->vpos = vpos, f->vlen = 1, f->seq[0] = c, f->single = f->phased = f->flip = 0;
// Phase a finished block (trigger condition elided), then rebase indices.
656 seqs = shrink_hash(seqs);
657 phase(&g, h->target_name[tid], vpos, cns, seqs);
658 update_vpos(vpos, seqs);
// Final flush for the last chromosome, then tear everything down.
664 if (tid >= 0) phase(&g, h->target_name[tid], vpos, cns, seqs);
665 bam_header_destroy(h);
666 bam_plp_destroy(iter);
668 kh_destroy(64, seqs);
669 kh_destroy(set64, set);
674 for (c = 0; c <= 2; ++c) bam_close(g.out[c]);
675 free(g.pre); free(g.b);