/* Node of the singly linked list that buffers alignment records while they
 * overlap the current pileup position. Nodes are recycled via the memory
 * pool below. NOTE(review): interior lines are elided in this view (the
 * bam1_t payload and beg/end fields referenced elsewhere are not visible). */
7 typedef struct __linkbuf_t {
10 struct __linkbuf_t *next; /* next node in the chain; 0 terminates the list */
13 /* --- BEGIN: Memory pool */
/* Allocate and zero-initialize a fresh memory pool for lbnode_t recycling.
 * NOTE(review): the calloc result is not checked here — TODO confirm the
 * (elided) surrounding lines handle allocation failure. */
20 static mempool_t *mp_init()
23 mp = (mempool_t*)calloc(1, sizeof(mempool_t));
/* Tear down a memory pool: free the bam record data owned by every cached
 * node. NOTE(review): the frees of each node, the buf array and mp itself
 * are presumably on the elided lines — confirm against the full source. */
26 static void mp_destroy(mempool_t *mp)
29 for (k = 0; k < mp->n; ++k) {
30 free(mp->buf[k]->b.data); /* each cached node still owns its bam1_t data buffer */
/* Hand out a node: pop a recycled one from the pool if available, otherwise
 * calloc a fresh zeroed node. NOTE(review): the calloc return value is not
 * checked, so callers may receive NULL on OOM — verify callers tolerate it. */
36 static inline lbnode_t *mp_alloc(mempool_t *mp)
39 if (mp->n == 0) return (lbnode_t*)calloc(1, sizeof(lbnode_t)); /* pool empty: fresh zeroed node */
40 else return mp->buf[--mp->n]; /* reuse the most recently freed node (LIFO) */
/* Return a node to the pool for later reuse; grows the pool's pointer array
 * geometrically (doubling, starting at 256) when full.
 * NOTE(review): the realloc result is assigned straight back to mp->buf —
 * on failure the old array leaks and NULL is stored; a temporary pointer
 * would be safer. Cannot be fixed here without the elided lines. */
42 static inline void mp_free(mempool_t *mp, lbnode_t *p)
44 --mp->cnt; p->next = 0; // clear lbnode_t::next here
45 if (mp->n == mp->max) { /* pool array full: double the capacity */
46 mp->max = mp->max? mp->max<<1 : 256;
47 mp->buf = (lbnode_t**)realloc(mp->buf, sizeof(lbnode_t*) * mp->max);
52 /* --- END: Memory pool */
54 /* --- BEGIN: Auxiliary functions */
/* Walk the read's CIGAR to fill in the per-read pileup state at reference
 * position pos: p->qpos (query offset), p->is_del, p->indel (signed length
 * of an indel starting right after pos), and the is_head/is_tail flags.
 * Returns 0 if the read is unmapped or pos falls inside a reference skip
 * (BAM_CREF_SKIP, i.e. an intron) and should not be piled up; 1 otherwise.
 * x tracks the reference coordinate, y the query coordinate.
 * NOTE(review): several interior lines (braces, x/y advancement for the
 * match case, the b declaration) are elided in this view. */
56 static inline int resolve_cigar(bam_pileup1_t *p, uint32_t pos)
60 bam1_core_t *c = &b->core;
61 uint32_t x = c->pos, y = 0; /* x: reference pos of current op; y: query pos */
62 int ret = 1, is_restart = 1; /* is_restart: previous op broke the alignment (clip/skip) */
64 if (c->flag&BAM_FUNMAP) return 0; // unmapped read
65 assert(x <= pos); // otherwise a bug
66 p->qpos = -1; p->indel = 0; p->is_del = p->is_head = p->is_tail = 0;
67 for (k = 0; k < c->n_cigar; ++k) {
68 int op = bam1_cigar(b)[k] & BAM_CIGAR_MASK; // operation
69 int l = bam1_cigar(b)[k] >> BAM_CIGAR_SHIFT; // length
70 if (op == BAM_CMATCH) { // NOTE: this assumes the first and the last operation MUST BE a match or a clip
71 if (x + l > pos) { // overlap with pos
72 p->indel = p->is_del = 0;
73 p->qpos = y + (pos - x); /* query offset of the base aligned to pos */
74 if (x == pos && is_restart) p->is_head = 1; /* first aligned base after a clip/skip */
75 if (x + l - 1 == pos) { // come to the end of a match
76 if (k < c->n_cigar - 1) { // there are additional operation(s)
77 uint32_t cigar = bam1_cigar(b)[k+1]; // next CIGAR
78 int op_next = cigar&BAM_CIGAR_MASK; // next CIGAR operation
79 if (op_next == BAM_CDEL) p->indel = -(int32_t)(cigar>>BAM_CIGAR_SHIFT); // del
80 else if (op_next == BAM_CINS) p->indel = cigar>>BAM_CIGAR_SHIFT; // ins
81 if (op_next == BAM_CDEL || op_next == BAM_CINS) { /* look one op past the indel */
82 if (k + 2 < c->n_cigar) op_next = bam1_cigar(b)[k+2]&BAM_CIGAR_MASK;
85 if (op_next == BAM_CSOFT_CLIP || op_next == BAM_CREF_SKIP || op_next == BAM_CHARD_CLIP)
86 p->is_tail = 1; // tail
87 } else p->is_tail = 1; // this is the last operation; set tail
91 } else if (op == BAM_CDEL) { // then set ->is_del
93 p->indel = 0; p->is_del = 1;
94 p->qpos = y + (pos - x); /* query offset of the base preceding the deletion */
97 } else if (op == BAM_CREF_SKIP) x += l; /* skip consumes reference only */
98 else if (op == BAM_CINS || op == BAM_CSOFT_CLIP) y += l; /* ins/soft-clip consume query only */
99 is_restart = (op == BAM_CREF_SKIP || op == BAM_CSOFT_CLIP || op == BAM_CHARD_CLIP);
101 if (op == BAM_CREF_SKIP) ret = 0; // then do not put it into pileup at all
105 assert(x > pos); // otherwise a bug
109 /* --- END: Auxiliary functions */
/* (fields of the pileup iterator state struct; the struct header is on
 * elided lines) */
117 lbnode_t *head, *tail, *dummy; /* buffered reads; tail is an always-present sentinel, dummy aids list surgery */
118 int32_t tid, pos, max_tid, max_pos; /* current pileup coordinate and the highest coordinate pushed so far */
119 int is_eof, flag_mask, max_plp; /* EOF flag, read-filter mask, capacity of the plp array */
/* Create a pileup iterator: a memory pool, a sentinel node serving as both
 * head and tail of the (initially empty) read list, and a dummy node used
 * during list traversal. max_tid/max_pos start at -1 (nothing pushed yet)
 * and the default read-filter mask is installed.
 * NOTE(review): calloc/mp_alloc results are used unchecked — OOM crashes. */
123 bam_plp_t bam_plp_init(void)
126 iter = calloc(1, sizeof(struct __bam_plp_t));
127 iter->mp = mp_init();
128 iter->head = iter->tail = mp_alloc(iter->mp); /* sentinel: list is empty when head == tail */
129 iter->dummy = mp_alloc(iter->mp); /* scratch node prepended during deletion passes */
130 iter->max_tid = iter->max_pos = -1;
131 iter->flag_mask = BAM_DEF_MASK;
/* Emit the next pileup column, or NULL when none is ready. A column at
 * (iter->tid, iter->pos) can be emitted only once no future push can add a
 * read overlapping it — i.e. after EOF, or once the newest pushed read
 * (max_tid/max_pos) lies strictly beyond the column. For each column it
 * sweeps the buffered read list, dropping reads that end before pos and
 * collecting the rest into iter->plp (grown by doubling as needed).
 * Outputs: *_n_plp, *_tid, *_pos describe the returned column.
 * NOTE(review): several lines (declarations of p/q/n_plp, loop braces, the
 * abort path after the "unsorted input" message) are elided in this view. */
135 const bam_pileup1_t *bam_plp_next(bam_plp_t iter, int *_n_plp, int *_tid, int *_pos)
138 if (iter->is_eof && iter->head->next == 0) return 0; /* fully drained */
139 while (iter->is_eof || iter->max_tid > iter->tid || (iter->max_tid == iter->tid && iter->max_pos > iter->pos)) {
142 // write iter->plp at iter->pos
143 iter->dummy->next = iter->head; /* dummy fronts the list so head itself can be unlinked */
144 for (p = iter->head, q = iter->dummy; p->next; q = p, p = p->next) { /* p->next==0 is the sentinel tail */
145 if (p->b.core.tid < iter->tid || (p->b.core.tid == iter->tid && p->end <= iter->pos)) { // then remove
146 q->next = p->next; mp_free(iter->mp, p); p = q;
147 } else if (p->b.core.tid == iter->tid && p->beg <= iter->pos) { // here: p->end > pos; then add to pileup
148 if (n_plp == iter->max_plp) { // then double the capacity
149 iter->max_plp = iter->max_plp? iter->max_plp<<1 : 256;
150 iter->plp = (bam_pileup1_t*)realloc(iter->plp, sizeof(bam_pileup1_t) * iter->max_plp); /* NOTE(review): unchecked realloc */
152 iter->plp[n_plp].b = &p->b;
153 if (resolve_cigar(iter->plp + n_plp, iter->pos)) ++n_plp; // skip the read if we are looking at ref-skip
156 iter->head = iter->dummy->next; // dummy->next may be changed
157 *_n_plp = n_plp; *_tid = iter->tid; *_pos = iter->pos;
158 // update iter->tid and iter->pos
159 if (iter->head->next) { /* list non-empty: advance toward the next buffered read */
160 if (iter->tid > iter->head->b.core.tid) { /* would mean input was not coordinate-sorted */
161 fprintf(stderr, "[%s] unsorted input. Pileup aborts.\n", __func__);
166 if (iter->tid < iter->head->b.core.tid) { // come to a new reference sequence
167 iter->tid = iter->head->b.core.tid; iter->pos = iter->head->beg; // jump to the next reference
168 } else if (iter->pos < iter->head->beg) { // here: tid == head->b.core.tid
169 iter->pos = iter->head->beg; // jump to the next position
170 } else ++iter->pos; // scan contiguously
172 if (n_plp) return iter->plp; /* empty columns are skipped, not returned */
173 if (iter->is_eof && iter->head->next == 0) break;
/* Feed one alignment into the iterator. Pass b == NULL to signal EOF.
 * Reads that are unmapped to a reference (tid < 0) or match flag_mask are
 * silently accepted-and-dropped (return 0). The record is copied into the
 * sentinel tail node, its [beg,end) reference span computed, sort order is
 * validated against max_tid/max_pos, and a new sentinel tail is allocated.
 * NOTE(review): the error-return lines after the two fprintf diagnostics
 * and the b==NULL branch header are elided in this view — presumably the
 * function returns negative on unsorted input; confirm with full source. */
178 int bam_plp_push(bam_plp_t iter, const bam1_t *b)
181 if (b->core.tid < 0) return 0; /* read not placed on any reference: ignore */
182 if (b->core.flag & iter->flag_mask) return 0; /* filtered out by the mask */
183 bam_copy1(&iter->tail->b, b); /* copy into the sentinel; it becomes a real node below */
184 iter->tail->beg = b->core.pos; iter->tail->end = bam_calend(&b->core, bam1_cigar(b));
185 if (b->core.tid < iter->max_tid) {
186 fprintf(stderr, "[bam_pileup_core] the input is not sorted (chromosomes out of order)\n");
189 if ((b->core.tid == iter->max_tid) && (iter->tail->beg < iter->max_pos)) {
190 fprintf(stderr, "[bam_pileup_core] the input is not sorted (reads out of order)\n");
193 iter->max_tid = b->core.tid; iter->max_pos = iter->tail->beg; /* remember the frontier for column readiness */
194 if (iter->tail->end > iter->pos || iter->tail->b.core.tid > iter->tid) { /* read is still relevant: keep it buffered */
195 iter->tail->next = mp_alloc(iter->mp); /* fresh sentinel */
196 iter->tail = iter->tail->next;
198 } else iter->is_eof = 1; /* b == NULL: no more input */
/* Reset the iterator so a new stream can be pushed: clear the coordinate
 * state and return every buffered node (except the sentinel tail) to the
 * memory pool. NOTE(review): the loop-advance line saving p->next before
 * the free is elided in this view. */
202 void bam_plp_reset(bam_plp_t iter)
205 iter->max_tid = iter->max_pos = -1;
206 iter->tid = iter->pos = 0;
208 for (p = iter->head; p->next;) { /* stop at the sentinel (next == 0) */
210 mp_free(iter->mp, p);
213 iter->head = iter->tail; /* list is empty again: head == tail sentinel */
/* Set the flag mask used to filter pushed reads. A negative mask restores
 * the default; otherwise unmapped reads are always filtered in addition to
 * the caller's bits. */
216 void bam_plp_set_mask(bam_plp_t iter, int mask)
218 iter->flag_mask = mask < 0? BAM_DEF_MASK : (BAM_FUNMAP | mask);
/* Free the iterator: return the dummy and sentinel head nodes to the pool,
 * warn if the pool's outstanding-node count shows a leak, then destroy the
 * pool. NOTE(review): freeing of iter->plp and iter itself is presumably on
 * the elided lines — confirm with the full source. */
221 void bam_plp_destroy(bam_plp_t iter)
223 mp_free(iter->mp, iter->dummy);
224 mp_free(iter->mp, iter->head); /* assumes the list was drained: head is the sentinel */
225 if (iter->mp->cnt != 0)
226 fprintf(stderr, "[bam_plp_destroy] memory leak: %d. Continue anyway.\n", iter->mp->cnt);
227 mp_destroy(iter->mp);
/* Convenience driver: read every alignment from fp, push it through a
 * pileup buffer, and invoke func on each completed column; the final NULL
 * push flushes remaining columns. NOTE(review): the return values of
 * bam_plbuf_push are ignored, so push errors are silently dropped; also
 * the declarations of b/buf/ret and the destruction of b are on elided
 * lines. */
236 int bam_pileup_file(bamFile fp, int mask, bam_pileup_f func, void *func_data)
242 buf = bam_plbuf_init(func, func_data);
243 bam_plbuf_set_mask(buf, mask);
244 while ((ret = bam_read1(fp, b)) >= 0)
245 bam_plbuf_push(b, buf);
246 bam_plbuf_push(0, buf); /* NULL push signals EOF and flushes pending columns */
247 bam_plbuf_destroy(buf);
/* Thin wrapper: forward the filter mask to the underlying pileup iterator. */
252 void bam_plbuf_set_mask(bam_plbuf_t *buf, int mask)
254 bam_plp_set_mask(buf->iter, mask);
/* Thin wrapper: reset the underlying pileup iterator for a new stream. */
257 void bam_plbuf_reset(bam_plbuf_t *buf)
259 bam_plp_reset(buf->iter);
/* Create a callback-style pileup buffer wrapping a bam_plp_t iterator;
 * func/data are presumably stored on the elided lines for use by
 * bam_plbuf_push. NOTE(review): calloc result is used unchecked. */
262 bam_plbuf_t *bam_plbuf_init(bam_pileup_f func, void *data)
265 buf = calloc(1, sizeof(bam_plbuf_t));
266 buf->iter = bam_plp_init();
/* Destroy the callback buffer's iterator; freeing buf itself is presumably
 * on the elided lines. */
272 void bam_plbuf_destroy(bam_plbuf_t *buf)
274 bam_plp_destroy(buf->iter);
/* Push one alignment (or NULL for EOF) into the buffer, then drain every
 * pileup column that became ready, invoking the stored callback on each.
 * Returns the push's error code when negative. NOTE(review): the function's
 * final return statement lies beyond the visible lines — confirm it
 * propagates ret. */
278 int bam_plbuf_push(const bam1_t *b, bam_plbuf_t *buf)
280 int ret, n_plp, tid, pos;
281 const bam_pileup1_t *plp;
282 ret = bam_plp_push(buf->iter, b);
283 if (ret < 0) return ret; /* propagate push errors without draining */
284 while ((plp = bam_plp_next(buf->iter, &n_plp, &tid, &pos)) != 0)
285 buf->func(tid, pos, n_plp, plp, buf->data); /* one callback per completed column */