#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "bam.h"

typedef struct __linkbuf_t {
	bam1_t b;                 // a copy of the alignment record
	int32_t beg, end;         // 0-based start and end of the alignment on the reference
	struct __linkbuf_t *next;
} lbnode_t;
/* --- BEGIN: Memory pool */

typedef struct {
	int cnt, n, max;   // cnt: nodes handed out; n: nodes cached; max: cache capacity
	lbnode_t **buf;    // stack of recycled nodes
} mempool_t;

static mempool_t *mp_init()
{
	mempool_t *mp;
	mp = (mempool_t*)calloc(1, sizeof(mempool_t));
	return mp;
}
static void mp_destroy(mempool_t *mp)
{
	int k;
	for (k = 0; k < mp->n; ++k) {
		free(mp->buf[k]->b.data); // free the alignment data owned by each cached node
		free(mp->buf[k]);
	}
	free(mp->buf);
	free(mp);
}
static inline lbnode_t *mp_alloc(mempool_t *mp)
{
	++mp->cnt;
	if (mp->n == 0) return (lbnode_t*)calloc(1, sizeof(lbnode_t));
	else return mp->buf[--mp->n];
}
static inline void mp_free(mempool_t *mp, lbnode_t *p)
{
	--mp->cnt; p->next = 0; // clear lbnode_t::next here
	if (mp->n == mp->max) { // grow the cache
		mp->max = mp->max? mp->max<<1 : 256;
		mp->buf = (lbnode_t**)realloc(mp->buf, sizeof(lbnode_t*) * mp->max);
	}
	mp->buf[mp->n++] = p; // push the node back for reuse
}

/* --- END: Memory pool */
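
/*
 * Illustrative sketch (not from the original source): the pool exists so the
 * pileup engine can recycle list nodes instead of paying a malloc/free pair
 * per read. A node's lifetime, using only the functions above, looks like:
 *
 *   mempool_t *mp = mp_init();
 *   lbnode_t *p = mp_alloc(mp); // fresh node, or one popped off mp->buf
 *   mp_free(mp, p);             // cached on mp->buf, not actually free()d
 *   mp_destroy(mp);             // the cached nodes are released here
 *
 * mp->cnt tracks the alloc/free balance, which bam_plbuf_destroy() below
 * uses as a cheap leak check.
 */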
/* --- BEGIN: Auxiliary functions */

static inline int resolve_cigar(bam_pileup1_t *p, uint32_t pos)
{
	unsigned k;
	bam1_t *b = p->b;
	bam1_core_t *c = &b->core;
	uint32_t x = c->pos, y = 0; // x: coordinate on the reference; y: offset on the query
	int ret = 1, is_restart = 1;

	if (c->flag&BAM_FUNMAP) return 0; // unmapped read
	assert(x <= pos); // otherwise a bug
	p->qpos = -1; p->indel = 0; p->is_del = p->is_head = p->is_tail = 0;
	for (k = 0; k < c->n_cigar; ++k) {
		int op = bam1_cigar(b)[k] & BAM_CIGAR_MASK; // operation
		int l = bam1_cigar(b)[k] >> BAM_CIGAR_SHIFT; // length
		if (op == BAM_CMATCH) { // NOTE: this assumes the first and the last operation MUST BE a match or a clip
			if (x + l > pos) { // overlap with pos
				p->indel = p->is_del = 0;
				p->qpos = y + (pos - x);
				if (x == pos && is_restart) p->is_head = 1;
				if (x + l - 1 == pos) { // come to the end of a match
					if (k < c->n_cigar - 1) { // there are additional operation(s)
						uint32_t cigar = bam1_cigar(b)[k+1]; // next CIGAR
						int op_next = cigar&BAM_CIGAR_MASK; // next CIGAR operation
						if (op_next == BAM_CDEL) p->indel = -(int32_t)(cigar>>BAM_CIGAR_SHIFT); // del
						else if (op_next == BAM_CINS) p->indel = cigar>>BAM_CIGAR_SHIFT; // ins
						if (op_next == BAM_CSOFT_CLIP || op_next == BAM_CREF_SKIP || op_next == BAM_CHARD_CLIP)
							p->is_tail = 1; // tail
					} else p->is_tail = 1; // this is the last operation; set tail
				}
			}
			x += l; y += l; // a match consumes both the reference and the query
		} else if (op == BAM_CDEL) { // then set ->is_del
			if (x + l > pos) { // pos falls within the deletion
				p->indel = 0; p->is_del = 1;
				p->qpos = y + (pos - x);
			}
			x += l; // a deletion consumes the reference only
		} else if (op == BAM_CREF_SKIP) x += l;
		else if (op == BAM_CINS || op == BAM_CSOFT_CLIP) y += l;
		is_restart = (op == BAM_CREF_SKIP || op == BAM_CSOFT_CLIP || op == BAM_CHARD_CLIP);
		if (x > pos) { // passed pos; no later operation can cover it
			if (op == BAM_CREF_SKIP) ret = 0; // then do not put it into pileup at all
			break;
		}
	}
	assert(x > pos); // otherwise a bug
	return ret;
}

/* --- END: Auxiliary functions */
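
/*
 * Worked example (illustrative, not from the original source): take a read
 * mapped at c->pos=100 with CIGAR 5M2D3M, i.e. x starts at 100 and the first
 * match covers reference 100-104 (query 0-4).
 *
 *   pos=102: inside the first match; qpos = 0 + (102-100) = 2.
 *   pos=104: last base of the match; the look-ahead at bam1_cigar(b)[k+1]
 *            sees BAM_CDEL of length 2, so indel = -2.
 *   pos=105: x has advanced to 105 and the BAM_CDEL branch fires;
 *            is_del = 1 and qpos = y + (pos - x) = 5 + 0 = 5.
 *
 * For a BAM_CREF_SKIP (the 'N' operation of spliced reads) covering pos, ret
 * becomes 0, so the caller drops the read from the pileup at this position.
 */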
struct __bam_plbuf_t {
	mempool_t *mp;
	lbnode_t *head, *tail, *dummy;
	bam_pileup_f func;
	int32_t tid, pos, max_tid, max_pos;
	int is_eof, max_pu, flag_mask;
	bam_pileup1_t *pu;
	void *func_data;
};
void bam_plbuf_reset(bam_plbuf_t *buf)
{
	lbnode_t *p, *q;
	buf->max_tid = buf->max_pos = -1;
	buf->tid = buf->pos = 0;
	buf->is_eof = 0;
	for (p = buf->head; p->next;) { // return all nodes but the tail to the pool
		q = p->next;
		mp_free(buf->mp, p);
		p = q;
	}
	buf->head = buf->tail;
}
void bam_plbuf_set_mask(bam_plbuf_t *buf, int mask)
{
	if (mask < 0) buf->flag_mask = BAM_DEF_MASK;
	else buf->flag_mask = BAM_FUNMAP | mask;
}
bam_plbuf_t *bam_plbuf_init(bam_pileup_f func, void *data)
{
	bam_plbuf_t *buf;
	buf = (bam_plbuf_t*)calloc(1, sizeof(bam_plbuf_t));
	buf->func = func; buf->func_data = data;
	buf->mp = mp_init();
	buf->head = buf->tail = mp_alloc(buf->mp);
	buf->dummy = mp_alloc(buf->mp); // sentinel predecessor used when sweeping the list
	buf->max_tid = buf->max_pos = -1;
	buf->flag_mask = BAM_DEF_MASK;
	return buf;
}
void bam_plbuf_destroy(bam_plbuf_t *buf)
{
	mp_free(buf->mp, buf->dummy);
	mp_free(buf->mp, buf->head);
	if (buf->mp->cnt != 0)
		fprintf(stderr, "[bam_plbuf_destroy] memory leak: %d. Continue anyway.\n", buf->mp->cnt);
	mp_destroy(buf->mp);
	free(buf->pu);
	free(buf);
}
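
/*
 * Lifecycle sketch (illustrative, not from the original source): callers that
 * do not go through bam_pileup_file() drive the buffer manually:
 *
 *   bam_plbuf_t *buf = bam_plbuf_init(my_func, my_data);
 *   // for each record, in sorted order:
 *   //   bam_plbuf_push(b, buf);
 *   bam_plbuf_push(0, buf); // a NULL record marks EOF and flushes the buffer
 *   bam_plbuf_destroy(buf);
 *
 * my_func and my_data are placeholders for a caller-supplied bam_pileup_f
 * callback and its opaque payload.
 */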
int bam_plbuf_push(const bam1_t *b, bam_plbuf_t *buf)
{
	if (b) { // fill buffer
		if (b->core.flag & buf->flag_mask) return 0;
		bam_copy1(&buf->tail->b, b);
		buf->tail->beg = b->core.pos; buf->tail->end = bam_calend(&b->core, bam1_cigar(b));
		if (!(b->core.tid > buf->max_tid || (b->core.tid == buf->max_tid && buf->tail->beg >= buf->max_pos))) {
			fprintf(stderr, "[bam_pileup_core] the input is not sorted. Abort!\n");
			abort();
		}
		buf->max_tid = b->core.tid; buf->max_pos = buf->tail->beg;
		if (buf->tail->end > buf->pos || buf->tail->b.core.tid > buf->tid) { // the tail node is in use; allocate a new one
			buf->tail->next = mp_alloc(buf->mp);
			buf->tail = buf->tail->next;
		}
	} else buf->is_eof = 1;
	while (buf->is_eof || buf->max_tid > buf->tid || (buf->max_tid == buf->tid && buf->max_pos > buf->pos)) {
		int n_pu = 0; // number of reads piled up at the current position
		lbnode_t *p, *q;
		buf->dummy->next = buf->head;
		for (p = buf->head, q = buf->dummy; p->next; q = p, p = p->next) {
			if (p->b.core.tid < buf->tid || (p->b.core.tid == buf->tid && p->end <= buf->pos)) { // then remove from the list
				q->next = p->next; mp_free(buf->mp, p); p = q;
			} else if (p->b.core.tid == buf->tid && p->beg <= buf->pos) { // here: p->end > pos; then add to pileup
				if (n_pu == buf->max_pu) { // then double the capacity
					buf->max_pu = buf->max_pu? buf->max_pu<<1 : 256;
					buf->pu = (bam_pileup1_t*)realloc(buf->pu, sizeof(bam_pileup1_t) * buf->max_pu);
				}
				buf->pu[n_pu].b = &p->b;
				if (resolve_cigar(buf->pu + n_pu, buf->pos)) ++n_pu; // skip the read if we are looking at BAM_CREF_SKIP
			}
		}
		buf->head = buf->dummy->next; // dummy->next may be changed
		if (n_pu) { // then call user defined function
			buf->func(buf->tid, buf->pos, n_pu, buf->pu, buf->func_data);
		}
		// update tid and pos
		if (buf->head->next) assert(buf->tid <= buf->head->b.core.tid); // otherwise, not sorted
		if (buf->tid < buf->head->b.core.tid) { // come to a new reference sequence
			buf->tid = buf->head->b.core.tid; buf->pos = buf->head->beg; // jump to the next reference
		} else if (buf->pos < buf->head->beg) { // here: tid == head->b.core.tid
			buf->pos = buf->head->beg; // jump to the next position
		} else ++buf->pos; // scan contiguously
		if (buf->is_eof && buf->head->next == 0) break;
	}
	return 0;
}
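
/*
 * Note (added commentary): the while loop above is the heart of the pileup.
 * Because the input is coordinate-sorted, once a read starting at max_pos has
 * been buffered, every position before max_pos on the current reference is
 * complete: no future read can cover it. The loop therefore sweeps buf->pos
 * forward, and at each position walks the linked list once, recycling reads
 * that end at or before buf->pos and collecting the rest into buf->pu for the
 * callback. The dummy node acts as a fixed predecessor of head, so deleting
 * the first list element needs no special case.
 */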
int bam_pileup_file(bamFile fp, int mask, bam_pileup_f func, void *func_data)
{
	bam_plbuf_t *buf;
	int ret;
	bam1_t *b;
	b = (bam1_t*)calloc(1, sizeof(bam1_t));
	buf = bam_plbuf_init(func, func_data);
	bam_plbuf_set_mask(buf, mask);
	while ((ret = bam_read1(fp, b)) >= 0)
		bam_plbuf_push(b, buf);
	bam_plbuf_push(0, buf); // signal EOF to flush the remaining positions
	bam_plbuf_destroy(buf);
	free(b->data); free(b);
	return 0;
}
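
/*
 * Minimal usage sketch. depth_cb, main and the file name are illustrative and
 * not part of this library; the callback signature follows bam_pileup_f as it
 * is invoked by bam_plbuf_push() above.
 */
#if 0
static int depth_cb(uint32_t tid, uint32_t pos, int n, const bam_pileup1_t *pl, void *data)
{
	printf("%d\t%u\t%d\n", (int)tid, pos + 1, n); // tid, 1-based position, depth
	return 0;
}

int main(void)
{
	bamFile fp = bam_open("example.bam", "r"); // hypothetical input file
	bam_pileup_file(fp, BAM_DEF_MASK, depth_cb, 0);
	bam_close(fp);
	return 0;
}
#endif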