/* Singly-linked list node buffering one alignment while it still overlaps
 * the pileup window. Fields are elided in this chunk; mp_destroy() frees
 * buf[k]->b.data, so the node presumably embeds a bam1_t b plus beg/end
 * coordinates (see bam_plbuf_push) — TODO confirm against full source. */
7 typedef struct __linkbuf_t {
10 struct __linkbuf_t *next; /* next buffered alignment in the list */
13 /* --- BEGIN: Memory pool */
/* Allocate a zero-initialized memory pool for lbnode_t recycling.
 * Body is partially elided here; presumably returns mp — TODO confirm. */
20 static mempool_t *mp_init()
23 mp = (mempool_t*)calloc(1, sizeof(mempool_t)); /* zeroed: n == 0, buf == NULL, cnt == 0 */
/* Destroy the pool: release the alignment data held by every cached node.
 * Elided lines presumably also free mp->buf[k], mp->buf and mp itself —
 * TODO confirm against full source. */
26 static void mp_destroy(mempool_t *mp)
29 for (k = 0; k < mp->n; ++k) {
30 free(mp->buf[k]->b.data); /* bam1_t owns a heap data block; free it before the node */
/* Get a node: pop a recycled one from the pool, or calloc a fresh zeroed
 * node when the pool is empty. NOTE(review): the visible lines do not
 * increment mp->cnt (the live-node counter checked in bam_plbuf_destroy);
 * presumably an elided line does — TODO confirm. */
36 static inline lbnode_t *mp_alloc(mempool_t *mp)
39 if (mp->n == 0) return (lbnode_t*)calloc(1, sizeof(lbnode_t));
40 else return mp->buf[--mp->n]; /* reuse: keeps b.data buffer for bam_copy1 */
/* Return a node to the pool (node and its b.data stay allocated for reuse).
 * Grows the pointer stack geometrically, starting at 256 slots.
 * NOTE(review): `mp->buf = realloc(mp->buf, ...)` overwrites the pointer
 * without checking for NULL — on OOM the old array leaks and the next
 * dereference is UB; prefer a temporary. The push of p onto the stack
 * (mp->buf[mp->n++] = p) is elided in this chunk — TODO confirm. */
42 static inline void mp_free(mempool_t *mp, lbnode_t *p)
44 --mp->cnt; p->next = 0; // clear lbnode_t::next here
45 if (mp->n == mp->max) {
46 mp->max = mp->max? mp->max<<1 : 256;
47 mp->buf = (lbnode_t**)realloc(mp->buf, sizeof(lbnode_t*) * mp->max);
52 /* --- END: Memory pool */
54 /* --- BEGIN: Auxiliary functions */
/* Fill pileup entry *p for reference position pos by walking the read's
 * CIGAR: sets qpos (query offset covering pos), indel (+ins/-del length of
 * the operation immediately after the covering match), is_del, and the
 * is_head/is_tail alignment-boundary flags.
 * Returns 1 to include the read in the pileup, 0 to skip it (unmapped, or
 * pos falls in a reference skip — see the BAM_CREF_SKIP note below).
 * NOTE(review): the body references b and k, whose declarations (presumably
 * bam1_t *b = p->b; int k;) are elided from this chunk — TODO confirm. */
56 static inline int resolve_cigar(bam_pileup1_t *p, uint32_t pos)
60 bam1_core_t *c = &b->core;
61 uint32_t x = c->pos, y = 0; /* x: reference coordinate; y: query (read) coordinate */
62 int ret = 1, is_restart = 1; /* is_restart: previous op broke the alignment (clip/ref-skip) */
64 if (c->flag&BAM_FUNMAP) return 0; // unmapped read
65 assert(x <= pos); // otherwise a bug
66 p->qpos = -1; p->indel = 0; p->is_del = p->is_head = p->is_tail = 0;
67 for (k = 0; k < c->n_cigar; ++k) {
68 int op = bam1_cigar(b)[k] & BAM_CIGAR_MASK; // operation
69 int l = bam1_cigar(b)[k] >> BAM_CIGAR_SHIFT; // length
70 if (op == BAM_CMATCH) { // NOTE: this assumes the first and the last operation MUST BE a match or a clip
71 if (x + l > pos) { // overlap with pos
72 p->indel = p->is_del = 0;
73 p->qpos = y + (pos - x); /* query offset of the base aligned to pos */
74 if (x == pos && is_restart) p->is_head = 1;
75 if (x + l - 1 == pos) { // come to the end of a match
76 if (k < c->n_cigar - 1) { // there are additional operation(s)
77 uint32_t cigar = bam1_cigar(b)[k+1]; // next CIGAR
78 int op_next = cigar&BAM_CIGAR_MASK; // next CIGAR operation
79 if (op_next == BAM_CDEL) p->indel = -(int32_t)(cigar>>BAM_CIGAR_SHIFT); // del
80 else if (op_next == BAM_CINS) p->indel = cigar>>BAM_CIGAR_SHIFT; // ins
81 if (op_next == BAM_CSOFT_CLIP || op_next == BAM_CREF_SKIP || op_next == BAM_CHARD_CLIP)
82 p->is_tail = 1; // tail
83 } else p->is_tail = 1; // this is the last operation; set tail
/* (elided in this chunk: presumably x += l; y += l; and loop-exit break —
 * TODO confirm against full source) */
87 } else if (op == BAM_CDEL) { // then set ->is_del
89 p->indel = 0; p->is_del = 1;
90 p->qpos = y + (pos - x); /* NOTE(review): with pos inside a deletion this is the query offset at the deletion — confirm intended semantics */
93 } else if (op == BAM_CREF_SKIP) x += l; /* N: consumes reference only */
94 else if (op == BAM_CINS || op == BAM_CSOFT_CLIP) y += l; /* I/S: consume query only */
95 is_restart = (op == BAM_CREF_SKIP || op == BAM_CSOFT_CLIP || op == BAM_CHARD_CLIP);
97 if (op == BAM_CREF_SKIP) ret = 0; // then do not put it into pileup at all
101 assert(x > pos); // otherwise a bug
105 /* --- END: Auxiliary functions */
/* Pileup buffer state. head..tail is the linked list of buffered reads
 * overlapping the current window; dummy is a sentinel used as the list
 * predecessor during sweeps (see bam_plbuf_push). (tid,pos) is the column
 * being emitted; (max_tid,max_pos) tracks the most recent input record for
 * sort-order checking. Remaining fields (mp, pu, func, flag_mask, ...) are
 * elided from this chunk. */
107 struct __bam_plbuf_t {
109 lbnode_t *head, *tail, *dummy;
112 int32_t tid, pos, max_tid, max_pos;
/* Reset the buffer so it can be reused for a new input stream: clear the
 * position trackers and drain the linked list back into the memory pool
 * (the per-node mp_free call inside the loop is elided in this chunk),
 * leaving only the tail sentinel node. */
118 void bam_plbuf_reset(bam_plbuf_t *buf)
121 buf->max_tid = buf->max_pos = -1; /* -1: no input seen yet (matches bam_plbuf_init) */
122 buf->tid = buf->pos = 0;
124 for (p = buf->head; p->next;) { /* every node except the tail sentinel */
129 buf->head = buf->tail;
/* Set the FLAG mask used to drop reads in bam_plbuf_push (a read is skipped
 * when core.flag & mask is non-zero). A negative mask restores the default;
 * any explicit mask always includes BAM_FUNMAP so unmapped reads are never
 * piled up. */
132 void bam_plbuf_set_mask(bam_plbuf_t *buf, int mask)
134 if (mask < 0) buf->flag_mask = BAM_DEF_MASK;
135 else buf->flag_mask = BAM_FUNMAP | mask;
/* Create a pileup buffer that calls `func(tid, pos, n, pu, data)` once per
 * covered reference position. Caller owns the result; free with
 * bam_plbuf_destroy. NOTE(review): calloc/mp_alloc results are used
 * unchecked, and buf->mp's creation (presumably buf->mp = mp_init()) is
 * elided from this chunk — TODO confirm. */
138 bam_plbuf_t *bam_plbuf_init(bam_pileup_f func, void *data)
141 buf = (bam_plbuf_t*)calloc(1, sizeof(bam_plbuf_t));
142 buf->func = func; buf->func_data = data;
144 buf->head = buf->tail = mp_alloc(buf->mp); /* one sentinel node; list is "empty" when head == tail */
145 buf->dummy = mp_alloc(buf->mp); /* sentinel predecessor used during list sweeps */
146 buf->max_tid = buf->max_pos = -1; /* no input yet */
147 buf->flag_mask = BAM_DEF_MASK;
/* Free the buffer: return the two sentinel nodes to the pool, then warn if
 * any nodes are still outstanding (cnt != 0 means a leak, but destruction
 * proceeds). The frees of buf->pu, the pool (mp_destroy) and buf itself are
 * elided from this chunk — TODO confirm against full source. */
151 void bam_plbuf_destroy(bam_plbuf_t *buf)
153 mp_free(buf->mp, buf->dummy);
154 mp_free(buf->mp, buf->head);
155 if (buf->mp->cnt != 0)
156 fprintf(stderr, "[bam_plbuf_destroy] memory leak: %d. Continue anyway.\n", buf->mp->cnt);
/* Core pileup driver. Push one alignment (b != NULL) or signal end-of-input
 * (b == NULL). Phase 1 copies the read into the tail node and verifies sort
 * order; phase 2 emits every pileup column that can no longer be affected
 * by future input (pos behind max_tid/max_pos, or everything once is_eof is
 * set). For each column it sweeps the list: expired reads go back to the
 * pool, overlapping reads are resolved into buf->pu and passed to the user
 * callback. Declarations of the locals (p, q, n_pu) and several statements
 * (abort paths after the "not sorted" messages, x/advance lines) are elided
 * from this chunk. Returns the callback/status value per the original API —
 * the visible lines do not show the return statements; TODO confirm. */
162 int bam_plbuf_push(const bam1_t *b, bam_plbuf_t *buf)
164 if (b) { // fill buffer
165 if (b->core.tid < 0) return 0; /* unplaced read: ignore */
166 if (b->core.flag & buf->flag_mask) return 0; /* filtered by mask (see bam_plbuf_set_mask) */
167 bam_copy1(&buf->tail->b, b); /* deep-copy into the tail node (reuses its data buffer) */
168 buf->tail->beg = b->core.pos; buf->tail->end = bam_calend(&b->core, bam1_cigar(b));
169 if (!(b->core.tid >= buf->max_tid || (b->core.tid == buf->max_tid && buf->tail->beg >= buf->max_pos))) {
170 fprintf(stderr, "[bam_pileup_core] the input is not sorted. Abort!\n");
/* (abort/exit statement elided in this chunk) */
173 buf->max_tid = b->core.tid; buf->max_pos = buf->tail->beg;
174 if (buf->tail->end > buf->pos || buf->tail->b.core.tid > buf->tid) { /* read is still relevant: keep it, start a new tail */
175 buf->tail->next = mp_alloc(buf->mp);
176 buf->tail = buf->tail->next;
178 } else buf->is_eof = 1; /* NULL push: flush everything below */
179 while (buf->is_eof || buf->max_tid > buf->tid || (buf->max_tid == buf->tid && buf->max_pos > buf->pos)) {
/* emit the column at (buf->tid, buf->pos) */
182 buf->dummy->next = buf->head; /* dummy = sentinel predecessor so head removal is uniform */
183 for (p = buf->head, q = buf->dummy; p->next; q = p, p = p->next) {
184 if (p->b.core.tid < buf->tid || (p->b.core.tid == buf->tid && p->end <= buf->pos)) { // then remove from the list
185 q->next = p->next; mp_free(buf->mp, p); p = q;
186 } else if (p->b.core.tid == buf->tid && p->beg <= buf->pos) { // here: p->end > pos; then add to pileup
187 if (n_pu == buf->max_pu) { // then double the capacity
188 buf->max_pu = buf->max_pu? buf->max_pu<<1 : 256;
/* NOTE(review): realloc result assigned back unchecked — leaks/UB on OOM */
189 buf->pu = (bam_pileup1_t*)realloc(buf->pu, sizeof(bam_pileup1_t) * buf->max_pu);
191 buf->pu[n_pu].b = &p->b;
192 if (resolve_cigar(buf->pu + n_pu, buf->pos)) ++n_pu; // skip the read if we are looking at BAM_CREF_SKIP
195 buf->head = buf->dummy->next; // dummy->next may be changed
196 if (n_pu) { // then call user defined function
197 buf->func(buf->tid, buf->pos, n_pu, buf->pu, buf->func_data);
199 // update tid and pos
200 if (buf->head->next) { /* list non-empty (head != tail sentinel) */
201 if (buf->tid > buf->head->b.core.tid) {
202 fprintf(stderr, "[bam_plbuf_push] unsorted input. Pileup aborts.\n");
/* (abort/exit statement elided in this chunk) */
206 if (buf->tid < buf->head->b.core.tid) { // come to a new reference sequence
207 buf->tid = buf->head->b.core.tid; buf->pos = buf->head->beg; // jump to the next reference
208 } else if (buf->pos < buf->head->beg) { // here: tid == head->b.core.tid
209 buf->pos = buf->head->beg; // jump to the next position
210 } else ++buf->pos; // scan contiguously
211 if (buf->is_eof && buf->head->next == 0) break; /* flushed everything */
/* Convenience driver: read every alignment from an open BAM stream, feed it
 * through a freshly created pileup buffer with the given flag mask, then
 * flush (push NULL) and destroy the buffer. NOTE(review): the declaration
 * and creation of `b` (presumably bam_init1()) and its destruction, plus
 * the function's return statement, are elided from this chunk; `ret` is
 * assigned but not otherwise used in the visible lines — TODO confirm. */
216 int bam_pileup_file(bamFile fp, int mask, bam_pileup_f func, void *func_data)
222 buf = bam_plbuf_init(func, func_data);
223 bam_plbuf_set_mask(buf, mask); /* negative mask => BAM_DEF_MASK */
224 while ((ret = bam_read1(fp, b)) >= 0) /* bam_read1 returns negative at EOF/error */
225 bam_plbuf_push(b, buf);
226 bam_plbuf_push(0, buf); /* NULL push: flush remaining columns */
227 bam_plbuf_destroy(buf);