6 #include "bam_endian.h"
14 Alignment indexing. Before indexing, BAM must be sorted based on the
15 leftmost coordinate of alignments. In indexing, BAM uses two indices:
16 a UCSC binning index and a simple linear index. The binning index is
17 efficient for alignments spanning long distances, while the auxiliary
18 linear index helps to reduce unnecessary seek calls especially for
21 The UCSC binning scheme was suggested by Richard Durbin and Lincoln
22 Stein and is explained by Kent et al. (2002). In this scheme, each bin
23 represents a contiguous genomic region which can be fully contained in
24 another bin; each alignment is associated with a bin which represents
25 the smallest region containing the entire alignment. The binning
26 scheme is essentially another representation of an R-tree. A distinct bin
27 uniquely corresponds to a distinct internal node in an R-tree. Bin A is
28 a child of Bin B if region A is contained in B.
30 In BAM, each bin may span 2^29, 2^26, 2^23, 2^20, 2^17 or 2^14 bp. Bin
31 0 spans a 512Mbp region, bins 1-8 span 64Mbp, 9-72 8Mbp, 73-584 1Mbp,
32 585-4680 128Kbp and bins 4681-37449 span 16Kbp regions. If we want to
33 find the alignments overlapped with a region [rbeg,rend), we need to
34 calculate the list of bins that may overlap the region and test
35 the alignments in the bins to confirm the overlaps. If the specified
36 region is short, typically only a few alignments in six bins need to
37 be retrieved. The overlapping alignments can be quickly fetched.
41 #define BAM_MIN_CHUNK_GAP 32768
42 // 1<<14 is the size of minimum bin.
43 #define BAM_LIDX_SHIFT 14
45 #define BAM_MAX_BIN 37450 // =(8^6-1)/7+1
51 #define pair64_lt(a,b) ((a).u < (b).u)
52 KSORT_INIT(off, pair64_t, pair64_lt)
64 KHASH_MAP_INIT_INT(i, bam_binlist_t)
66 struct __bam_index_t {
68 uint64_t n_no_coor; // unmapped reads without coordinate
73 // requirement: len <= LEN_MASK
74 static inline void insert_offset(khash_t(i) *h, int bin, uint64_t beg, uint64_t end)
79 k = kh_put(i, h, bin, &ret);
81 if (ret) { // not present
83 l->list = (pair64_t*)calloc(l->m, 16);
87 l->list = (pair64_t*)realloc(l->list, l->m * 16);
89 l->list[l->n].u = beg; l->list[l->n++].v = end;
92 static inline void insert_offset2(bam_lidx_t *index2, bam1_t *b, uint64_t offset)
95 beg = b->core.pos >> BAM_LIDX_SHIFT;
96 end = (bam_calend(&b->core, bam1_cigar(b)) - 1) >> BAM_LIDX_SHIFT;
97 if (index2->m < end + 1) {
98 int old_m = index2->m;
100 kroundup32(index2->m);
101 index2->offset = (uint64_t*)realloc(index2->offset, index2->m * 8);
102 memset(index2->offset + old_m, 0, 8 * (index2->m - old_m));
105 if (index2->offset[beg] == 0) index2->offset[beg] = offset;
107 for (i = beg; i <= end; ++i)
108 if (index2->offset[i] == 0) index2->offset[i] = offset;
113 static void merge_chunks(bam_index_t *idx)
115 #if defined(BAM_TRUE_OFFSET) || defined(BAM_VIRTUAL_OFFSET16)
119 for (i = 0; i < idx->n; ++i) {
120 index = idx->index[i];
121 for (k = kh_begin(index); k != kh_end(index); ++k) {
123 if (!kh_exist(index, k) || kh_key(index, k) == BAM_MAX_BIN) continue;
124 p = &kh_value(index, k);
126 for (l = 1; l < p->n; ++l) {
127 #ifdef BAM_TRUE_OFFSET
128 if (p->list[m].v + BAM_MIN_CHUNK_GAP > p->list[l].u) p->list[m].v = p->list[l].v;
130 if (p->list[m].v>>16 == p->list[l].u>>16) p->list[m].v = p->list[l].v;
132 else p->list[++m] = p->list[l];
137 #endif // defined(BAM_TRUE_OFFSET) || defined(BAM_BGZF)
140 static void fill_missing(bam_index_t *idx)
143 for (i = 0; i < idx->n; ++i) {
144 bam_lidx_t *idx2 = &idx->index2[i];
145 for (j = 1; j < idx2->n; ++j)
146 if (idx2->offset[j] == 0)
147 idx2->offset[j] = idx2->offset[j-1];
151 bam_index_t *bam_index_core(bamFile fp)
157 uint32_t last_bin, save_bin, recalculated_bin;
158 int32_t last_coor, last_tid, save_tid;
160 uint64_t save_off, last_off, n_mapped, n_unmapped, off_beg, off_end, n_no_coor;
162 h = bam_header_read(fp);
164 fprintf(stderr, "[bam_index_core] Invalid BAM header.");
168 idx = (bam_index_t*)calloc(1, sizeof(bam_index_t));
169 b = (bam1_t*)calloc(1, sizeof(bam1_t));
172 idx->n = h->n_targets;
173 idx->index = (khash_t(i)**)calloc(idx->n, sizeof(void*));
174 for (i = 0; i < idx->n; ++i) idx->index[i] = kh_init(i);
175 idx->index2 = (bam_lidx_t*)calloc(idx->n, sizeof(bam_lidx_t));
177 save_bin = save_tid = last_tid = last_bin = 0xffffffffu;
178 save_off = last_off = bam_tell(fp); last_coor = 0xffffffffu;
179 n_mapped = n_unmapped = n_no_coor = off_end = 0;
180 off_beg = off_end = bam_tell(fp);
181 while ((ret = bam_read1(fp, b)) >= 0) {
182 if (c->tid < 0) ++n_no_coor;
183 if (last_tid < c->tid || (last_tid >= 0 && c->tid < 0)) { // change of chromosomes
185 last_bin = 0xffffffffu;
186 } else if ((uint32_t)last_tid > (uint32_t)c->tid) {
187 fprintf(stderr, "[bam_index_core] the alignment is not sorted (%s): %d-th chr > %d-th chr\n",
188 bam1_qname(b), last_tid+1, c->tid+1);
190 } else if ((int32_t)c->tid >= 0 && last_coor > c->pos) {
191 fprintf(stderr, "[bam_index_core] the alignment is not sorted (%s): %u > %u in %d-th chr\n",
192 bam1_qname(b), last_coor, c->pos, c->tid+1);
197 recalculated_bin = bam_reg2bin(c->pos, bam_calend(c, bam1_cigar(b)));
199 recalculated_bin = bam_reg2bin(c->pos, c->pos + 1);
201 if (c->bin != recalculated_bin) {
202 fprintf(stderr, "[bam_index_core] read '%s' mapped to '%s' at POS %d to %d has BIN %d but should be %d\n",
203 bam1_qname(b), h->target_name[c->tid], c->pos + 1, bam_calend(c, bam1_cigar(b)), c->bin, recalculated_bin);
204 fprintf(stderr, "[bam_index_core] Fix it by using BAM->SAM->BAM to force a recalculation of the BIN field\n");
208 if (c->tid >= 0 && !(c->flag & BAM_FUNMAP)) insert_offset2(&idx->index2[b->core.tid], b, last_off);
209 if (c->bin != last_bin) { // then possibly write the binning index
210 if (save_bin != 0xffffffffu) // save_bin==0xffffffffu only happens to the first record
211 insert_offset(idx->index[save_tid], save_bin, save_off, last_off);
212 if (last_bin == 0xffffffffu && save_tid != 0xffffffffu) { // write the meta element
214 insert_offset(idx->index[save_tid], BAM_MAX_BIN, off_beg, off_end);
215 insert_offset(idx->index[save_tid], BAM_MAX_BIN, n_mapped, n_unmapped);
216 n_mapped = n_unmapped = 0;
220 save_bin = last_bin = c->bin;
222 if (save_tid < 0) break;
224 if (bam_tell(fp) <= last_off) {
225 fprintf(stderr, "[bam_index_core] bug in BGZF/RAZF: %llx < %llx\n",
226 (unsigned long long)bam_tell(fp), (unsigned long long)last_off);
229 if (c->flag & BAM_FUNMAP) ++n_unmapped;
231 last_off = bam_tell(fp);
232 last_coor = b->core.pos;
234 bam_header_destroy(h);
236 insert_offset(idx->index[save_tid], save_bin, save_off, bam_tell(fp));
237 insert_offset(idx->index[save_tid], BAM_MAX_BIN, off_beg, bam_tell(fp));
238 insert_offset(idx->index[save_tid], BAM_MAX_BIN, n_mapped, n_unmapped);
243 while ((ret = bam_read1(fp, b)) >= 0) {
245 if (c->tid >= 0 && n_no_coor) {
246 fprintf(stderr, "[bam_index_core] the alignment is not sorted: reads without coordinates prior to reads with coordinates.\n");
251 if (ret < -1) fprintf(stderr, "[bam_index_core] truncated file? Continue anyway. (%d)\n", ret);
252 free(b->data); free(b);
253 idx->n_no_coor = n_no_coor;
257 void bam_index_destroy(bam_index_t *idx)
261 if (idx == 0) return;
262 for (i = 0; i < idx->n; ++i) {
263 khash_t(i) *index = idx->index[i];
264 bam_lidx_t *index2 = idx->index2 + i;
265 for (k = kh_begin(index); k != kh_end(index); ++k) {
266 if (kh_exist(index, k))
267 free(kh_value(index, k).list);
269 kh_destroy(i, index);
270 free(index2->offset);
272 free(idx->index); free(idx->index2);
276 void bam_index_save(const bam_index_t *idx, FILE *fp)
280 fwrite("BAI\1", 1, 4, fp);
283 fwrite(bam_swap_endian_4p(&x), 4, 1, fp);
284 } else fwrite(&idx->n, 4, 1, fp);
285 for (i = 0; i < idx->n; ++i) {
286 khash_t(i) *index = idx->index[i];
287 bam_lidx_t *index2 = idx->index2 + i;
288 // write binning index
289 size = kh_size(index);
290 if (bam_is_be) { // big endian
292 fwrite(bam_swap_endian_4p(&x), 4, 1, fp);
293 } else fwrite(&size, 4, 1, fp);
294 for (k = kh_begin(index); k != kh_end(index); ++k) {
295 if (kh_exist(index, k)) {
296 bam_binlist_t *p = &kh_value(index, k);
297 if (bam_is_be) { // big endian
299 x = kh_key(index, k); fwrite(bam_swap_endian_4p(&x), 4, 1, fp);
300 x = p->n; fwrite(bam_swap_endian_4p(&x), 4, 1, fp);
301 for (x = 0; (int)x < p->n; ++x) {
302 bam_swap_endian_8p(&p->list[x].u);
303 bam_swap_endian_8p(&p->list[x].v);
305 fwrite(p->list, 16, p->n, fp);
306 for (x = 0; (int)x < p->n; ++x) {
307 bam_swap_endian_8p(&p->list[x].u);
308 bam_swap_endian_8p(&p->list[x].v);
311 fwrite(&kh_key(index, k), 4, 1, fp);
312 fwrite(&p->n, 4, 1, fp);
313 fwrite(p->list, 16, p->n, fp);
317 // write linear index (index2)
320 fwrite(bam_swap_endian_4p(&x), 4, 1, fp);
321 } else fwrite(&index2->n, 4, 1, fp);
322 if (bam_is_be) { // big endian
324 for (x = 0; (int)x < index2->n; ++x)
325 bam_swap_endian_8p(&index2->offset[x]);
326 fwrite(index2->offset, 8, index2->n, fp);
327 for (x = 0; (int)x < index2->n; ++x)
328 bam_swap_endian_8p(&index2->offset[x]);
329 } else fwrite(index2->offset, 8, index2->n, fp);
331 { // write the number of reads coor-less records.
332 uint64_t x = idx->n_no_coor;
333 if (bam_is_be) bam_swap_endian_8p(&x);
334 fwrite(&x, 8, 1, fp);
339 static bam_index_t *bam_index_load_core(FILE *fp)
345 fprintf(stderr, "[bam_index_load_core] fail to load index.\n");
348 fread(magic, 1, 4, fp);
349 if (strncmp(magic, "BAI\1", 4)) {
350 fprintf(stderr, "[bam_index_load] wrong magic number.\n");
354 idx = (bam_index_t*)calloc(1, sizeof(bam_index_t));
355 fread(&idx->n, 4, 1, fp);
356 if (bam_is_be) bam_swap_endian_4p(&idx->n);
357 idx->index = (khash_t(i)**)calloc(idx->n, sizeof(void*));
358 idx->index2 = (bam_lidx_t*)calloc(idx->n, sizeof(bam_lidx_t));
359 for (i = 0; i < idx->n; ++i) {
361 bam_lidx_t *index2 = idx->index2 + i;
366 index = idx->index[i] = kh_init(i);
367 // load binning index
368 fread(&size, 4, 1, fp);
369 if (bam_is_be) bam_swap_endian_4p(&size);
370 for (j = 0; j < (int)size; ++j) {
371 fread(&key, 4, 1, fp);
372 if (bam_is_be) bam_swap_endian_4p(&key);
373 k = kh_put(i, index, key, &ret);
374 p = &kh_value(index, k);
375 fread(&p->n, 4, 1, fp);
376 if (bam_is_be) bam_swap_endian_4p(&p->n);
378 p->list = (pair64_t*)malloc(p->m * 16);
379 fread(p->list, 16, p->n, fp);
382 for (x = 0; x < p->n; ++x) {
383 bam_swap_endian_8p(&p->list[x].u);
384 bam_swap_endian_8p(&p->list[x].v);
389 fread(&index2->n, 4, 1, fp);
390 if (bam_is_be) bam_swap_endian_4p(&index2->n);
391 index2->m = index2->n;
392 index2->offset = (uint64_t*)calloc(index2->m, 8);
393 fread(index2->offset, index2->n, 8, fp);
395 for (j = 0; j < index2->n; ++j) bam_swap_endian_8p(&index2->offset[j]);
397 if (fread(&idx->n_no_coor, 8, 1, fp) == 0) idx->n_no_coor = 0;
398 if (bam_is_be) bam_swap_endian_8p(&idx->n_no_coor);
402 bam_index_t *bam_index_load_local(const char *_fn)
407 if (strstr(_fn, "ftp://") == _fn || strstr(_fn, "http://") == _fn) {
410 for (p = _fn + l - 1; p >= _fn; --p)
411 if (*p == '/') break;
413 } else fn = strdup(_fn);
414 fnidx = (char*)calloc(strlen(fn) + 5, 1);
415 strcpy(fnidx, fn); strcat(fnidx, ".bai");
416 fp = fopen(fnidx, "rb");
417 if (fp == 0) { // try "{base}.bai"
418 char *s = strstr(fn, "bam");
419 if (s == fn + strlen(fn) - 3) {
421 fnidx[strlen(fn)-1] = 'i';
422 fp = fopen(fnidx, "rb");
425 free(fnidx); free(fn);
427 bam_index_t *idx = bam_index_load_core(fp);
434 static void download_from_remote(const char *url)
436 const int buf_size = 1 * 1024 * 1024;
442 if (strstr(url, "ftp://") != url && strstr(url, "http://") != url) return;
444 for (fn = (char*)url + l - 1; fn >= url; --fn)
445 if (*fn == '/') break;
446 ++fn; // fn now points to the file name
447 fp_remote = knet_open(url, "r");
448 if (fp_remote == 0) {
449 fprintf(stderr, "[download_from_remote] fail to open remote file.\n");
452 if ((fp = fopen(fn, "wb")) == 0) {
453 fprintf(stderr, "[download_from_remote] fail to create file in the working directory.\n");
454 knet_close(fp_remote);
457 buf = (uint8_t*)calloc(buf_size, 1);
458 while ((l = knet_read(fp_remote, buf, buf_size)) != 0)
459 fwrite(buf, 1, l, fp);
462 knet_close(fp_remote);
465 static void download_from_remote(const char *url)
471 bam_index_t *bam_index_load(const char *fn)
474 idx = bam_index_load_local(fn);
475 if (idx == 0 && (strstr(fn, "ftp://") == fn || strstr(fn, "http://") == fn)) {
476 char *fnidx = calloc(strlen(fn) + 5, 1);
477 strcat(strcpy(fnidx, fn), ".bai");
478 fprintf(stderr, "[bam_index_load] attempting to download the remote index file.\n");
479 download_from_remote(fnidx);
481 idx = bam_index_load_local(fn);
483 if (idx == 0) fprintf(stderr, "[bam_index_load] fail to load BAM index.\n");
487 int bam_index_build2(const char *fn, const char *_fnidx)
493 if ((fp = bam_open(fn, "r")) == 0) {
494 fprintf(stderr, "[bam_index_build2] fail to open the BAM file.\n");
497 idx = bam_index_core(fp);
500 fprintf(stderr, "[bam_index_build2] fail to index the BAM file.\n");
504 fnidx = (char*)calloc(strlen(fn) + 5, 1);
505 strcpy(fnidx, fn); strcat(fnidx, ".bai");
506 } else fnidx = strdup(_fnidx);
507 fpidx = fopen(fnidx, "wb");
509 fprintf(stderr, "[bam_index_build2] fail to create the index file.\n");
511 bam_index_destroy(idx);
514 bam_index_save(idx, fpidx);
515 bam_index_destroy(idx);
521 int bam_index_build(const char *fn)
523 return bam_index_build2(fn, 0);
526 int bam_index(int argc, char *argv[])
529 fprintf(stderr, "Usage: samtools index <in.bam> [out.index]\n");
532 if (argc >= 3) bam_index_build2(argv[1], argv[2]);
533 else bam_index_build(argv[1]);
537 int bam_idxstats(int argc, char *argv[])
540 bam_header_t *header;
544 fprintf(stderr, "Usage: samtools idxstats <in.bam>\n");
547 fp = bam_open(argv[1], "r");
548 if (fp == 0) { fprintf(stderr, "[%s] fail to open BAM.\n", __func__); return 1; }
549 header = bam_header_read(fp);
551 idx = bam_index_load(argv[1]);
552 if (idx == 0) { fprintf(stderr, "[%s] fail to load the index.\n", __func__); return 1; }
553 for (i = 0; i < idx->n; ++i) {
555 khash_t(i) *h = idx->index[i];
556 printf("%s\t%d", header->target_name[i], header->target_len[i]);
557 k = kh_get(i, h, BAM_MAX_BIN);
559 printf("\t%llu\t%llu", (long long)kh_val(h, k).list[1].u, (long long)kh_val(h, k).list[1].v);
560 else printf("\t0\t0");
563 printf("*\t0\t0\t%llu\n", (long long)idx->n_no_coor);
564 bam_header_destroy(header);
565 bam_index_destroy(idx);
569 static inline int reg2bins(uint32_t beg, uint32_t end, uint16_t list[BAM_MAX_BIN])
572 if (beg >= end) return 0;
573 if (end >= 1u<<29) end = 1u<<29;
576 for (k = 1 + (beg>>26); k <= 1 + (end>>26); ++k) list[i++] = k;
577 for (k = 9 + (beg>>23); k <= 9 + (end>>23); ++k) list[i++] = k;
578 for (k = 73 + (beg>>20); k <= 73 + (end>>20); ++k) list[i++] = k;
579 for (k = 585 + (beg>>17); k <= 585 + (end>>17); ++k) list[i++] = k;
580 for (k = 4681 + (beg>>14); k <= 4681 + (end>>14); ++k) list[i++] = k;
584 static inline int is_overlap(uint32_t beg, uint32_t end, const bam1_t *b)
586 uint32_t rbeg = b->core.pos;
587 uint32_t rend = b->core.n_cigar? bam_calend(&b->core, bam1_cigar(b)) : b->core.pos + 1;
588 return (rend > beg && rbeg < end);
591 struct __bam_iter_t {
592 int from_first; // read from the first record; no random access
593 int tid, beg, end, n_off, i, finished;
598 // bam_fetch helper function retrieves
599 bam_iter_t bam_iter_query(const bam_index_t *idx, int tid, int beg, int end)
602 int i, n_bins, n_off;
609 if (beg < 0) beg = 0;
610 if (end < beg) return 0;
612 iter = calloc(1, sizeof(struct __bam_iter_t));
613 iter->tid = tid, iter->beg = beg, iter->end = end; iter->i = -1;
615 bins = (uint16_t*)calloc(BAM_MAX_BIN, 2);
616 n_bins = reg2bins(beg, end, bins);
617 index = idx->index[tid];
618 if (idx->index2[tid].n > 0) {
619 min_off = (beg>>BAM_LIDX_SHIFT >= idx->index2[tid].n)? idx->index2[tid].offset[idx->index2[tid].n-1]
620 : idx->index2[tid].offset[beg>>BAM_LIDX_SHIFT];
621 if (min_off == 0) { // improvement for index files built by tabix prior to 0.1.4
622 int n = beg>>BAM_LIDX_SHIFT;
623 if (n > idx->index2[tid].n) n = idx->index2[tid].n;
624 for (i = n - 1; i >= 0; --i)
625 if (idx->index2[tid].offset[i] != 0) break;
626 if (i >= 0) min_off = idx->index2[tid].offset[i];
628 } else min_off = 0; // tabix 0.1.2 may produce such index files
629 for (i = n_off = 0; i < n_bins; ++i) {
630 if ((k = kh_get(i, index, bins[i])) != kh_end(index))
631 n_off += kh_value(index, k).n;
634 free(bins); return iter;
636 off = (pair64_t*)calloc(n_off, 16);
637 for (i = n_off = 0; i < n_bins; ++i) {
638 if ((k = kh_get(i, index, bins[i])) != kh_end(index)) {
640 bam_binlist_t *p = &kh_value(index, k);
641 for (j = 0; j < p->n; ++j)
642 if (p->list[j].v > min_off) off[n_off++] = p->list[j];
647 free(off); return iter;
650 bam1_t *b = (bam1_t*)calloc(1, sizeof(bam1_t));
652 ks_introsort(off, n_off, off);
653 // resolve completely contained adjacent blocks
654 for (i = 1, l = 0; i < n_off; ++i)
655 if (off[l].v < off[i].v)
658 // resolve overlaps between adjacent blocks; this may happen due to the merge in indexing
659 for (i = 1; i < n_off; ++i)
660 if (off[i-1].v >= off[i].u) off[i-1].v = off[i].u;
661 { // merge adjacent blocks
662 #if defined(BAM_TRUE_OFFSET) || defined(BAM_VIRTUAL_OFFSET16)
663 for (i = 1, l = 0; i < n_off; ++i) {
664 #ifdef BAM_TRUE_OFFSET
665 if (off[l].v + BAM_MIN_CHUNK_GAP > off[i].u) off[l].v = off[i].v;
667 if (off[l].v>>16 == off[i].u>>16) off[l].v = off[i].v;
669 else off[++l] = off[i];
676 iter->n_off = n_off; iter->off = off;
680 pair64_t *get_chunk_coordinates(const bam_index_t *idx, int tid, int beg, int end, int *cnt_off)
681 { // for pysam compatibility
684 iter = bam_iter_query(idx, tid, beg, end);
685 off = iter->off; *cnt_off = iter->n_off;
690 void bam_iter_destroy(bam_iter_t iter)
692 if (iter) { free(iter->off); free(iter); }
695 int bam_iter_read(bamFile fp, bam_iter_t iter, bam1_t *b)
698 if (iter && iter->finished) return -1;
699 if (iter == 0 || iter->from_first) {
700 ret = bam_read1(fp, b);
701 if (ret < 0 && iter) iter->finished = 1;
704 if (iter->off == 0) return -1;
706 if (iter->curr_off == 0 || iter->curr_off >= iter->off[iter->i].v) { // then jump to the next chunk
707 if (iter->i == iter->n_off - 1) { ret = -1; break; } // no more chunks
708 if (iter->i >= 0) assert(iter->curr_off == iter->off[iter->i].v); // otherwise bug
709 if (iter->i < 0 || iter->off[iter->i].v != iter->off[iter->i+1].u) { // not adjacent chunks; then seek
710 bam_seek(fp, iter->off[iter->i+1].u, SEEK_SET);
711 iter->curr_off = bam_tell(fp);
715 if ((ret = bam_read1(fp, b)) >= 0) {
716 iter->curr_off = bam_tell(fp);
717 if (b->core.tid != iter->tid || b->core.pos >= iter->end) { // no need to proceed
718 ret = bam_validate1(NULL, b)? -1 : -5; // determine whether end of region or error
721 else if (is_overlap(iter->beg, iter->end, b)) return ret;
722 } else break; // end of file or error
728 int bam_fetch(bamFile fp, const bam_index_t *idx, int tid, int beg, int end, void *data, bam_fetch_f func)
734 iter = bam_iter_query(idx, tid, beg, end);
735 while ((ret = bam_iter_read(fp, iter, b)) >= 0) func(b, data);
736 bam_iter_destroy(iter);
738 return (ret == -1)? 0 : ret;