Lines Matching refs:ca

359 struct cache *ca = bio->bi_private; in write_super_endio() local
362 bch_count_io_errors(ca, bio->bi_status, 0, in write_super_endio()
364 closure_put(&ca->set->sb_write); in write_super_endio()
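The write_super_endio() hits above show the superblock write-completion path: bi_private carries the struct cache, the bio status is folded into the device's error counters, and the closure taken for the write is released. A minimal userspace sketch of the same completion pattern; the names (on_write_done, struct fake_cache) are hypothetical stand-ins for the kernel types:

#include <stdio.h>

/* Hypothetical stand-ins for struct cache and the closure refcount. */
struct fake_cache {
    int io_errors;
    int sb_write_refs;      /* plays the role of ca->set->sb_write */
};

/* Completion callback: private data carries the cache, the status is
 * accounted, and the in-flight reference is dropped (closure_put). */
static void on_write_done(void *private, int status)
{
    struct fake_cache *ca = private;

    if (status)
        ca->io_errors++;    /* bch_count_io_errors() analogue */
    ca->sb_write_refs--;    /* closure_put() analogue */
}

int main(void)
{
    struct fake_cache ca = { .io_errors = 0, .sb_write_refs = 1 };

    on_write_done(&ca, 0);
    printf("errors=%d refs=%d\n", ca.io_errors, ca.sb_write_refs);
    return 0;
}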
377 struct cache *ca = c->cache; in bcache_write_super() local
378 struct bio *bio = &ca->sb_bio; in bcache_write_super()
384 ca->sb.seq++; in bcache_write_super()
386 if (ca->sb.version < version) in bcache_write_super()
387 ca->sb.version = version; in bcache_write_super()
389 bio_init(bio, ca->sb_bv, 1); in bcache_write_super()
390 bio_set_dev(bio, ca->bdev); in bcache_write_super()
392 bio->bi_private = ca; in bcache_write_super()
395 __write_super(&ca->sb, ca->sb_disk, bio); in bcache_write_super()
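bcache_write_super() bumps the sequence number on every write and only ever raises, never lowers, the on-disk version before handing the superblock to __write_super(). A standalone sketch of that bookkeeping, assuming a simplified superblock (the field names mirror ca->sb; the values are examples):

#include <stdio.h>
#include <stdint.h>

struct fake_sb {
    uint64_t seq;
    unsigned version;
};

static void write_super(struct fake_sb *sb, unsigned want_version)
{
    sb->seq++;                      /* every write gets a newer seq */
    if (sb->version < want_version) /* never downgrade the on-disk version */
        sb->version = want_version;
    /* ...the driver then maps sb into a bio and submits it */
}

int main(void)
{
    struct fake_sb sb = { .seq = 41, .version = 3 };

    write_super(&sb, 5);
    printf("seq=%llu version=%u\n", (unsigned long long)sb.seq, sb.version);
    return 0;
}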
505 struct cache *ca = c->cache; in __uuid_write() local
514 size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS; in __uuid_write()
520 atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written); in __uuid_write()
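The __uuid_write() hits size the I/O in sectors and charge one bucket of metadata to the write counter. A sketch of that arithmetic with made-up geometry; PAGE_SECTORS is 8 on a 4K-page kernel (4096 / 512):

#include <stdio.h>

#define PAGE_SECTORS (4096 / 512)

int main(void)
{
    unsigned meta_bucket_pages = 2;         /* hypothetical */
    unsigned long bucket_size = 1024;       /* sectors, hypothetical */
    unsigned long meta_sectors_written = 0;

    unsigned long size = meta_bucket_pages * PAGE_SECTORS;

    meta_sectors_written += bucket_size;    /* atomic_long_add() analogue */
    printf("io size=%lu sectors, meta written=%lu\n",
           size, meta_sectors_written);
    return 0;
}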
585 struct cache *ca = bio->bi_private; in prio_endio() local
587 cache_set_err_on(bio->bi_status, ca->set, "accessing priorities"); in prio_endio()
588 bch_bbio_free(bio, ca->set); in prio_endio()
589 closure_put(&ca->prio); in prio_endio()
592 static void prio_io(struct cache *ca, uint64_t bucket, int op, in prio_io() argument
595 struct closure *cl = &ca->prio; in prio_io()
596 struct bio *bio = bch_bbio_alloc(ca->set); in prio_io()
600 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; in prio_io()
601 bio_set_dev(bio, ca->bdev); in prio_io()
602 bio->bi_iter.bi_size = meta_bucket_bytes(&ca->sb); in prio_io()
605 bio->bi_private = ca; in prio_io()
607 bch_bio_map(bio, ca->disk_buckets); in prio_io()
609 closure_bio_submit(ca->set, bio, &ca->prio); in prio_io()
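prio_io() addresses a bucket by multiplying the bucket index by the bucket size in sectors (bi_sector), with the transfer length capped at meta_bucket_bytes(). A tiny sketch of that index-to-sector mapping, using made-up geometry:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t bucket_size = 1024;    /* sectors per bucket, hypothetical */
    uint64_t bucket = 37;           /* hypothetical bucket index */

    uint64_t first_sector = bucket * bucket_size;   /* bi_sector */
    uint64_t first_byte = first_sector << 9;        /* 512B sectors */

    printf("bucket %llu -> sector %llu (byte %llu)\n",
           (unsigned long long)bucket,
           (unsigned long long)first_sector,
           (unsigned long long)first_byte);
    return 0;
}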
613 int bch_prio_write(struct cache *ca, bool wait) in bch_prio_write() argument
620 fifo_used(&ca->free[RESERVE_PRIO]), in bch_prio_write()
621 fifo_used(&ca->free[RESERVE_NONE]), in bch_prio_write()
622 fifo_used(&ca->free_inc)); in bch_prio_write()
630 size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) + in bch_prio_write()
631 fifo_used(&ca->free[RESERVE_NONE]); in bch_prio_write()
632 if (prio_buckets(ca) > avail) in bch_prio_write()
638 lockdep_assert_held(&ca->set->bucket_lock); in bch_prio_write()
640 ca->disk_buckets->seq++; in bch_prio_write()
642 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), in bch_prio_write()
643 &ca->meta_sectors_written); in bch_prio_write()
645 for (i = prio_buckets(ca) - 1; i >= 0; --i) { in bch_prio_write()
647 struct prio_set *p = ca->disk_buckets; in bch_prio_write()
649 struct bucket_disk *end = d + prios_per_bucket(ca); in bch_prio_write()
651 for (b = ca->buckets + i * prios_per_bucket(ca); in bch_prio_write()
652 b < ca->buckets + ca->sb.nbuckets && d < end; in bch_prio_write()
658 p->next_bucket = ca->prio_buckets[i + 1]; in bch_prio_write()
659 p->magic = pset_magic(&ca->sb); in bch_prio_write()
660 p->csum = bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8); in bch_prio_write()
662 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait); in bch_prio_write()
665 mutex_unlock(&ca->set->bucket_lock); in bch_prio_write()
666 prio_io(ca, bucket, REQ_OP_WRITE, 0); in bch_prio_write()
667 mutex_lock(&ca->set->bucket_lock); in bch_prio_write()
669 ca->prio_buckets[i] = bucket; in bch_prio_write()
670 atomic_dec_bug(&ca->buckets[bucket].pin); in bch_prio_write()
673 mutex_unlock(&ca->set->bucket_lock); in bch_prio_write()
675 bch_journal_meta(ca->set, &cl); in bch_prio_write()
678 mutex_lock(&ca->set->bucket_lock); in bch_prio_write()
684 for (i = 0; i < prio_buckets(ca); i++) { in bch_prio_write()
685 if (ca->prio_last_buckets[i]) in bch_prio_write()
686 __bch_bucket_free(ca, in bch_prio_write()
687 &ca->buckets[ca->prio_last_buckets[i]]); in bch_prio_write()
689 ca->prio_last_buckets[i] = ca->prio_buckets[i]; in bch_prio_write()
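bch_prio_write() first checks that enough free buckets are reserved, then walks the prio pages last-to-first so that when page i is written it already knows the bucket holding page i+1, producing an on-disk singly linked list (p->next_bucket); each page's csum covers everything after the 8-byte csum field itself, and the bucket_lock is dropped around the actual I/O. A compact sketch of the reverse-order chain building, with hypothetical sizes and alloc_bucket() standing in for bch_bucket_alloc():

#include <stdio.h>
#include <stdint.h>

#define NPAGES 4

static uint64_t next_free = 100;    /* pretend allocator cursor */

static uint64_t alloc_bucket(void)
{
    return next_free++;
}

int main(void)
{
    uint64_t prio_buckets[NPAGES + 1] = { 0 }; /* [NPAGES] stays 0: list end */
    int i;

    for (i = NPAGES - 1; i >= 0; --i) {
        uint64_t next = prio_buckets[i + 1];    /* becomes p->next_bucket */

        prio_buckets[i] = alloc_bucket();
        printf("page %d in bucket %llu, next_bucket=%llu\n", i,
               (unsigned long long)prio_buckets[i],
               (unsigned long long)next);
    }
    return 0;
}

The old buckets (prio_last_buckets) are only freed after the journal write lands, so a crash mid-write still leaves a complete previous generation on disk.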
694 static int prio_read(struct cache *ca, uint64_t bucket) in prio_read() argument
696 struct prio_set *p = ca->disk_buckets; in prio_read()
697 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; in prio_read()
702 for (b = ca->buckets; in prio_read()
703 b < ca->buckets + ca->sb.nbuckets; in prio_read()
706 ca->prio_buckets[bucket_nr] = bucket; in prio_read()
707 ca->prio_last_buckets[bucket_nr] = bucket; in prio_read()
710 prio_io(ca, bucket, REQ_OP_READ, 0); in prio_read()
713 bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) { in prio_read()
718 if (p->magic != pset_magic(&ca->sb)) { in prio_read()
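prio_read() follows the next_bucket chain written above and validates each page by checksumming everything after the 8-byte csum field (hence "meta_bucket_bytes - 8", starting at p->magic) before comparing the magic. A sketch of that layout-dependent check, with a trivial sum in place of bch_crc64():

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct prio_page {
    uint64_t csum;          /* first 8 bytes, excluded from the sum */
    uint64_t magic;
    unsigned char data[48];
};

static uint64_t toy_csum(const void *p, size_t len)
{
    const unsigned char *b = p;
    uint64_t sum = 0;

    while (len--)
        sum = sum * 31 + *b++;
    return sum;
}

int main(void)
{
    struct prio_page p = { .magic = 0x6d616763 };

    memset(p.data, 0xab, sizeof(p.data));
    p.csum = toy_csum(&p.magic, sizeof(p) - 8);

    printf("csum %s\n",
           p.csum == toy_csum(&p.magic, sizeof(p) - 8) ? "ok" : "bad");
    return 0;
}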
796 struct cache *ca = d->c->cache; in bcache_device_unlink() local
801 bd_unlink_disk_holder(ca->bdev, d->disk); in bcache_device_unlink()
808 struct cache *ca = c->cache; in bcache_device_link() local
811 bd_link_disk_holder(ca->bdev, d->disk); in bcache_device_link()
1699 struct cache *ca; in cache_set_free() local
1711 ca = c->cache; in cache_set_free()
1712 if (ca) { in cache_set_free()
1713 ca->set = NULL; in cache_set_free()
1715 kobject_put(&ca->kobj); in cache_set_free()
1740 struct cache *ca = c->cache; in cache_set_flush() local
1766 if (ca->alloc_thread) in cache_set_flush()
1767 kthread_stop(ca->alloc_thread); in cache_set_flush()
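The cache_set_free()/cache_set_flush() hits show the teardown ordering: the allocator thread is stopped, the cache's back-pointer to the dying set is cleared, and only then is the reference dropped. A minimal userspace sketch of that sever-then-put shape; all types here are hypothetical stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct fake_set { int unused; };

struct fake_cache {
    struct fake_set *set;
    int refs;
};

static void cache_put(struct fake_cache *ca)
{
    if (--ca->refs == 0)    /* kobject_put() analogue */
        free(ca);
}

int main(void)
{
    struct fake_set set = { 0 };
    struct fake_cache *ca = malloc(sizeof(*ca));

    ca->refs = 1;
    ca->set = &set;
    /* cache_set_free() order: */
    ca->set = NULL;         /* sever the back-pointer first */
    cache_put(ca);          /* then drop the reference */
    printf("torn down\n");
    return 0;
}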
1883 struct cache *ca = container_of(sb, struct cache, sb); in bch_cache_set_alloc() local
1907 c->cache = ca; in bch_cache_set_alloc()
1998 struct cache *ca = c->cache; in run_cache_set() local
2005 c->nbuckets = ca->sb.nbuckets; in run_cache_set()
2025 if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev])) in run_cache_set()
2070 if (bch_cache_allocator_start(ca)) in run_cache_set()
2093 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, in run_cache_set()
2096 for (j = 0; j < ca->sb.keys; j++) in run_cache_set()
2097 ca->sb.d[j] = ca->sb.first_bucket + j; in run_cache_set()
2102 if (bch_cache_allocator_start(ca)) in run_cache_set()
2106 bch_prio_write(ca, true); in run_cache_set()
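On a freshly formatted set, run_cache_set() carves out roughly 1/128 of the buckets for the journal, clamped to [2, SB_JOURNAL_BUCKETS], and points the sb.d[] slots at the first buckets on disk before the initial bch_prio_write(). A standalone sketch of that arithmetic; the nbuckets/first_bucket values are examples:

#include <stdio.h>

#define SB_JOURNAL_BUCKETS 256

static int clamp_int(int v, int lo, int hi)
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    long nbuckets = 100000;     /* example */
    unsigned first_bucket = 1;  /* example */
    unsigned d[SB_JOURNAL_BUCKETS];
    int keys, j;

    keys = clamp_int(nbuckets >> 7, 2, SB_JOURNAL_BUCKETS);
    for (j = 0; j < keys; j++)
        d[j] = first_bucket + j;

    printf("journal gets %d buckets starting at %u\n", keys, d[0]);
    return 0;
}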
2170 static const char *register_cache_set(struct cache *ca) in register_cache_set() argument
2177 if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) { in register_cache_set()
2184 c = bch_cache_set_alloc(&ca->sb); in register_cache_set()
2200 sprintf(buf, "cache%i", ca->sb.nr_this_dev); in register_cache_set()
2201 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || in register_cache_set()
2202 sysfs_create_link(&c->kobj, &ca->kobj, buf)) in register_cache_set()
2205 kobject_get(&ca->kobj); in register_cache_set()
2206 ca->set = c; in register_cache_set()
2207 ca->set->cache = ca; in register_cache_set()
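register_cache_set() looks for an existing set by comparing 16-byte set UUIDs with memcmp(), allocating a new one only on a miss, then cross-links cache and set in sysfs. A standalone sketch of that lookup over a small example array; struct fake_set and the sample data are hypothetical:

#include <stdio.h>
#include <string.h>

struct fake_set {
    unsigned char set_uuid[16];
    const char *name;
};

static struct fake_set *find_set(struct fake_set *sets, int n,
                                 const unsigned char *uuid)
{
    int i;

    for (i = 0; i < n; i++)
        if (!memcmp(sets[i].set_uuid, uuid, 16))
            return &sets[i];
    return NULL;    /* caller allocates a new set, as in the driver */
}

int main(void)
{
    struct fake_set sets[2] = {
        { { 1 }, "set-a" },
        { { 2 }, "set-b" },
    };
    unsigned char want[16] = { 2 };
    struct fake_set *c = find_set(sets, 2, want);

    printf("matched %s\n", c ? c->name : "none");
    return 0;
}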
2224 struct cache *ca = container_of(kobj, struct cache, kobj); in bch_cache_release() local
2227 if (ca->set) { in bch_cache_release()
2228 BUG_ON(ca->set->cache != ca); in bch_cache_release()
2229 ca->set->cache = NULL; in bch_cache_release()
2232 free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb))); in bch_cache_release()
2233 kfree(ca->prio_buckets); in bch_cache_release()
2234 vfree(ca->buckets); in bch_cache_release()
2236 free_heap(&ca->heap); in bch_cache_release()
2237 free_fifo(&ca->free_inc); in bch_cache_release()
2240 free_fifo(&ca->free[i]); in bch_cache_release()
2242 if (ca->sb_disk) in bch_cache_release()
2243 put_page(virt_to_page(ca->sb_disk)); in bch_cache_release()
2245 if (!IS_ERR_OR_NULL(ca->bdev)) in bch_cache_release()
2246 blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); in bch_cache_release()
2248 kfree(ca); in bch_cache_release()
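bch_cache_release() frees resources in roughly the reverse order they were set up, guarding each with a NULL (or IS_ERR) check so the same path handles both fully and partially initialized caches. A minimal userspace sketch of that shape; the fields are hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct fake_cache {
    void *disk_buckets;
    void *prio_buckets;
    void *buckets;
};

static void release(struct fake_cache *ca)
{
    free(ca->disk_buckets); /* free_pages() analogue */
    free(ca->prio_buckets); /* kfree() analogue */
    free(ca->buckets);      /* vfree() analogue */
    free(ca);
}

int main(void)
{
    struct fake_cache *ca = calloc(1, sizeof(*ca));

    ca->prio_buckets = malloc(16);  /* partially initialized is fine: */
    release(ca);                    /* free(NULL) is a no-op */
    printf("released\n");
    return 0;
}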
2252 static int cache_alloc(struct cache *ca) in cache_alloc() argument
2261 kobject_init(&ca->kobj, &bch_cache_ktype); in cache_alloc()
2263 bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8); in cache_alloc()
2274 btree_buckets = ca->sb.njournal_buckets ?: 8; in cache_alloc()
2275 free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; in cache_alloc()
2282 if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, in cache_alloc()
2288 if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), in cache_alloc()
2294 if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) { in cache_alloc()
2299 if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) { in cache_alloc()
2304 if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) { in cache_alloc()
2309 if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) { in cache_alloc()
2314 ca->buckets = vzalloc(array_size(sizeof(struct bucket), in cache_alloc()
2315 ca->sb.nbuckets)); in cache_alloc()
2316 if (!ca->buckets) { in cache_alloc()
2321 ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t), in cache_alloc()
2322 prio_buckets(ca), 2), in cache_alloc()
2324 if (!ca->prio_buckets) { in cache_alloc()
2329 ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb); in cache_alloc()
2330 if (!ca->disk_buckets) { in cache_alloc()
2335 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); in cache_alloc()
2337 for_each_bucket(b, ca) in cache_alloc()
2342 kfree(ca->prio_buckets); in cache_alloc()
2344 vfree(ca->buckets); in cache_alloc()
2346 free_heap(&ca->heap); in cache_alloc()
2348 free_fifo(&ca->free_inc); in cache_alloc()
2350 free_fifo(&ca->free[RESERVE_NONE]); in cache_alloc()
2352 free_fifo(&ca->free[RESERVE_MOVINGGC]); in cache_alloc()
2354 free_fifo(&ca->free[RESERVE_PRIO]); in cache_alloc()
2356 free_fifo(&ca->free[RESERVE_BTREE]); in cache_alloc()
2361 pr_notice("error %s: %s\n", ca->cache_dev_name, err); in cache_alloc()
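cache_alloc() allocates a chain of fifos, the heap, and the bucket arrays, and on any failure jumps to a label that unwinds everything allocated so far in reverse (err_free_* ladders). A compact sketch of that kernel goto-unwind idiom with generic buffers:

#include <stdio.h>
#include <stdlib.h>

static int alloc_all(void **a, void **b, void **c)
{
    if (!(*a = malloc(32)))
        goto err_a;
    if (!(*b = malloc(64)))
        goto err_b;
    if (!(*c = malloc(128)))
        goto err_c;
    return 0;

err_c:
    free(*b);
err_b:
    free(*a);
err_a:
    return -1;  /* -ENOMEM in the driver */
}

int main(void)
{
    void *a, *b, *c;

    if (alloc_all(&a, &b, &c) == 0) {
        printf("allocated\n");
        free(c);
        free(b);
        free(a);
    }
    return 0;
}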
2366 struct block_device *bdev, struct cache *ca) in register_cache() argument
2371 bdevname(bdev, ca->cache_dev_name); in register_cache()
2372 memcpy(&ca->sb, sb, sizeof(struct cache_sb)); in register_cache()
2373 ca->bdev = bdev; in register_cache()
2374 ca->bdev->bd_holder = ca; in register_cache()
2375 ca->sb_disk = sb_disk; in register_cache()
2378 ca->discard = CACHE_DISCARD(&ca->sb); in register_cache()
2380 ret = cache_alloc(ca); in register_cache()
2398 if (kobject_add(&ca->kobj, in register_cache()
2407 err = register_cache_set(ca); in register_cache()
2415 pr_info("registered cache device %s\n", ca->cache_dev_name); in register_cache()
2418 kobject_put(&ca->kobj); in register_cache()
2422 pr_notice("error %s: %s\n", ca->cache_dev_name, err); in register_cache()
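Once cache_alloc() succeeds, register_cache() routes every later error through kobject_put(), so bch_cache_release() is the single owner of cleanup whether registration succeeds or fails. A sketch of that single-owner pattern using a function pointer in place of the kobject release hook; all names here are hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct fake_obj {
    int refs;
    void (*release)(struct fake_obj *);
};

static void obj_put(struct fake_obj *o)
{
    if (--o->refs == 0)
        o->release(o);
}

static void fake_release(struct fake_obj *o)
{
    printf("release: freeing everything in one place\n");
    free(o);
}

int main(void)
{
    struct fake_obj *o = malloc(sizeof(*o));

    o->refs = 1;
    o->release = fake_release;
    /* ...registration fails somewhere... */
    obj_put(o); /* single error path, as after cache_alloc() */
    return 0;
}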
2459 struct cache *ca = c->cache; in bch_is_open_cache() local
2461 if (ca->bdev == bdev) in bch_is_open_cache()
2516 struct cache *ca; in register_cache_worker() local
2518 ca = kzalloc(sizeof(*ca), GFP_KERNEL); in register_cache_worker()
2519 if (!ca) { in register_cache_worker()
2527 if (register_cache(args->sb, args->sb_disk, args->bdev, ca) != 0) in register_cache_worker()
2652 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); in register_bcache() local
2654 if (!ca) in register_bcache()
2658 if (register_cache(sb, sb_disk, bdev, ca) != 0) in register_bcache()