Lines Matching refs:k

99 #define PTR_HASH(c, k)							\  argument
100 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
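
PTR_HASH() reduces a key to a hash of its first pointer (the raw pointer value shifted by the cache's bucket_bits, OR'd with that pointer's generation); the in-memory node cache below uses this value both to pick a hash chain and as the identity test for a cached node. A minimal usage sketch pieced together from the mca_hash()/mca_find() lines further down (illustrative, not verbatim btree.c):

    /* look up a cached btree node by the PTR_HASH() of its key */
    struct hlist_head *head =
        &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];

    hlist_for_each_entry_rcu(b, head, hash)
        if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
            return b;   /* found the node backing this key */
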
128 void bkey_put(struct cache_set *c, struct bkey *k) in bkey_put() argument
132 for (i = 0; i < KEY_PTRS(k); i++) in bkey_put()
133 if (ptr_available(c, k, i)) in bkey_put()
134 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); in bkey_put()
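
bkey_put() drops the "pin" reference on the bucket behind every available pointer in the key. In this file it shows up right after a fresh bucket allocation (see __bch_btree_node_alloc() below) and on paths that discard keys which will never be inserted. A small usage sketch assembled from the bch_btree_insert()/btree_gc_coalesce() lines near the end of the listing (illustrative, not verbatim):

    /* discard queued keys: release the pin each one holds on its buckets */
    while ((k = bch_keylist_pop(keys)))
        bkey_put(c, k);
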
342 BKEY_PADDED(key) k; in do_btree_node_write()
371 bkey_copy(&k.key, &b->key); in do_btree_node_write()
372 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + in do_btree_node_write()
385 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
396 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
543 static unsigned int btree_order(struct bkey *k) in btree_order() argument
545 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1); in btree_order()
548 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) in mca_data_alloc() argument
553 btree_order(k)), in mca_data_alloc()
563 struct bkey *k, gfp_t gfp) in mca_bucket_alloc() argument
583 mca_data_alloc(b, k, gfp); in mca_bucket_alloc()
824 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k) in mca_hash() argument
826 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)]; in mca_hash()
829 static struct btree *mca_find(struct cache_set *c, struct bkey *k) in mca_find() argument
834 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash) in mca_find()
835 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) in mca_find()
861 struct bkey *k) in mca_cannibalize() argument
871 if (!mca_reap(b, btree_order(k), false)) in mca_cannibalize()
875 if (!mca_reap(b, btree_order(k), true)) in mca_cannibalize()
899 struct bkey *k, int level) in mca_alloc() argument
907 if (mca_find(c, k)) in mca_alloc()
914 if (!mca_reap(b, btree_order(k), false)) in mca_alloc()
922 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
929 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
939 bkey_copy(&b->key, k); in mca_alloc()
942 hlist_add_head_rcu(&b->hash, mca_hash(c, k)); in mca_alloc()
962 b = mca_cannibalize(c, op, k); in mca_alloc()
979 struct bkey *k, int level, bool write, in bch_btree_node_get() argument
987 b = mca_find(c, k); in bch_btree_node_get()
994 b = mca_alloc(c, op, k, level); in bch_btree_node_get()
1008 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { in bch_btree_node_get()
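
bch_btree_node_get() first tries mca_find(); only on a miss does it fall back to mca_alloc(). The PTR_HASH() comparison at source line 1008 re-validates a node that was found in the cache, since the node can be freed and its slot reused while the caller waits for its lock. A hedged sketch of that check (the unlock/retry that presumably follows is not among the matched lines):

    /* after taking the node's lock, make sure it is still the node we
     * looked up; otherwise drop it and retry the lookup (assumption) */
    if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
        /* unlock b, go back to the mca_find() lookup */
    }
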
1035 static void btree_node_prefetch(struct btree *parent, struct bkey *k) in btree_node_prefetch() argument
1040 b = mca_alloc(parent->c, NULL, k, parent->level - 1); in btree_node_prefetch()
1092 BKEY_PADDED(key) k; in __bch_btree_node_alloc()
1097 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait)) in __bch_btree_node_alloc()
1100 bkey_put(c, &k.key); in __bch_btree_node_alloc()
1101 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS); in __bch_btree_node_alloc()
1103 b = mca_alloc(c, op, &k.key, level); in __bch_btree_node_alloc()
1121 bch_bucket_free(c, &k.key); in __bch_btree_node_alloc()
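
__bch_btree_node_alloc() strings the pieces above into the allocation path for a new node: take a bucket from the btree reserve into a stack-padded key, drop the allocator's pin with bkey_put(), size the key to a whole node, then ask mca_alloc() for the in-memory structure, returning the bucket on failure. A condensed sketch; the error-handling shape (the IS_ERR() check and the err label) is an assumption, only the individual calls appear in the matched lines:

    BKEY_PADDED(key) k;

    if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
        goto err;                               /* no bucket available */

    bkey_put(c, &k.key);                        /* drop the allocator's pin */
    SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

    b = mca_alloc(c, op, &k.key, level);
    if (IS_ERR(b)) {
        bch_bucket_free(c, &k.key);             /* give the bucket back */
        goto err;
    }
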
1151 static void make_btree_freeing_key(struct btree *b, struct bkey *k) in make_btree_freeing_key() argument
1159 bkey_copy(k, &b->key); in make_btree_freeing_key()
1160 bkey_copy_key(k, &ZERO_KEY); in make_btree_freeing_key()
1162 for (i = 0; i < KEY_PTRS(k); i++) in make_btree_freeing_key()
1163 SET_PTR_GEN(k, i, in make_btree_freeing_key()
1194 struct bkey *k) in __bch_btree_mark_key() argument
1205 if (!bkey_cmp(k, &ZERO_KEY)) in __bch_btree_mark_key()
1208 for (i = 0; i < KEY_PTRS(k); i++) { in __bch_btree_mark_key()
1209 if (!ptr_available(c, k, i)) in __bch_btree_mark_key()
1212 g = PTR_BUCKET(c, k, i); in __bch_btree_mark_key()
1214 if (gen_after(g->last_gc, PTR_GEN(k, i))) in __bch_btree_mark_key()
1215 g->last_gc = PTR_GEN(k, i); in __bch_btree_mark_key()
1217 if (ptr_stale(c, k, i)) { in __bch_btree_mark_key()
1218 stale = max(stale, ptr_stale(c, k, i)); in __bch_btree_mark_key()
1229 else if (KEY_DIRTY(k)) in __bch_btree_mark_key()
1236 GC_SECTORS_USED(g) + KEY_SIZE(k), in __bch_btree_mark_key()
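
__bch_btree_mark_key() is the per-pointer GC bookkeeping: for each available pointer it fetches the bucket, keeps g->last_gc no newer than the pointer's generation, records the worst staleness seen, and accounts the key's size into the bucket's GC_SECTORS_USED (internal-node keys mark the bucket as metadata, KEY_DIRTY() keys as dirty data). A condensed reading of the lines above; the SET_GC_MARK()/sector-accounting writes themselves are not among the matched lines:

    for (i = 0; i < KEY_PTRS(k); i++) {
        if (!ptr_available(c, k, i))
            continue;

        g = PTR_BUCKET(c, k, i);

        if (gen_after(g->last_gc, PTR_GEN(k, i)))
            g->last_gc = PTR_GEN(k, i);   /* track the oldest referenced gen */

        if (ptr_stale(c, k, i)) {
            stale = max(stale, ptr_stale(c, k, i));
            continue;
        }

        /* mark metadata (level > 0) or dirty data, then fold
         * GC_SECTORS_USED(g) + KEY_SIZE(k) back into the bucket */
    }
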
1245 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k) argument
1247 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k) in bch_initial_mark_key() argument
1251 for (i = 0; i < KEY_PTRS(k); i++) in bch_initial_mark_key()
1252 if (ptr_available(c, k, i) && in bch_initial_mark_key()
1253 !ptr_stale(c, k, i)) { in bch_initial_mark_key()
1254 struct bucket *b = PTR_BUCKET(c, k, i); in bch_initial_mark_key()
1256 b->gen = PTR_GEN(k, i); in bch_initial_mark_key()
1258 if (level && bkey_cmp(k, &ZERO_KEY)) in bch_initial_mark_key()
1264 __bch_btree_mark_key(c, level, k); in bch_initial_mark_key()
1276 struct bkey *k; in btree_gc_mark_node() local
1282 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { in btree_gc_mark_node()
1283 stale = max(stale, btree_mark_key(b, k)); in btree_gc_mark_node()
1286 if (bch_ptr_bad(&b->keys, k)) in btree_gc_mark_node()
1289 gc->key_bytes += bkey_u64s(k); in btree_gc_mark_node()
1293 gc->data += KEY_SIZE(k); in btree_gc_mark_node()
1333 struct bkey *k; in btree_gc_coalesce() local
1374 struct bkey *k, *last = NULL; in btree_gc_coalesce() local
1379 for (k = n2->start; in btree_gc_coalesce()
1380 k < bset_bkey_last(n2); in btree_gc_coalesce()
1381 k = bkey_next(k)) { in btree_gc_coalesce()
1383 bkey_u64s(k), in btree_gc_coalesce()
1387 last = k; in btree_gc_coalesce()
1388 keys += bkey_u64s(k); in btree_gc_coalesce()
1484 while ((k = bch_keylist_pop(&keylist))) in btree_gc_coalesce()
1485 if (!bkey_cmp(k, &ZERO_KEY)) in btree_gc_coalesce()
1535 struct bkey *k; in btree_gc_count_keys() local
1539 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) in btree_gc_count_keys()
1540 ret += bkey_u64s(k); in btree_gc_count_keys()
1576 struct bkey *k; in btree_gc_recurse() local
1587 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); in btree_gc_recurse()
1588 if (k) { in btree_gc_recurse()
1589 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, in btree_gc_recurse()
1726 uint64_t *k; in bch_btree_gc_finish() local
1764 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++) in bch_btree_gc_finish()
1765 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA); in bch_btree_gc_finish()
1767 for (k = ca->prio_buckets; in bch_btree_gc_finish()
1768 k < ca->prio_buckets + prio_buckets(ca) * 2; k++) in bch_btree_gc_finish()
1769 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA); in bch_btree_gc_finish()
1876 struct bkey *k, *p = NULL; in bch_btree_check_recurse() local
1879 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) in bch_btree_check_recurse()
1880 bch_initial_mark_key(b->c, b->level, k); in bch_btree_check_recurse()
1888 k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_check_recurse()
1890 if (k) { in bch_btree_check_recurse()
1891 btree_node_prefetch(b, k); in bch_btree_check_recurse()
1902 p = k; in bch_btree_check_recurse()
1917 struct bkey *k, *p; in bch_btree_check_thread() local
1920 k = p = NULL; in bch_btree_check_thread()
1926 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); in bch_btree_check_thread()
1927 BUG_ON(!k); in bch_btree_check_thread()
1929 p = k; in bch_btree_check_thread()
1930 while (k) { in bch_btree_check_thread()
1944 k = bch_btree_iter_next_filter(&iter, in bch_btree_check_thread()
1947 if (k) in bch_btree_check_thread()
1948 p = k; in bch_btree_check_thread()
2007 struct bkey *k = NULL; in bch_btree_check() local
2012 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) in bch_btree_check()
2013 bch_initial_mark_key(c, c->root->level, k); in bch_btree_check()
2114 static bool btree_insert_key(struct btree *b, struct bkey *k, in btree_insert_key() argument
2119 BUG_ON(bkey_cmp(k, &b->key) > 0); in btree_insert_key()
2121 status = bch_btree_insert_key(&b->keys, k, replace_key); in btree_insert_key()
2126 trace_bcache_btree_insert_key(b, k, replace_key != NULL, in btree_insert_key()
2154 struct bkey *k = insert_keys->keys; in bch_btree_insert_keys() local
2156 if (bkey_u64s(k) > insert_u64s_remaining(b)) in bch_btree_insert_keys()
2159 if (bkey_cmp(k, &b->key) <= 0) { in bch_btree_insert_keys()
2161 bkey_put(b->c, k); in bch_btree_insert_keys()
2163 ret |= btree_insert_key(b, k, replace_key); in bch_btree_insert_keys()
2165 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) { in bch_btree_insert_keys()
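
bch_btree_insert_keys() consumes the keylist in order against the node's end key (b->key): a key that fits within this node is inserted whole via btree_insert_key(), while a key whose start precedes b->key but whose end lies beyond it straddles the node boundary. A hedged reading of the branch structure above, written as the body of the insertion loop (the split handling in the second branch is not part of the matched lines):

    struct bkey *k = insert_keys->keys;

    if (bkey_u64s(k) > insert_u64s_remaining(b))
        break;                                  /* node is out of space */

    if (bkey_cmp(k, &b->key) <= 0) {
        /* key ends at or before b->key: insert it as-is */
        ret |= btree_insert_key(b, k, replace_key);
    } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
        /* key straddles b->key: presumably only the front part is
         * inserted here and the remainder stays queued (assumption) */
    }
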
2458 struct bkey *k; in bch_btree_insert() local
2462 while ((k = bch_keylist_pop(keys))) in bch_btree_insert()
2463 bkey_put(c, k); in bch_btree_insert()
2503 struct bkey *k; in bch_btree_map_nodes_recurse() local
2508 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, in bch_btree_map_nodes_recurse()
2510 ret = bcache_btree(map_nodes_recurse, k, b, in bch_btree_map_nodes_recurse()
2536 struct bkey *k; in bch_btree_map_keys_recurse() local
2541 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { in bch_btree_map_keys_recurse()
2543 ? fn(op, b, k) in bch_btree_map_keys_recurse()
2544 : bcache_btree(map_keys_recurse, k, in bch_btree_map_keys_recurse()
2592 struct bkey *k) in refill_keybuf_fn() argument
2598 if (bkey_cmp(k, refill->end) > 0) { in refill_keybuf_fn()
2603 if (!KEY_SIZE(k)) /* end key */ in refill_keybuf_fn()
2606 if (refill->pred(buf, k)) { in refill_keybuf_fn()
2618 bkey_copy(&w->key, k); in refill_keybuf_fn()
2631 buf->last_scanned = *k; in refill_keybuf_fn()
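
refill_keybuf_fn() is the map_keys callback that fills a keybuf (used e.g. by writeback): it stops once the scan passes refill->end, skips zero-size "end" keys, copies keys accepted by the predicate into a keybuf entry, and remembers how far it scanned in buf->last_scanned. A hedged sketch of the control flow; the MAP_DONE/MAP_CONTINUE return values and the allocation of the entry w are assumptions, only the tests and copies appear in the matched lines:

    if (bkey_cmp(k, refill->end) > 0)
        return MAP_DONE;            /* past the requested range, stop scanning */

    if (!KEY_SIZE(k))
        return MAP_CONTINUE;        /* zero-size end key, nothing to queue */

    if (refill->pred(buf, k)) {
        /* allocate a keybuf entry w, then remember the key */
        bkey_copy(&w->key, k);
    }

    buf->last_scanned = *k;
    return MAP_CONTINUE;
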