// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * Excerpts from the Open vSwitch flow table (net/openvswitch/flow_table.c),
 * grouped by the function they belong to.
 */
/* range_n_bytes() */
return range->end - range->start;

/* ovs_flow_mask_key() */
int start = full ? 0 : mask->range.start;
int len = full ? sizeof *dst : range_n_bytes(&mask->range);
const long *m = (const long *)((const u8 *)&mask->key + start);

/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
 * if 'full' is false the memory outside of the 'mask->range' is left
 * uninitialized. This can be used as an optimization when further
 * operations on 'dst' only use contents within 'mask->range'.
 */
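/* Illustrative sketch, not part of flow_table.c: ovs_flow_mask_key() reduces
 * to a word-wise AND of the source key with the mask, restricted to the byte
 * range the mask actually covers.  A minimal userspace model, assuming the
 * key is an opaque buffer whose covered range is a multiple of sizeof(long)
 * (the toy_* names are hypothetical):
 */
#include <stddef.h>

struct toy_range { size_t start, end; };	/* byte offsets into the key */

static void toy_mask_key(void *dst, const void *src, const void *mask,
			 const struct toy_range *range)
{
	const long *s = (const long *)((const char *)src + range->start);
	const long *m = (const long *)((const char *)mask + range->start);
	long *d = (long *)((char *)dst + range->start);
	size_t i;

	/* Only the covered range is written; bytes outside it are left
	 * untouched, matching the "full == false" behaviour described above.
	 */
	for (i = 0; i < range->end - range->start; i += sizeof(long))
		*d++ = *s++ & *m++;
}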
/* ovs_flow_alloc() */
return ERR_PTR(-ENOMEM);
flow->stats_last_writer = -1;
spin_lock_init(&stats->lock);
RCU_INIT_POINTER(flow->stats[0], stats);
cpumask_set_cpu(0, &flow->cpu_used_mask);
return ERR_PTR(-ENOMEM);

/* ovs_flow_tbl_count() */
return table->count;

/* flow_free() */
if (ovs_identifier_is_key(&flow->id))
	kfree(flow->id.unmasked_key);
if (flow->sf_acts)
	ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
				  flow->sf_acts);
for (cpu = 0; cpu < nr_cpu_ids;
     cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
	if (flow->stats[cpu])
		kmem_cache_free(flow_stats_cache,
				(struct sw_flow_stats __force *)flow->stats[cpu]);
/* ovs_flow_free() */
call_rcu(&flow->rcu, rcu_free_flow_callback);

/* __table_instance_destroy() */
kvfree(ti->buckets);

/* table_instance_alloc() */
ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
			     GFP_KERNEL);
if (!ti->buckets) {
INIT_HLIST_HEAD(&ti->buckets[i]);
ti->n_buckets = new_size;
ti->node_ver = 0;
get_random_bytes(&ti->hash_seed, sizeof(u32));

/* __mask_array_destroy() */
free_percpu(ma->masks_usage_stats);
/* tbl_mask_array_reset_counters() */
for (i = 0; i < ma->max; i++) {
	ma->masks_usage_zero_cntr[i] = 0;
	stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		counter = stats->usage_cntrs[i];
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
	ma->masks_usage_zero_cntr[i] += counter;
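/* Illustrative sketch, not part of flow_table.c: the
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair above is a
 * sequence-counter read loop; the reader retries until it sees a counter
 * that no writer was updating concurrently.  A rough userspace model of the
 * same retry pattern using C11 atomics (toy_* names are hypothetical, and
 * torn reads are ignored for brevity):
 */
#include <stdatomic.h>
#include <stdint.h>

struct toy_stats {
	atomic_uint seq;	/* writers make this odd while updating */
	uint64_t counter;
};

static uint64_t toy_read_counter(struct toy_stats *st)
{
	unsigned int start;
	uint64_t val;

	do {
		/* Begin: wait until no writer is mid-update (even sequence). */
		do {
			start = atomic_load_explicit(&st->seq, memory_order_acquire);
		} while (start & 1);

		val = st->counter;

		/* Retry if a writer bumped the sequence while we were reading. */
	} while (atomic_load_explicit(&st->seq, memory_order_acquire) != start);

	return val;
}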
/* tbl_mask_array_alloc() */
new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
				     sizeof(struct mask_array) +
				     sizeof(struct sw_flow_mask *) * size);
new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
					sizeof(u64) * size,
					__alignof__(u64));
if (!new->masks_usage_stats) {
new->count = 0;
new->max = size;

/* tbl_mask_array_realloc() */
return -ENOMEM;
old = ovsl_dereference(tbl->mask_array);
for (i = 0; i < old->max; i++) {
	if (ovsl_dereference(old->masks[i]))
		new->masks[new->count++] = old->masks[i];
call_rcu(&old->rcu, mask_array_rcu_cb);
rcu_assign_pointer(tbl->mask_array, new);

/* tbl_mask_array_add_mask() */
struct mask_array *ma = ovsl_dereference(tbl->mask_array);
int err, ma_count = READ_ONCE(ma->count);
if (ma_count >= ma->max) {
	err = tbl_mask_array_realloc(tbl, ma->max + MASK_ARRAY_SIZE_MIN);
	ma = ovsl_dereference(tbl->mask_array);
BUG_ON(ovsl_dereference(ma->masks[ma_count]));
rcu_assign_pointer(ma->masks[ma_count], new);
WRITE_ONCE(ma->count, ma_count + 1);
/* tbl_mask_array_del_mask() */
struct mask_array *ma = ovsl_dereference(tbl->mask_array);
int i, ma_count = READ_ONCE(ma->count);
if (mask == ovsl_dereference(ma->masks[i]))
WRITE_ONCE(ma->count, ma_count - 1);
rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);
if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
    ma_count <= (ma->max / 3))
	tbl_mask_array_realloc(tbl, ma->max / 2);
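/* Worked example (not from the source): assuming MASK_ARRAY_SIZE_MIN is 16,
 * as in mainline, an array grown to ma->max == 32 is only shrunk back to 16
 * once the live mask count drops to 32 / 3 == 10 or fewer.  The gap between
 * the growth trigger (count reaching max) and the shrink trigger (count at
 * one third of max) keeps a count hovering near the boundary from causing
 * repeated realloc/free cycles.
 */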
/* flow_mask_remove() */
/* ovs-lock is required to protect mask-refcount and
 * mask list.
 */
BUG_ON(!mask->ref_count);
mask->ref_count--;
if (!mask->ref_count)

/* __mask_cache_destroy() */
free_percpu(mc->mask_cache);

/* tbl_mask_cache_alloc() */
new->cache_size = size;
if (new->cache_size > 0) {
	cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
					  new->cache_size),
			       __alignof__(struct mask_cache_entry));
new->mask_cache = cache;

/* ovs_flow_tbl_masks_cache_resize() */
struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
if (size == mc->cache_size)
return -EINVAL;
return -ENOMEM;
rcu_assign_pointer(table->mask_cache, new);
call_rcu(&mc->rcu, mask_cache_rcu_cb);
/* ovs_flow_tbl_init() */
return -ENOMEM;
rcu_assign_pointer(table->ti, ti);
rcu_assign_pointer(table->ufid_ti, ufid_ti);
rcu_assign_pointer(table->mask_array, ma);
rcu_assign_pointer(table->mask_cache, mc);
table->last_rehash = jiffies;
table->count = 0;
table->ufid_count = 0;
return -ENOMEM;

/* table_instance_flow_free() */
hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
table->count--;
if (ovs_identifier_is_ufid(&flow->id)) {
	hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
	table->ufid_count--;
flow_mask_remove(table, flow->mask);

/* table_instance_flow_flush() */
for (i = 0; i < ti->n_buckets; i++) {
	struct hlist_head *head = &ti->buckets[i];
	hlist_for_each_entry_safe(flow, n, head,
				  flow_table.node[ti->node_ver]) {
if (WARN_ON(table->count != 0 ||
	    table->ufid_count != 0)) {
	table->count = 0;
	table->ufid_count = 0;

/* table_instance_destroy() */
call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);

/* ovs_flow_tbl_destroy() */
struct table_instance *ti = rcu_dereference_raw(table->ti);
struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
struct mask_array *ma = rcu_dereference_raw(table->mask_array);
call_rcu(&mc->rcu, mask_cache_rcu_cb);
call_rcu(&ma->rcu, mask_array_rcu_cb);
/* ovs_flow_tbl_dump_next() */
ver = ti->node_ver;
while (*bucket < ti->n_buckets) {
	head = &ti->buckets[*bucket];

/* find_bucket() */
hash = jhash_1word(hash, ti->hash_seed);
return &ti->buckets[hash & (ti->n_buckets - 1)];
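/* Illustrative sketch, not part of the file: n_buckets is always a power of
 * two, so masking with (n_buckets - 1) selects a bucket with the same result
 * as a modulo but without a division; re-hashing with the per-instance
 * hash_seed spreads flows differently in each table instance.  A toy helper
 * (hypothetical name):
 */
#include <assert.h>
#include <stdint.h>

static uint32_t toy_bucket_index(uint32_t hash, uint32_t n_buckets)
{
	/* Only valid when n_buckets is a non-zero power of two. */
	assert(n_buckets && (n_buckets & (n_buckets - 1)) == 0);
	return hash & (n_buckets - 1);	/* equivalent to hash % n_buckets */
}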
/* table_instance_insert() */
head = find_bucket(ti, flow->flow_table.hash);
hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);

/* ufid_table_instance_insert() */
head = find_bucket(ti, flow->ufid_table.hash);
hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);

/* flow_table_copy_flows() */
old_ver = old->node_ver;
new->node_ver = !old_ver;
for (i = 0; i < old->n_buckets; i++) {
	struct hlist_head *head = &old->buckets[i];

/* ovs_flow_tbl_flush() */
return -ENOMEM;
old_ti = ovsl_dereference(flow_table->ti);
old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
rcu_assign_pointer(flow_table->ti, new_ti);
rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
flow_table->last_rehash = jiffies;
return -ENOMEM;

/* flow_hash() */
const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

/* flow_key_start() */
if (key->tun_proto)

/* flow_cmp_masked_key() */
return cmp_key(&flow->key, key, range->start, range->end);

/* ovs_flow_cmp_unmasked_key() */
struct sw_flow_key *key = match->key;
int key_end = match->range.end;
BUG_ON(ovs_identifier_is_ufid(&flow->id));
return cmp_key(flow->id.unmasked_key, key, key_start, key_end);

/* masked_flow_lookup() */
hash = flow_hash(&masked_key, &mask->range);
hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
			 lockdep_ovsl_is_held()) {
	if (flow->mask == mask && flow->flow_table.hash == hash &&
	    flow_cmp_masked_key(flow, &masked_key, &mask->range))
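/* Illustrative sketch, not the kernel code: masked_flow_lookup() follows a
 * fixed pattern - mask the incoming key down to the mask's byte range, hash
 * only that range, then compare bucket candidates on (mask identity, hash,
 * masked bytes).  A compressed userspace model with hypothetical toy_* types:
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct toy_flow {
	struct toy_flow *next;		/* bucket chain */
	const void *mask;		/* mask the flow was inserted with */
	uint32_t hash;			/* hash of the masked key over the range */
	unsigned char key[64];		/* already-masked key bytes */
};

static struct toy_flow *toy_masked_lookup(struct toy_flow *bucket,
					  const void *mask, uint32_t hash,
					  const unsigned char *masked_key,
					  size_t start, size_t end)
{
	struct toy_flow *f;

	for (f = bucket; f; f = f->next) {
		if (f->mask == mask && f->hash == hash &&
		    !memcmp(f->key + start, masked_key + start, end - start))
			return f;
	}
	return NULL;
}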
/* flow_lookup() */
struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);
if (likely(*index < ma->max)) {
	mask = rcu_dereference_ovsl(ma->masks[*index]);
	u64_stats_update_begin(&stats->syncp);
	stats->usage_cntrs[*index]++;
	u64_stats_update_end(&stats->syncp);
for (i = 0; i < ma->max; i++) {
	mask = rcu_dereference_ovsl(ma->masks[i]);
	u64_stats_update_begin(&stats->syncp);
	stats->usage_cntrs[*index]++;
	u64_stats_update_end(&stats->syncp);
/* mask_cache maps a flow to a probable mask. This cache is not a tightly
 * coupled cache: updates to the mask list can result in inconsistent
 * entries in the mask cache, so a cached mask index is only a hint that
 * the masked lookup below must still validate.
 */
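/* Illustrative sketch, not the kernel code: each per-CPU cache entry pairs a
 * packet's skb_hash with the index of the mask that matched last time.  The
 * next packet with the same skb_hash tries that mask first; if the hint has
 * gone stale, the full mask scan still runs and rewrites the entry, which is
 * why the loose coupling with the mask list is tolerable.  A toy model with
 * hypothetical names, where toy_try_mask() stands in for a masked lookup and
 * returns nonzero on a hit:
 */
#include <stdint.h>

#define TOY_CACHE_SIZE 256	/* must be a power of two */

struct toy_cache_entry {
	uint32_t skb_hash;	/* 0 means "empty slot" */
	uint32_t mask_index;	/* hint: index into the mask array */
};

static struct toy_cache_entry toy_cache[TOY_CACHE_SIZE];

static int toy_lookup(uint32_t skb_hash, uint32_t n_masks,
		      int (*toy_try_mask)(uint32_t idx))
{
	struct toy_cache_entry *e = &toy_cache[skb_hash & (TOY_CACHE_SIZE - 1)];
	uint32_t i;

	if (e->skb_hash == skb_hash && e->mask_index < n_masks &&
	    toy_try_mask(e->mask_index))
		return 1;		/* the cached hint was still valid */

	for (i = 0; i < n_masks; i++) {
		if (toy_try_mask(i)) {
			e->skb_hash = skb_hash;		/* refresh stale hint */
			e->mask_index = i;
			return 1;
		}
	}
	e->skb_hash = 0;		/* nothing matched; clear the slot */
	return 0;
}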
/* ovs_flow_tbl_lookup_stats() */
struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
struct mask_array *ma = rcu_dereference(tbl->mask_array);
struct table_instance *ti = rcu_dereference(tbl->ti);
if (unlikely(!skb_hash || mc->cache_size == 0)) {
if (key->recirc_id)
	skb_hash = jhash_1word(skb_hash, key->recirc_id);
entries = this_cpu_ptr(mc->mask_cache);
int index = hash & (mc->cache_size - 1);
if (e->skb_hash == skb_hash) {
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
			   n_cache_hit, &e->mask_index);
	e->skb_hash = 0;
if (!ce || e->skb_hash < ce->skb_hash)
flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
		   &ce->mask_index);
ce->skb_hash = skb_hash;

/* ovs_flow_tbl_lookup() */
struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
/* ovs_flow_tbl_lookup_exact() */
struct mask_array *ma = ovsl_dereference(tbl->mask_array);
/* Always called under ovs-mutex. */
for (i = 0; i < ma->max; i++) {
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	mask = ovsl_dereference(ma->masks[i]);
	flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
	if (flow && ovs_identifier_is_key(&flow->id) &&
	    ovs_flow_cmp_unmasked_key(flow, match))
		return flow;

/* ufid_hash() */
return jhash(sfid->ufid, sfid->ufid_len, 0);

/* ovs_flow_cmp_ufid() */
if (flow->id.ufid_len != sfid->ufid_len)
return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);

/* ovs_flow_cmp() */
if (ovs_identifier_is_ufid(&flow->id))
return flow_cmp_masked_key(flow, match->key, &match->range);

/* ovs_flow_tbl_lookup_ufid() */
struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
			 lockdep_ovsl_is_held()) {
	if (flow->ufid_table.hash == hash &&
	    ovs_flow_cmp_ufid(flow, ufid))
		return flow;

/* ovs_flow_tbl_num_masks() */
struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
return READ_ONCE(ma->count);

/* ovs_flow_tbl_masks_cache_size() */
struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
return READ_ONCE(mc->cache_size);

/* table_instance_expand() */
return table_instance_rehash(ti, ti->n_buckets * 2, ufid);

/* ovs_flow_tbl_remove() */
struct table_instance *ti = ovsl_dereference(table->ti);
struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
BUG_ON(table->count == 0);

/* mask_alloc() */
mask->ref_count = 1;

/* mask_equal() */
const u8 *a_ = (const u8 *)&a->key + a->range.start;
const u8 *b_ = (const u8 *)&b->key + b->range.start;
return (a->range.end == b->range.end)
	&& (a->range.start == b->range.start)
	&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
/* flow_mask_find() */
ma = ovsl_dereference(tbl->mask_array);
for (i = 0; i < ma->max; i++) {
	t = ovsl_dereference(ma->masks[i]);

/* flow_mask_insert() */
return -ENOMEM;
mask->key = new->key;
mask->range = new->range;
/* Add mask to mask-list. */
return -ENOMEM;
BUG_ON(!mask->ref_count);
mask->ref_count++;
flow->mask = mask;

/* flow_key_insert() */
flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
ti = ovsl_dereference(table->ti);
table->count++;
if (table->count > ti->n_buckets)
	new_ti = table_instance_expand(ti, false);
else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
	new_ti = table_instance_rehash(ti, ti->n_buckets, false);
rcu_assign_pointer(table->ti, new_ti);
call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
table->last_rehash = jiffies;

/* flow_ufid_insert() */
flow->ufid_table.hash = ufid_hash(&flow->id);
ti = ovsl_dereference(table->ufid_ti);
table->ufid_count++;
if (table->ufid_count > ti->n_buckets) {
	rcu_assign_pointer(table->ufid_ti, new_ti);
	call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);

/* ovs_flow_tbl_insert() */
if (ovs_identifier_is_ufid(&flow->id))
	flow_ufid_insert(table, flow);
/* compare_mask_and_count() */
return (s64)mc_b->counter - (s64)mc_a->counter;

/* ovs_flow_masks_rebalance() */
struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
				GFP_KERNEL);
for (i = 0; i < ma->max; i++) {
	mask = rcu_dereference_ovsl(ma->masks[i]);
	stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		counter = stats->usage_cntrs[i];
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
	masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];
	ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
new = tbl_mask_array_alloc(ma->max);
if (ovsl_dereference(ma->masks[index]))
	new->masks[new->count++] = ma->masks[index];
rcu_assign_pointer(table->mask_array, new);
call_rcu(&ma->rcu, mask_array_rcu_cb);
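/* Illustrative sketch, not the kernel code: the rebalance pass sums the
 * per-CPU hit counters for every mask, subtracts the value recorded at the
 * previous pass, sorts mask indexes by that delta in descending order, and
 * rebuilds the mask array in that order so flow_lookup()'s linear scan tries
 * the hottest masks first.  A userspace model of the ordering step, with
 * hypothetical toy_* names and the same ordering as compare_mask_and_count():
 */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct toy_mask_count {
	uint32_t index;		/* position in the current mask array */
	uint64_t counter;	/* hits accumulated since the last rebalance */
};

static int toy_cmp(const void *a, const void *b)
{
	const struct toy_mask_count *ma = a, *mb = b;

	/* Most-used masks sort first. */
	if (mb->counter > ma->counter)
		return 1;
	if (mb->counter < ma->counter)
		return -1;
	return 0;
}

static void toy_rebalance_order(struct toy_mask_count *mc, size_t n)
{
	qsort(mc, n, sizeof(*mc), toy_cmp);
	/* A new mask array would now be filled following mc[0..n-1].index. */
}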
/* ovs_flow_init() */
return -ENOMEM;
return -ENOMEM;