Lines Matching +full:cache +full:- +full:size (mm/kasan/common.c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov.
 */
/* In kasan_set_track(track, flags): */
track->pid = current->pid;
track->stack = kasan_save_stack(flags);
current->kasan_depth++;	/* in kasan_enable_current() */
current->kasan_depth--;	/* in kasan_disable_current() */
void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size, false);
}
/* In kasan_unpoison_task_stack_below(watermark): */
void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

kasan_unpoison(base, watermark - base, false);
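As a quick illustration of the masking above (a userspace sketch, not kernel code, assuming a hypothetical 16 KiB THREAD_SIZE): clearing the low bits rounds the watermark down to the stack base, and [base, watermark) is exactly the range that gets unpoisoned.

#include <assert.h>
#include <stdint.h>

#define THREAD_SIZE (16UL * 1024)	/* assumed for illustration only */

int main(void)
{
	uintptr_t watermark = 0xffff800010004a30UL;	/* hypothetical SP */
	uintptr_t base = watermark & ~(THREAD_SIZE - 1);

	assert(base == 0xffff800010004000UL);	/* rounded down to 16 KiB */
	assert(watermark - base == 0xa30);	/* number of bytes unpoisoned */
	return 0;
}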
/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
/* In optimal_redzone(object_size), for the generic mode: */
return object_size <= 64 - 16 ? 16 :
       object_size <= 128 - 32 ? 32 :
       object_size <= 512 - 64 ? 64 :
       object_size <= 4096 - 128 ? 128 :
       object_size <= (1 << 14) - 256 ? 256 :
       object_size <= (1 << 15) - 512 ? 512 :
       object_size <= (1 << 16) - 1024 ? 1024 : 2048;
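Read as a table, the chain pairs larger objects with larger redzones while keeping object_size + redzone within each row's size bound. A standalone sanity check (userspace sketch; the helper just restates the chain above):

#include <assert.h>

static unsigned int optimal_redzone(unsigned int object_size)
{
	return object_size <= 64 - 16 ? 16 :
	       object_size <= 128 - 32 ? 32 :
	       object_size <= 512 - 64 ? 64 :
	       object_size <= 4096 - 128 ? 128 :
	       object_size <= (1 << 14) - 256 ? 256 :
	       object_size <= (1 << 15) - 512 ? 512 :
	       object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

int main(void)
{
	assert(optimal_redzone(32) == 16);	/* 32 + 16 <= 64 */
	assert(optimal_redzone(100) == 64);	/* 100 + 64 <= 512 */
	assert(optimal_redzone(3900) == 128);	/* 3900 + 128 <= 4096 */
	assert(optimal_redzone(70000) == 2048);	/* beyond the table */
	return 0;
}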
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	/*
	 * The SLAB_KASAN flag is used, among other things:
	 * 1. In slab_ksize() when calculating the size of the accessible
	 *    memory within the object.
	 */

	ok_size = *size;

	/* Add alloc meta into the redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* If alloc meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
	}

	/* Only the generic mode uses free meta or flexible redzones. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		return;
	}

	/*
	 * Add free meta into the redzone when it cannot be stored within the
	 * object. Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		ok_size = *size;

		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);

		/* If free meta doesn't fit, don't add it. */
		if (*size > KMALLOC_MAX_SIZE) {
			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
			*size = ok_size;
		}
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);

	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}
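The net per-slot layout is: object, then alloc meta, then (generic mode, sometimes) free meta, padded so the total redzone is at least optimal_redzone(). A userspace sketch of that arithmetic, with both meta sizes assumed to be 48 bytes purely for illustration:

#include <stdio.h>

#define ALLOC_META_SIZE 48U	/* assumed; not the real sizeof() */
#define FREE_META_SIZE  48U	/* assumed; not the real sizeof() */

static unsigned int optimal_redzone(unsigned int object_size)
{
	return object_size <= 64 - 16 ? 16 :
	       object_size <= 128 - 32 ? 32 :
	       object_size <= 512 - 64 ? 64 : 128;	/* table truncated */
}

int main(void)
{
	unsigned int object_size = 96, size = object_size;
	unsigned int alloc_meta_offset, free_meta_offset, optimal_size;

	alloc_meta_offset = size;	/* alloc meta sits right after the object */
	size += ALLOC_META_SIZE;
	free_meta_offset = size;	/* free meta follows (when needed) */
	size += FREE_META_SIZE;

	optimal_size = object_size + optimal_redzone(object_size);
	if (size < optimal_size)	/* pad up to the optimal redzone */
		size = optimal_size;

	printf("object [0,%u) alloc meta @%u free meta @%u slot %u\n",
	       object_size, alloc_meta_offset, free_meta_offset, size);
	return 0;
}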
void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	cache->kasan_info.is_kmalloc = true;
}
size_t __kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
	       (cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
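Why kasan_reset_tag() precedes the offset arithmetic: in the tag-based modes the pointer carries its tag in the top byte, and metadata must be addressed through the untagged address. A userspace sketch of the idea (64-bit pointers assumed; the mask mirrors the concept, not the kernel's exact macros):

#include <assert.h>
#include <stdint.h>

static void *reset_tag(const void *ptr)
{
	return (void *)((uintptr_t)ptr & ~((uintptr_t)0xff << 56));
}

int main(void)
{
	void *tagged = (void *)(uintptr_t)0xab00ffff12345000ULL;	/* tag 0xab */

	assert((uintptr_t)reset_tag(tagged) == 0x0000ffff12345000ULL);
	return 0;
}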
void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}
void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
		     KASAN_KMALLOC_REDZONE, false);
}
/*
 * Tag assignment (see assign_tag() below) considers two special cases:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself); the object then needs the
 *    same tag on every reallocation.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed, so their tag must be stable as well.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
			    const void *object, bool init)
{
	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a random tag when the object is being allocated.
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* Otherwise, derive a stable tag from the object's index in its slab. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
}
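The deterministic branch matters because an object's index within its slab never changes: folding the index into a tag gives the object the same tag on every reallocation, which is what constructor and RCU caches require. A toy model (helper names invented):

#include <assert.h>
#include <stdint.h>

static uint8_t tag_from_index(uintptr_t slab_base, uintptr_t obj,
			      unsigned int obj_size)
{
	/* plays the role of obj_to_index() truncated to u8 */
	return (uint8_t)((obj - slab_base) / obj_size);
}

int main(void)
{
	uintptr_t base = 0x1000;	/* hypothetical slab base */

	/* same object address -> same tag across repeated alloc/free cycles */
	assert(tag_from_index(base, base + 3 * 128, 128) == 3);
	assert(tag_from_index(base, base + 3 * 128, 128) == 3);
	return 0;
}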
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		__memset(alloc_meta, 0, sizeof(*alloc_meta));

	object = set_tag(object, assign_tag(cache, object, true));
	return (void *)object;
}
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				       unsigned long ip, bool quarantine, bool init)
{
	u8 tag = get_tag(object);
	void *tagged_object = object;

	object = kasan_reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
		     object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
		     KASAN_KMALLOC_FREE, init);

	kasan_set_free_info(cache, object, tag);

	return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
		       unsigned long ip, bool init)
{
	return ____kasan_slab_free(cache, object, ip, true, init);
}
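kasan_quarantine_put() is what delays the actual reuse of freed objects, so a use-after-free hits poisoned memory instead of a fresh allocation. A toy userspace model of the idea (a fixed-slot FIFO; the kernel quarantine is sized in bytes and drained in batches):

#include <stdlib.h>

#define QUARANTINE_SLOTS 64	/* toy capacity */

static void *quarantine[QUARANTINE_SLOTS];
static unsigned int q_head;

/* Defer freeing 'object'; return the oldest entry now due to be freed. */
static void *quarantine_put(void *object)
{
	void *evicted = quarantine[q_head];

	quarantine[q_head] = object;
	q_head = (q_head + 1) % QUARANTINE_SLOTS;
	return evicted;
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		free(quarantine_put(malloc(32)));	/* each free runs 64 allocations late */
	return 0;
}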
/*
 * In __kasan_slab_free_mempool(): mempool allocations can still be
 * !PageSlab() when the size provided to kmalloc is larger than
 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
 */
____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
static void set_alloc_info(struct kmem_cache *cache, void *object,
			   gfp_t flags, bool is_kmalloc)
{
	struct kasan_alloc_meta *alloc_meta;

	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
		return;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
				       void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	/*
	 * Generate and assign a random tag for the tag-based modes; the tag
	 * is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/* Unpoison the whole object. */
	kasan_unpoison(tagged_object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	set_alloc_info(cache, (void *)object, flags, false);

	return tagged_object;
}
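set_tag() is the inverse of the kasan_reset_tag() sketch earlier: the chosen tag is planted into the pointer's top byte, so later access checks can compare the pointer tag against the memory's tag. A userspace sketch (64-bit pointers assumed; not the kernel's macros):

#include <assert.h>
#include <stdint.h>

static void *set_tag(void *ptr, uint8_t tag)
{
	uintptr_t p = (uintptr_t)ptr & ~((uintptr_t)0xff << 56);

	return (void *)(p | ((uintptr_t)tag << 56));
}

int main(void)
{
	void *object = (void *)(uintptr_t)0x0000ffff12345000ULL;
	void *tagged = set_tag(object, 0xab);

	assert((uintptr_t)tagged == 0xab00ffff12345000ULL);
	return 0;
}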
static inline void *____kasan_kmalloc(struct kmem_cache *cache,
				      const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/*
	 * The redzone has byte-level precision for the generic mode:
	 * partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				 KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
			       KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_KMALLOC_REDZONE, false);

	/* Save alloc info (if possible) for kmalloc() allocations. */
	set_alloc_info(cache, (void *)object, flags, true);

	return (void *)object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
				    size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
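The two round_up() calls split the redzone into a byte-precise part (handled by kasan_poison_last_granule()) and a granule-aligned part. A quick check of that arithmetic, assuming the generic mode's 8-byte granule and hypothetical addresses:

#include <assert.h>

#define KASAN_GRANULE_SIZE 8UL	/* generic mode granule, assumed here */

static unsigned long round_up_granule(unsigned long x)
{
	return (x + KASAN_GRANULE_SIZE - 1) & ~(KASAN_GRANULE_SIZE - 1);
}

int main(void)
{
	unsigned long object = 0x1000;	/* hypothetical object address */
	unsigned long size = 100;	/* kmalloc(100) */
	unsigned long object_size = 128;	/* the cache's slot size */

	/* bytes [0x1064, 0x1068) become redzone via the last-granule poison */
	assert(round_up_granule(object + size) == 0x1068);
	/* the aligned redzone then covers [0x1068, 0x1080) */
	assert(round_up_granule(object + object_size) == 0x1080);
	return 0;
}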
void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
					  gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/* The redzone has byte-level precision for the generic mode. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone, up to the end of the page. */
	redzone_start = round_up((unsigned long)(ptr + size),
				 KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);

	return (void *)ptr;
}
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	/* Unpoison the object's data. */
	kasan_unpoison(object, size, false);

	page = virt_to_head_page(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!PageSlab(page)))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
}
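So a krealloc() within the same slot is just "unpoison the new size, re-run the kmalloc poisoning": shrinking moves the redzone boundary back, growing moves it forward. The boundary arithmetic, with hypothetical addresses and the same 8-byte granule assumption as above:

#include <assert.h>

#define GRANULE 8UL	/* generic mode granule, assumed */

static unsigned long round_up_granule(unsigned long x)
{
	return (x + GRANULE - 1) & ~(GRANULE - 1);
}

int main(void)
{
	unsigned long object = 0x2000, object_size = 128;

	/* krealloc() from 100 down to 40 bytes within the same 128-byte slot:
	 * the aligned redzone start moves from 0x2068 back to 0x2028. */
	assert(round_up_granule(object + 100) == 0x2068);
	assert(round_up_granule(object + 40) == 0x2028);
	assert(object + object_size == 0x2080);	/* redzone end stays fixed */
	return 0;
}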