/OK3568_Linux_fs/kernel/arch/powerpc/include/asm/

mmu_context.h
    17  extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
    18  extern void destroy_context(struct mm_struct *mm);
    23  extern bool mm_iommu_preregistered(struct mm_struct *mm);
    24  extern long mm_iommu_new(struct mm_struct *mm,
    27  extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
    30  extern long mm_iommu_put(struct mm_struct *mm,
    32  extern void mm_iommu_init(struct mm_struct *mm);
    33  extern void mm_iommu_cleanup(struct mm_struct *mm);
    34  extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
    37          struct mm_struct *mm, unsigned long ua, unsigned long size);
    [all …]
|
/OK3568_Linux_fs/kernel/include/linux/

mmap_lock.h
     9  static inline void mmap_init_lock(struct mm_struct *mm)
    11          init_rwsem(&mm->mmap_lock);
    14  static inline void mmap_write_lock(struct mm_struct *mm)
    16          down_write(&mm->mmap_lock);
    19  static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
    21          down_write_nested(&mm->mmap_lock, subclass);
    24  static inline int mmap_write_lock_killable(struct mm_struct *mm)
    26          return down_write_killable(&mm->mmap_lock);
    29  static inline bool mmap_write_trylock(struct mm_struct *mm)
    31          return down_write_trylock(&mm->mmap_lock) != 0;
    [all …]
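These wrappers (introduced when mmap_sem was renamed to mmap_lock) are the sanctioned way to take the per-mm lock. A minimal sketch of the usual write-side pattern; modify_vmas_locked() is a hypothetical placeholder, not a kernel API:

```c
#include <linux/mmap_lock.h>

/* Sketch only: bracket a VMA-modifying operation with the killable
 * write lock so a fatal signal can interrupt the wait. */
static int modify_address_space(struct mm_struct *mm)
{
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;		/* interrupted by a fatal signal */

	ret = modify_vmas_locked(mm);	/* hypothetical: work under the lock */

	mmap_write_unlock(mm);
	return ret;
}
```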
|
mmu_notifier.h
     93          struct mm_struct *mm);
    105          struct mm_struct *mm,
    115          struct mm_struct *mm,
    126          struct mm_struct *mm,
    134          struct mm_struct *mm,
    212          struct mm_struct *mm,
    226  struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
    249  struct mm_struct *mm;    [member]
    272  struct mm_struct *mm;    [member]
    285  struct mm_struct *mm;    [member]
    [all …]
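This header declares the subscription API whose implementation appears under /OK3568_Linux_fs/kernel/mm/ below. A hedged sketch of registering a notifier that only implements .release (kernel module context assumed; the callback body is illustrative):

```c
#include <linux/mmu_notifier.h>

/* Sketch: a minimal subscription that logs address-space teardown. */
static void my_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	pr_info("mm %p is being torn down\n", mm);
}

static const struct mmu_notifier_ops my_mn_ops = {
	.release = my_mn_release,
};

static struct mmu_notifier my_mn = { .ops = &my_mn_ops };

static int watch_current_mm(void)
{
	/* Caller must guarantee current->mm stays alive (mm_users > 0). */
	return mmu_notifier_register(&my_mn, current->mm);
}
```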
|
/OK3568_Linux_fs/kernel/drivers/rknpu/

rknpu_mm.c
    11          struct rknpu_mm **mm)    [in rknpu_mm_create()]
    23          *mm = kzalloc(sizeof(struct rknpu_mm), GFP_KERNEL);
    24          if (!(*mm))
    27          (*mm)->chunk_size = chunk_size;
    28          (*mm)->total_chunks = mem_size / chunk_size;
    29          (*mm)->free_chunks = (*mm)->total_chunks;
    32                  ((*mm)->total_chunks + BITS_PER_LONG - 1) / BITS_PER_LONG;
    34          (*mm)->bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);
    35          if (!(*mm)->bitmap) {
    40          mutex_init(&(*mm)->lock);
    [all …]
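The expression at line 32 is an open-coded round-up division for sizing the chunk bitmap. The kernel already provides BITS_TO_LONGS() for exactly this computation, so an equivalent (a sketch, assuming <linux/bitops.h> is included) would be:

```c
#include <linux/bitops.h>	/* BITS_TO_LONGS() */

/* Equivalent to ((*mm)->total_chunks + BITS_PER_LONG - 1) / BITS_PER_LONG */
num_of_longs = BITS_TO_LONGS((*mm)->total_chunks);
```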
|
/OK3568_Linux_fs/kernel/arch/m68k/include/asm/

mmu_context.h
     8  static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
    32  static inline void get_mmu_context(struct mm_struct *mm)
    36          if (mm->context != NO_CONTEXT)
    49          mm->context = ctx;
    50          context_mm[ctx] = mm;
    56  #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
    61  static inline void destroy_context(struct mm_struct *mm)
    63          if (mm->context != NO_CONTEXT) {
    64                  clear_bit(mm->context, context_map);
    65                  mm->context = NO_CONTEXT;
    [all …]
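Lines 37-48, the actual context-ID allocation, are elided by the snippet; destroy_context() clearing a bit in context_map shows the IDs live in a bitmap. A generic sketch of the elided step, illustrative rather than the verbatim m68k body (context_map and LAST_CONTEXT follow the surrounding code):

```c
/* Illustrative sketch: find a free context ID in the bitmap and claim
 * it. Not the verbatim m68k allocation loop. */
ctx = find_first_zero_bit(context_map, LAST_CONTEXT + 1);
if (ctx <= LAST_CONTEXT)
	set_bit(ctx, context_map);	/* claimed; mm->context = ctx follows */
```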
|
/OK3568_Linux_fs/kernel/arch/x86/include/asm/

mmu_context.h
    61  static inline void init_new_context_ldt(struct mm_struct *mm)
    63          mm->context.ldt = NULL;
    64          init_rwsem(&mm->context.ldt_usr_sem);
    66  int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
    67  void destroy_context_ldt(struct mm_struct *mm);
    68  void ldt_arch_exit_mmap(struct mm_struct *mm);
    70  static inline void init_new_context_ldt(struct mm_struct *mm) { }
    72          struct mm_struct *mm)    [in ldt_dup_context()]
    76  static inline void destroy_context_ldt(struct mm_struct *mm) { }
    77  static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
    [all …]
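The duplicate init_new_context_ldt() definitions at lines 61 and 70 are the two sides of a config gate: real implementations when the LDT syscall is built in, empty stubs otherwise. Reassembled from the hits above (the guard in this header is CONFIG_MODIFY_LDT_SYSCALL):

```c
#ifdef CONFIG_MODIFY_LDT_SYSCALL
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else /* !CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif
```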
|
/OK3568_Linux_fs/kernel/mm/

mmu_notifier.c
    195          interval_sub->mm->notifier_subscriptions;    [in mmu_interval_read_begin()]
    268          struct mm_struct *mm)    [in mn_itree_release()]
    273          .mm = mm,
    306          struct mm_struct *mm)    [in mn_hlist_release()]
    325          subscription->ops->release(subscription, mm);
    354  void __mmu_notifier_release(struct mm_struct *mm)
    357          mm->notifier_subscriptions;
    360          mn_itree_release(subscriptions, mm);
    363          mn_hlist_release(subscriptions, mm);
    371  int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
    [all …]
|
debug.c
    217  void dump_mm(const struct mm_struct *mm)
    247          mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
    249          mm->get_unmapped_area,
    251          mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
    252          mm->pgd, atomic_read(&mm->mm_users),
    253          atomic_read(&mm->mm_count),
    254          mm_pgtables_bytes(mm),
    255          mm->map_count,
    256          mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
    257          (u64)atomic64_read(&mm->pinned_vm),
    [all …]
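dump_mm() is rarely called directly; it is the sink behind the VM_BUG_ON_MM() assertion macro from <linux/mmdebug.h>, which dumps the whole mm_struct before BUG()ing on CONFIG_DEBUG_VM kernels. The condition below is illustrative:

```c
/* On a CONFIG_DEBUG_VM build, a failing check dumps the mm via
 * dump_mm() and then BUG()s; otherwise it compiles away. */
VM_BUG_ON_MM(atomic_read(&mm->mm_users) < 0, mm);
```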
|
mmap.c
     80  static void unmap_region(struct mm_struct *mm,
    213          struct mm_struct *mm = current->mm;    [in SYSCALL_DEFINE1(brk)]
    220          if (mmap_write_lock_killable(mm))
    223          origbrk = mm->brk;
    232          min_brk = mm->start_brk;
    234          min_brk = mm->end_data;
    236          min_brk = mm->start_brk;
    247          if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
    248                                mm->end_data, mm->start_data))
    252          oldbrk = PAGE_ALIGN(mm->brk);
    [all …]
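These hits are from the brk(2) syscall implementation: take the mmap write lock, validate the requested break against RLIMIT_DATA, then grow or shrink the heap mapping. A small userspace probe that drives this exact path, runnable as-is on Linux:

```c
/* Userspace sketch: move the program break up one page and back,
 * exercising SYSCALL_DEFINE1(brk, ...) shown above. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	void *before = sbrk(0);		/* current program break */

	if (sbrk(4096) == (void *)-1) {	/* ask brk to extend by one page */
		perror("sbrk");
		return 1;
	}
	printf("break moved from %p to %p\n", before, sbrk(0));
	sbrk(-4096);			/* shrink back */
	return 0;
}
```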
|
/OK3568_Linux_fs/kernel/arch/s390/include/asm/

pgalloc.h
    26  struct page *page_table_alloc_pgste(struct mm_struct *mm);
    37  int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
    39  static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
    44          if (addr + len > mm->context.asce_limit &&
    46                  rc = crst_table_upgrade(mm, addr + len);
    53  static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
    55          unsigned long *table = crst_table_alloc(mm);
    62  static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
    64          if (!mm_p4d_folded(mm))
    65                  crst_table_free(mm, (unsigned long *) p4d);
    [all …]
|
mmu_context.h
    19          struct mm_struct *mm)    [in init_new_context()]
    23          spin_lock_init(&mm->context.lock);
    24          INIT_LIST_HEAD(&mm->context.pgtable_list);
    25          INIT_LIST_HEAD(&mm->context.gmap_list);
    26          cpumask_clear(&mm->context.cpu_attach_mask);
    27          atomic_set(&mm->context.flush_count, 0);
    28          atomic_set(&mm->context.is_protected, 0);
    29          mm->context.gmap_asce = 0;
    30          mm->context.flush_mm = 0;
    32          mm->context.alloc_pgste = page_table_allocate_pgste ||
    [all …]
|
/OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/selftests/

i915_buddy.c
    11  static void __igt_dump_block(struct i915_buddy_mm *mm,
    20          i915_buddy_block_size(mm, block),
    25  static void igt_dump_block(struct i915_buddy_mm *mm,
    30          __igt_dump_block(mm, block, false);
    34          __igt_dump_block(mm, buddy, true);
    37  static int igt_check_block(struct i915_buddy_mm *mm,
    55          block_size = i915_buddy_block_size(mm, block);
    58          if (block_size < mm->chunk_size) {
    68          if (!IS_ALIGNED(block_size, mm->chunk_size)) {
    73          if (!IS_ALIGNED(offset, mm->chunk_size)) {
    [all …]
|
/OK3568_Linux_fs/kernel/arch/s390/mm/

pgtable.c
    47  static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
    54          asce = READ_ONCE(mm->context.gmap_asce);
    58          asce = asce ? : mm->context.asce;
    67  static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
    74          asce = READ_ONCE(mm->context.gmap_asce);
    78          asce = asce ? : mm->context.asce;
    87  static inline pte_t ptep_flush_direct(struct mm_struct *mm,
    96          atomic_inc(&mm->context.flush_count);
    98          cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
    99                  ptep_ipte_local(mm, addr, ptep, nodat);
    [all …]
|
/OK3568_Linux_fs/kernel/arch/powerpc/mm/book3s64/

mmu_context.c
     92  static int hash__init_new_context(struct mm_struct *mm)
     96          mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
     98          if (!mm->context.hash_context)
    115          if (mm->context.id == 0) {
    116                  memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
    117                  slice_init_new_context_exec(mm);
    120                  memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context)…
    123          if (current->mm->context.hash_context->spt) {
    124                  mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
    126                  if (!mm->context.hash_context->spt) {
    [all …]
|
/OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/gem/

i915_gem_shrinker.c
     36          return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;    [in can_release_pages()]
     59          switch (obj->mm.madv) {    [in try_to_writeback()]
    105          { &i915->mm.purge_list, ~0u },    [in i915_gem_shrink()]
    107                  &i915->mm.shrink_list,
    179          spin_lock_irqsave(&i915->mm.obj_lock, flags);
    183                  mm.link))) {
    184                  list_move_tail(&obj->mm.link, &still_in_list);
    187                  !is_vmalloc_addr(obj->mm.mapping))
    200          spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
    204          mutex_lock(&obj->mm.lock);
    [all …]
|
i915_gem_userptr.c
     19  struct mm_struct *mm;    [member]
     35  struct i915_mm_struct *mm;    [member]
    158  i915_mmu_notifier_create(struct i915_mm_struct *mm)
    169          mn->mm = mm;
    190  i915_mmu_notifier_find(struct i915_mm_struct *mm)
    195          mn = READ_ONCE(mm->mn);
    199          mn = i915_mmu_notifier_create(mm);
    203          err = mmu_notifier_register(&mn->mn, mm->mm);
    209          old = cmpxchg(&mm->mn, NULL, mn);
    211          mmu_notifier_unregister(&mn->mn, mm->mm);
    [all …]
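Lines 195-211 are a lock-free, initialize-once publication: each racer builds a candidate notifier, but only the first cmpxchg() wins, and losers unregister their copy. The idiom in isolation (names here are generic stand-ins, not the i915 ones):

```c
/* Publish-or-discard sketch of the idiom at lines 199-211. "shared",
 * create_obj() and destroy_obj() are illustrative, not i915 code. */
struct my_obj *obj = create_obj();
struct my_obj *old;

if (IS_ERR(obj))
	return obj;

old = cmpxchg(&shared->obj, NULL, obj);	/* publish iff still NULL */
if (old) {
	destroy_obj(obj);	/* lost the race; drop our candidate */
	obj = old;		/* everyone proceeds with the winner */
}
```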
|
/OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/

i915_buddy.c
     75  static void mark_free(struct i915_buddy_mm *mm,
     82                  &mm->free_list[i915_buddy_block_order(block)]);
     93  int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
    109          mm->size = size;
    110          mm->chunk_size = chunk_size;
    111          mm->max_order = ilog2(size) - ilog2(chunk_size);
    113          GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);
    115          mm->free_list = kmalloc_array(mm->max_order + 1,
    118          if (!mm->free_list)
    121          for (i = 0; i <= mm->max_order; ++i)
    [all …]
|
/OK3568_Linux_fs/kernel/arch/sparc/include/asm/

mmu_context_64.h
    19  static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
    28  void get_new_mmu_context(struct mm_struct *mm);
    29  int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
    30  void destroy_context(struct mm_struct *mm);
    38  static inline void tsb_context_switch_ctx(struct mm_struct *mm,
    41          __tsb_context_switch(__pa(mm->pgd),
    42                  &mm->context.tsb_block[MM_TSB_BASE],
    44                  (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
    45                          &mm->context.tsb_block[MM_TSB_HUGE] :
    50                  , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
    [all …]
|
/OK3568_Linux_fs/kernel/arch/powerpc/mm/

slice.c
     86  static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
     91          if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
     93          vma = find_vma(mm, addr);
     97  static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
     99          return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
    103  static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
    114          return !slice_area_is_free(mm, start, end - start);
    117  static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
    127          if (!slice_low_has_vma(mm, i))
    134          if (!slice_high_has_vma(mm, i))
    [all …]
|
/OK3568_Linux_fs/kernel/arch/x86/kernel/

ldt.c
     42  void load_mm_ldt(struct mm_struct *mm)
     47          ldt = READ_ONCE(mm->context.ldt);
    138          struct mm_struct *mm = __mm;    [in flush_ldt()]
    140          if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
    143          load_mm_ldt(mm);
    189  static void do_sanity_check(struct mm_struct *mm,
    193          if (mm->context.ldt) {
    234  static void map_ldt_struct_to_user(struct mm_struct *mm)
    236          pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
    243          if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
    [all …]
|
/OK3568_Linux_fs/kernel/arch/sparc/mm/

tsb.c
    121          struct mm_struct *mm = tb->mm;    [in flush_tsb_user()]
    124          spin_lock_irqsave(&mm->context.lock, flags);
    127          base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
    128          nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
    140          else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
    141                  base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
    142                  nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
    149          spin_unlock_irqrestore(&mm->context.lock, flags);
    152  void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
    157          spin_lock_irqsave(&mm->context.lock, flags);
    [all …]
|
tlb.c
    25          struct mm_struct *mm = tb->mm;    [in flush_tlb_pending()]
    32          if (CTX_VALID(mm->context)) {
    34                  global_flush_tlb_page(mm, tb->vaddrs[0]);
    37                  smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
    40                  __flush_tlb_pending(CTX_HWBITS(tb->mm->context),
    68  static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
    80          if (unlikely(nr != 0 && mm != tb->mm)) {
    86                  flush_tsb_user_page(mm, vaddr, hugepage_shift);
    87                  global_flush_tlb_page(mm, vaddr);
    92          tb->mm = mm;
    [all …]
|
/OK3568_Linux_fs/kernel/arch/arm/include/asm/

mmu_context.h
    24  void __check_vmalloc_seq(struct mm_struct *mm);
    28  void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
    30  init_new_context(struct task_struct *tsk, struct mm_struct *mm)
    32          atomic64_set(&mm->context.id, 0);
    37  void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
    40  static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
    50  static inline void check_and_switch_context(struct mm_struct *mm,
    53          if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
    54                  __check_vmalloc_seq(mm);
    64          mm->context.switch_pending = 1;
    [all …]
|
/OK3568_Linux_fs/kernel/arch/um/kernel/skas/

mmu.c
    17  static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
    26          pgd = pgd_offset(mm, proc);
    28          p4d = p4d_alloc(mm, pgd, proc);
    32          pud = pud_alloc(mm, p4d, proc);
    36          pmd = pmd_alloc(mm, pud, proc);
    40          pte = pte_alloc_map(mm, pmd, proc);
    49          pmd_free(mm, pmd);
    51          pud_free(mm, pud);
    53          p4d_free(mm, p4d);
    58  int init_new_context(struct task_struct *task, struct mm_struct *mm)
    [all …]
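init_stub_pte() is the canonical top-down page-table population walk (pgd → p4d → pud → pmd → pte), with its error path freeing the levels in reverse order (lines 49-53). A condensed sketch of the walk itself, kernel context assumed and the helper name illustrative (the reverse-order unwind is omitted for brevity):

```c
/* Sketch of the five-level walk: each *_alloc() returns NULL on
 * allocation failure; pte_alloc_map() maps the final PTE. */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* top level always exists */
	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	return pte_alloc_map(mm, pmd, addr);
}
```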
|
/OK3568_Linux_fs/kernel/drivers/gpu/drm/

drm_mm.c
    118  static void show_leaks(struct drm_mm *mm)
    129          list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
    149  static void show_leaks(struct drm_mm *mm) { }
    160  __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)    [in INTERVAL_TREE_DEFINE()]
    162          return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
    163                  start, last) ?: (struct drm_mm_node *)&mm->head_node;
    170          struct drm_mm *mm = hole_node->mm;    [in drm_mm_interval_tree_add_node()]
    193          link = &mm->interval_tree.rb_root.rb_node;
    211          rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
    271          struct drm_mm *mm = node->mm;    [in add_hole()]
    [all …]
|