/OK3568_Linux_fs/kernel/include/linux/

rwlock_api_smp.h:
    18  void __lockfunc _raw_read_lock(rwlock_t *lock)       __acquires(lock);
    19  void __lockfunc _raw_write_lock(rwlock_t *lock)      __acquires(lock);
    20  void __lockfunc _raw_read_lock_bh(rwlock_t *lock)    __acquires(lock);
    21  void __lockfunc _raw_write_lock_bh(rwlock_t *lock)   __acquires(lock);
    22  void __lockfunc _raw_read_lock_irq(rwlock_t *lock)   __acquires(lock);
    23  void __lockfunc _raw_write_lock_irq(rwlock_t *lock)  __acquires(lock);
    24  unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
    25          __acquires(lock);
    26  unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
    27          __acquires(lock);
    [all …]

spinlock_api_up.h:
    19  #define assert_raw_spin_locked(lock)    do { (void)(lock); } while (0)
    24   * flags straight, to suppress compiler warnings of unused lock
    27  #define ___LOCK(lock) \
    28    do { __acquire(lock); (void)(lock); } while (0)
    30  #define __LOCK(lock) \
    31    do { preempt_disable(); ___LOCK(lock); } while (0)
    33  #define __LOCK_BH(lock) \
    34    do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
    36  #define __LOCK_IRQ(lock) \
    37    do { local_irq_disable(); __LOCK(lock); } while (0)
    [all …]

rwlock.h:
    18  extern void __rwlock_init(rwlock_t *lock, const char *name,
    20  # define rwlock_init(lock) \
    24      __rwlock_init((lock), #lock, &__key); \
    27  # define rwlock_init(lock) \
    28      do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
    32  extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
    33  #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
    34  extern int do_raw_read_trylock(rwlock_t *lock);
    35  extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
    36  extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
    [all …]

spinlock_api_smp.h:
    22  void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)    __acquires(lock);
    23  void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
    24          __acquires(lock);
    26  _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
    27          __acquires(lock);
    28  void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)  __acquires(lock);
    29  void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
    30          __acquires(lock);
    32  unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
    33          __acquires(lock);
    [all …]

spinlock.h:
     67  #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
     96  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
     99  # define raw_spin_lock_init(lock) \
    103      __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
    107  # define raw_spin_lock_init(lock) \
    108      do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
    111  #define raw_spin_is_locked(lock)    arch_spin_is_locked(&(lock)->raw_lock)
    114  #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
    116  #define raw_spin_is_contended(lock) (((void)(lock), 0))
    121   * between program-order earlier lock acquisitions and program-order later
    [all …]

local_lock.h:
     8   * local_lock_init - Runtime initialize a lock instance
    10  #define local_lock_init(lock)       __local_lock_init(lock)
    13   * local_lock - Acquire a per CPU local lock
    14   * @lock: The lock variable
    16  #define local_lock(lock)            __local_lock(lock)
    19   * local_lock_irq - Acquire a per CPU local lock and disable interrupts
    20   * @lock: The lock variable
    22  #define local_lock_irq(lock)        __local_lock_irq(lock)
    25   * local_lock_irqsave - Acquire a per CPU local lock, save and disable
    27   * @lock: The lock variable
    [all …]

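The kernel-doc stubs above map onto the per-CPU locking pattern local locks were added for. Below is a sketch of typical usage, assuming a 5.10-style local_lock API; the struct, field, and function names are illustrative, not taken from this tree:

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    struct cpu_cache {
        local_lock_t lock;   /* protects the per-CPU fields below */
        int count;
    };

    static DEFINE_PER_CPU(struct cpu_cache, cpu_cache) = {
        .lock = INIT_LOCAL_LOCK(lock),
    };

    static void cache_add(void)
    {
        unsigned long flags;

        /* Serializes only against this CPU (and its interrupts), not others. */
        local_lock_irqsave(&cpu_cache.lock, flags);
        this_cpu_inc(cpu_cache.count);
        local_unlock_irqrestore(&cpu_cache.lock, flags);
    }

Unlike a spinlock, a local lock never bounces a cache line between CPUs; it only documents and enforces the per-CPU critical section (and becomes a real lock on PREEMPT_RT).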
/OK3568_Linux_fs/kernel/kernel/locking/

spinlock_debug.c:
    16  void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
    21       * Make sure we are not reinitializing a held lock:
    23      debug_check_no_locks_freed((void *)lock, sizeof(*lock));
    24      lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
    26      lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
    27      lock->magic = SPINLOCK_MAGIC;
    28      lock->owner = SPINLOCK_OWNER_INIT;
    29      lock->owner_cpu = -1;
    34  void __rwlock_init(rwlock_t *lock, const char *name,
    39       * Make sure we are not reinitializing a held lock:
    [all …]

rtmutex.c:
    27   * lock->owner state tracking:
    29   * lock->owner holds the task_struct pointer of the owner. Bit 0
    30   * is used to keep track of the "lock has waiters" state.
    33   * NULL         0   lock is free (fast acquire possible)
    34   * NULL         1   lock is free and has waiters and the top waiter
    35   *                  is going to take the lock*
    36   * taskpointer  0   lock is held (fast release possible)
    37   * taskpointer  1   lock is held and has waiters**
    40   * possible when bit 0 of lock->owner is 0.
    42   * (*) It also can be a transitional state when grabbing the lock
    [all …]

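The state table above boils down to a tagged pointer: task_struct pointers are word-aligned, so bit 0 is free to carry the "has waiters" flag, and owner plus flag can be read or compare-and-swapped as one atomic word. A minimal userspace sketch of that encoding, assuming C11 atomics (struct task, owner_task, and try_fast_acquire are illustrative stand-ins, not kernel names):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct task;                          /* stand-in for struct task_struct */

    #define HAS_WAITERS 1UL               /* bit 0 of the owner word */

    static inline struct task *owner_task(uintptr_t owner)
    {
        return (struct task *)(owner & ~HAS_WAITERS);  /* mask off the flag */
    }

    static inline bool has_waiters(uintptr_t owner)
    {
        return owner & HAS_WAITERS;
    }

    /* Fast acquire is only possible in state "NULL / 0": no owner, no waiters. */
    static inline bool try_fast_acquire(_Atomic uintptr_t *owner, struct task *me)
    {
        uintptr_t expected = 0;
        return atomic_compare_exchange_strong(owner, &expected, (uintptr_t)me);
    }

The "fast acquire/release possible" rows of the table are exactly the cases where a single CAS on this composite word suffices and no wait-list lock is taken.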
mutex.c:
    42  __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
    44      atomic_long_set(&lock->owner, 0);
    45      spin_lock_init(&lock->wait_lock);
    46      INIT_LIST_HEAD(&lock->wait_list);
    48      osq_lock_init(&lock->osq);
    51      debug_mutex_init(lock, name, key);
    56   * @owner: contains: 'struct task_struct *' to the current lock owner,
    61   * Bit1 indicates unlock needs to hand the lock to the top-waiter
    75  static inline struct task_struct *__mutex_owner(struct mutex *lock)
    77      return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
    [all …]

spinlock.c:
    35   * not re-enabled during lock-acquire (which the preempt-spin-ops do):
    46   * Some architectures can relax in favour of the CPU owning the lock.
    63   * This could be a long-held lock. We both prepare to spin for a long
    65   * towards that other CPU that it should break the lock ASAP.
    68  void __lockfunc __raw_##op##_lock(locktype##_t *lock)                  \
    72          if (likely(do_raw_##op##_trylock(lock)))                        \
    76          arch_##op##_relax(&lock->raw_lock);                             \
    80  unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
    87          if (likely(do_raw_##op##_trylock(lock)))                        \
    92          arch_##op##_relax(&lock->raw_lock);                             \
    [all …]

/OK3568_Linux_fs/kernel/drivers/gpu/arm/mali400/mali/linux/

mali_osk_locks.h:
     13   * Defines OS abstraction of lock and mutex
     48  /* Abstraction of spinlock_t and lock flag which is used to store register's state before locking */
     79   * init/lock/unlock a lock/mutex, we could track lock order of a given tid. */
     84  /** @brief This function can return a given lock's owner when DEBUG is enabled. */
     85  static inline u32 _mali_osk_lock_get_owner(struct _mali_osk_lock_debug_s *lock)
     87      return lock->owner;
     98      _mali_osk_spinlock_t *lock = NULL;
    100      lock = kmalloc(sizeof(_mali_osk_spinlock_t), GFP_KERNEL);
    101      if (NULL == lock) {
    104      spin_lock_init(&lock->spinlock);
    [all …]

/OK3568_Linux_fs/kernel/drivers/gpu/drm/vmwgfx/

ttm_lock.c:
    45  void ttm_lock_init(struct ttm_lock *lock)
    47      spin_lock_init(&lock->lock);
    48      init_waitqueue_head(&lock->queue);
    49      lock->rw = 0;
    50      lock->flags = 0;
    53  void ttm_read_unlock(struct ttm_lock *lock)
    55      spin_lock(&lock->lock);
    56      if (--lock->rw == 0)
    57          wake_up_all(&lock->queue);
    58      spin_unlock(&lock->lock);
    [all …]

ttm_lock.h:
    33   * of the DRM heavyweight hardware lock.
    34   * The lock is a read-write lock. Taking it in read mode and write mode
    39   * It's allowed to leave kernel space with the vt lock held.
    40   * If a user-space process dies while having the vt-lock,
    41   * it will be released during the file descriptor release. The vt lock
    42   * excludes write lock and read lock.
    44   * The suspend mode is used to lock out all TTM users when preparing for
    60   * @base: ttm base object used solely to release the lock if the client
    61   *        holding the lock dies.
    62   * @queue: Queue for processes waiting for lock change-of-status.
    [all …]

/OK3568_Linux_fs/external/security/rk_tee_user/v2/export-ta_arm32/include/sys/

lock.h:
     4  /* dummy lock routines for single-threaded apps */
    14  #define __LOCK_INIT(class,lock) static int lock = 0;
    15  #define __LOCK_INIT_RECURSIVE(class,lock) static int lock = 0;
    16  #define __lock_init(lock) ((void) 0)
    17  #define __lock_init_recursive(lock) ((void) 0)
    18  #define __lock_close(lock) ((void) 0)
    19  #define __lock_close_recursive(lock) ((void) 0)
    20  #define __lock_acquire(lock) ((void) 0)
    21  #define __lock_acquire_recursive(lock) ((void) 0)
    22  #define __lock_try_acquire(lock) ((void) 0)
    [all …]

/OK3568_Linux_fs/external/security/rk_tee_user/v2/export-ta_arm64/include/sys/

lock.h:
     4  /* dummy lock routines for single-threaded apps */
    14  #define __LOCK_INIT(class,lock) static int lock = 0;
    15  #define __LOCK_INIT_RECURSIVE(class,lock) static int lock = 0;
    16  #define __lock_init(lock) ((void) 0)
    17  #define __lock_init_recursive(lock) ((void) 0)
    18  #define __lock_close(lock) ((void) 0)
    19  #define __lock_close_recursive(lock) ((void) 0)
    20  #define __lock_acquire(lock) ((void) 0)
    21  #define __lock_acquire_recursive(lock) ((void) 0)
    22  #define __lock_try_acquire(lock) ((void) 0)
    [all …]

/OK3568_Linux_fs/kernel/drivers/gpu/drm/

drm_lock.c:
    50   * Take the heavyweight lock.
    52   * \param lock lock pointer.
    54   * \return one if the lock is held, or zero otherwise.
    56   * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
    63      volatile unsigned int *lock = &lock_data->hw_lock->lock;
    67          old = *lock;
    75          prev = cmpxchg(lock, old, new);
    82              DRM_ERROR("%d holds heavyweight lock\n",
    90      /* Have lock */
    97   * This takes a lock forcibly and hands it to context. Should ONLY be used
    [all …]

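The shape of the excerpt is the classic compare-and-swap retry loop: read the old lock word, compute the new one, and retry if another CPU raced in between the read and the exchange. A self-contained C11 sketch of the same pattern follows; the LOCK_HELD flag value and function name are illustrative, not the DRM API:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define LOCK_HELD 0x80000000u          /* illustrative "held" flag */

    /* Try to mark the lock word as held by `context`; true on success. */
    static bool heavyweight_lock_take(_Atomic unsigned int *lock,
                                      unsigned int context)
    {
        unsigned int old, newval;

        do {
            old = atomic_load(lock);
            if (old & LOCK_HELD)
                return false;              /* another context holds it */
            newval = context | LOCK_HELD;
            /* Retry if *lock changed between the load and the exchange. */
        } while (!atomic_compare_exchange_weak(lock, &old, newval));

        return true;
    }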
/OK3568_Linux_fs/kernel/fs/ocfs2/dlm/

dlmast.c:
    37                              struct dlm_lock *lock);
    38  static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
    41   * lock level will obsolete a pending bast.
    42   * For example, if dlm_thread queued a bast for an EX lock that
    44   * lock owner downconverted to NL, the bast is now obsolete.
    46   * This is needed because the lock and convert paths can queue
    49  static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
    52      assert_spin_locked(&lock->spinlock);
    54      if (lock->ml.highest_blocked == LKM_IVMODE)
    56      BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);
    [all …]

dlmlock.c:
     7   * underlying calls for lock creation
    47                                 struct dlm_lock *lock, int flags);
    51  static void dlm_lock_detach_lockres(struct dlm_lock *lock);
    68  /* Tell us whether we can grant a new lock request.
    73   * returns: 1 if the lock can be granted, 0 otherwise.
    76                                    struct dlm_lock *lock)
    81          if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
    86          if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
    89                                        lock->ml.type))
    96  /* performs lock creation at the lockres master site
    [all …]

dlmconvert.c:
      7   * underlying calls for lock conversion
     40   * only one that holds a lock on exit (res->spinlock).
     45                                  struct dlm_lock *lock, int flags,
     50                                  struct dlm_lock *lock, int flags, int type);
     63                                  struct dlm_lock *lock, int flags, int type)
     74      status = __dlmconvert_master(dlm, res, lock, flags, type,
     85          dlm_queue_ast(dlm, lock);
     95  /* performs lock conversion at the lockres master site
     98   * taken: takes and drops lock->spinlock
    101   * call_ast: whether ast should be called for this lock
    [all …]

/OK3568_Linux_fs/kernel/drivers/md/persistent-data/

dm-block-manager.c:
    31   * trace is also emitted for the previous lock acquisition.
    44      spinlock_t lock;
    60  static unsigned __find_holder(struct block_lock *lock,
    66          if (lock->holders[i] == task)
    73  /* call this *after* you increment lock->count */
    74  static void __add_holder(struct block_lock *lock, struct task_struct *task)
    76      unsigned h = __find_holder(lock, NULL);
    82      lock->holders[h] = task;
    85      t = lock->traces + h;
    90  /* call this *before* you decrement lock->count */
    [all …]

/OK3568_Linux_fs/kernel/fs/btrfs/

locking.c:
     29   * - spinning lock semantics
     30   * - blocking lock semantics
     31   * - try-lock semantics for readers and writers
     32   * - one level nesting, allowing read lock to be taken by the same thread that
     33   *   already has write lock
     46   * denotes how many times the blocking lock was held;
     49   * Write lock always allows only one thread to access the data.
     60   * Lock recursion
    188   * Mark already held read lock as blocking. Can be nested in write lock by the
    192   * on the lock will not actively spin but sleep instead.
    [all …]

/OK3568_Linux_fs/kernel/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/

locks.h:
    20  /* Only use one lock mechanism. Select which one. */
    26  static inline void lock_impl_lock(struct lock_impl *lock)
    28      BUG_ON(pthread_mutex_lock(&lock->mutex));
    31  static inline void lock_impl_unlock(struct lock_impl *lock)
    33      BUG_ON(pthread_mutex_unlock(&lock->mutex));
    36  static inline bool lock_impl_trylock(struct lock_impl *lock)
    38      int err = pthread_mutex_trylock(&lock->mutex);
    47  static inline void lock_impl_init(struct lock_impl *lock)
    49      pthread_mutex_init(&lock->mutex, NULL);
    55  /* Spinlock that assumes that it always gets the lock immediately. */
    [all …]

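For reference, a complete, compilable userspace version of this pthread-backed lock_impl pattern might look like the sketch below. It mirrors the excerpt rather than copying the file; the BUG_ON stand-in and the struct layout are assumptions:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    #define BUG_ON(x) do { if (x) abort(); } while (0)

    struct lock_impl {
        pthread_mutex_t mutex;
    };

    static inline void lock_impl_init(struct lock_impl *lock)
    {
        BUG_ON(pthread_mutex_init(&lock->mutex, NULL));
    }

    static inline void lock_impl_lock(struct lock_impl *lock)
    {
        BUG_ON(pthread_mutex_lock(&lock->mutex));
    }

    static inline void lock_impl_unlock(struct lock_impl *lock)
    {
        BUG_ON(pthread_mutex_unlock(&lock->mutex));
    }

    /* Returns true if the lock was acquired, false if it was busy. */
    static inline bool lock_impl_trylock(struct lock_impl *lock)
    {
        int err = pthread_mutex_trylock(&lock->mutex);

        if (!err)
            return true;
        BUG_ON(err != EBUSY);   /* anything but "busy" is a real bug */
        return false;
    }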
/OK3568_Linux_fs/kernel/Documentation/locking/

lockdep-design.rst:
     8  Lock-class
    15  tens of thousands of) instantiations. For example a lock in the inode
    17  lock class.
    19  The validator tracks the 'usage state' of lock-classes, and it tracks
    20  the dependencies between different lock-classes. Lock usage indicates
    21  how a lock is used with regard to its IRQ contexts, while lock
    22  dependency can be understood as lock order, where L1 -> L2 suggests that
    26  continuing effort to prove lock usages and dependencies are correct or
    29  A lock-class's behavior is constructed by its instances collectively:
    30  when the first instance of a lock-class is used after bootup the class
    [all …]

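The class-versus-instance split described here is visible in the spinlock.h hit above: raw_spin_lock_init() declares a static struct lock_class_key __key, so every lock initialized from one textual call site hangs off the same key address, which is what lockdep keys the class on. A userspace analogy of that trick, with all names illustrative (a sketch of the idea, not lockdep itself):

    #include <stdio.h>

    /* One static "class key" per init call site; its address names the class. */
    struct lock_class_key { char dummy; };

    struct my_lock {
        const struct lock_class_key *class;  /* shared by every lock from one site */
        int locked;
    };

    #define my_lock_init(l)                                          \
        do {                                                         \
            static struct lock_class_key __key; /* per call site */  \
            (l)->class = &__key;                                     \
            (l)->locked = 0;                                         \
        } while (0)

    int main(void)
    {
        struct my_lock locks[3];

        /* One call site, three instances: all three share a single class key. */
        for (int i = 0; i < 3; i++)
            my_lock_init(&locks[i]);

        for (int i = 0; i < 3; i++)
            printf("lock %d class %p\n", i, (const void *)locks[i].class);
        return 0;
    }

This is why lockdep scales: it proves ordering rules once per class instead of once per instance, however many inodes (and inode locks) exist.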
/OK3568_Linux_fs/kernel/arch/csky/include/asm/

spinlock.h:
    14  static inline void arch_spin_lock(arch_spinlock_t *lock)
    18      u32 *p = &lock->lock;
    32      lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
    37  static inline int arch_spin_trylock(arch_spinlock_t *lock)
    41      u32 *p = &lock->lock;
    65  static inline void arch_spin_unlock(arch_spinlock_t *lock)
    68      WRITE_ONCE(lock->tickets.owner, lock->tickets.owner + 1);
    71  static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
    73      return lock.tickets.owner == lock.tickets.next;
    76  static inline int arch_spin_is_locked(arch_spinlock_t *lock)
    [all …]

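The tickets.owner/tickets.next pair above is a standard ticket spinlock: each locker atomically takes the next ticket, spins until owner reaches it, and unlock simply bumps owner, so the lock is FIFO-fair. A portable C11 sketch of the algorithm (not the csky ll/sc fast path; names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct ticket_lock {
        _Atomic uint32_t next;    /* next ticket to hand out */
        _Atomic uint32_t owner;   /* ticket currently being served */
    };

    static void ticket_lock(struct ticket_lock *lock)
    {
        /* Take a ticket; fetch_add makes ticket-taking atomic across CPUs. */
        uint32_t ticket = atomic_fetch_add(&lock->next, 1);

        /* Spin until it is our turn; acquire pairs with the release in unlock. */
        while (atomic_load_explicit(&lock->owner, memory_order_acquire) != ticket)
            ;  /* a cpu_relax()/pause hint would go here on real hardware */
    }

    static void ticket_unlock(struct ticket_lock *lock)
    {
        /* Only the holder writes owner, so a plain increment is safe. */
        uint32_t owner = atomic_load_explicit(&lock->owner, memory_order_relaxed);
        atomic_store_explicit(&lock->owner, owner + 1, memory_order_release);
    }

    static bool ticket_is_locked(struct ticket_lock *lock)
    {
        return atomic_load(&lock->owner) != atomic_load(&lock->next);
    }

This matches the excerpt's helpers: arch_spin_value_unlocked() is owner == next, and arch_spin_unlock() is the owner increment.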
/OK3568_Linux_fs/kernel/include/asm-generic/

qrwlock.h:
     3   * Queue read/write lock
    22  #define _QW_LOCKED   0x0ff   /* A writer holds the lock */
    30  extern void queued_read_lock_slowpath(struct qrwlock *lock);
    31  extern void queued_write_lock_slowpath(struct qrwlock *lock);
    34   * queued_read_trylock - try to acquire read lock of a queue rwlock
    35   * @lock : Pointer to queue rwlock structure
    36   * Return: 1 if lock acquired, 0 if failed
    38  static inline int queued_read_trylock(struct qrwlock *lock)
    42      cnts = atomic_read(&lock->cnts);
    44      cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
    [all …]

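The reader fast path shown above works by adding a per-reader bias to the count and then checking that no writer bits are set; if a writer raced in, the bias is backed out and the caller falls to the queueing slowpath. A hedged C11 sketch of that fast path (the _QW_WMASK and _QR_BIAS values are assumptions about the count layout, chosen to sit above the _QW_LOCKED byte from the excerpt):

    #include <stdatomic.h>
    #include <stdint.h>

    #define _QW_WMASK 0x1ffu   /* writer locked/waiting bits (assumed layout) */
    #define _QR_BIAS  0x200u   /* what each reader adds to the count (assumed) */

    struct qrwlock { _Atomic uint32_t cnts; };

    /* Return 1 if the read lock was taken on the fast path, 0 otherwise. */
    static inline int read_trylock_sketch(struct qrwlock *lock)
    {
        uint32_t cnts = atomic_load(&lock->cnts);

        if (!(cnts & _QW_WMASK)) {                 /* no writer active/waiting */
            /* fetch_add returns the old value, so add the bias back in. */
            cnts = atomic_fetch_add_explicit(&lock->cnts, _QR_BIAS,
                                             memory_order_acquire) + _QR_BIAS;
            if (!(cnts & _QW_WMASK))
                return 1;                          /* reader count took effect */
            /* A writer slipped in: undo our bias and report failure. */
            atomic_fetch_sub(&lock->cnts, _QR_BIAS);
        }
        return 0;
    }

Keeping readers in the high bits lets many readers increment concurrently while any writer presence is detectable with one mask test.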