Lines Matching refs:cmdq

341 struct arm_smmu_queue *q = &smmu->cmdq.q; in arm_smmu_cmdq_build_sync_cmd()
369 struct arm_smmu_queue *q = &smmu->cmdq.q; in arm_smmu_cmdq_skip_err()
427 static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_lock() argument
437 if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0) in arm_smmu_cmdq_shared_lock()
441 val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0); in arm_smmu_cmdq_shared_lock()
442 } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val); in arm_smmu_cmdq_shared_lock()
445 static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_unlock() argument
447 (void)atomic_dec_return_release(&cmdq->lock); in arm_smmu_cmdq_shared_unlock()
450 static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_tryunlock() argument
452 if (atomic_read(&cmdq->lock) == 1) in arm_smmu_cmdq_shared_tryunlock()
455 arm_smmu_cmdq_shared_unlock(cmdq); in arm_smmu_cmdq_shared_tryunlock()
459 #define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags) \ argument
463 __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \
469 #define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \ argument
471 atomic_set_release(&cmdq->lock, 0); \
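The arm_smmu_cmdq_shared_lock()/_unlock()/_tryunlock() entries and the exclusive trylock/unlock macros above implement a reader/writer-style lock on a single atomic counter: shared holders keep the count positive, while the exclusive path claims the lock by swapping 0 for INT_MIN so concurrent shared increments stay negative. Below is a minimal userspace sketch of that scheme, using C11 stdatomic as a stand-in for the kernel's atomic_t helpers; the cmdq_lock_t type and the busy-wait loops are illustrative assumptions, and the local_irq_save()/restore() pairing of the real macros is omitted.

/* Sketch of the cmdq shared/exclusive lock, assuming C11 stdatomic in
 * place of the kernel atomic_t API. Not the driver's implementation. */
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_int lock; } cmdq_lock_t;	/* hypothetical stand-in */

static void cmdq_shared_lock(cmdq_lock_t *c)
{
	int val;

	/* Fast path: with no exclusive holder the counter is >= 0, so a
	 * plain increment takes a shared reference. When held exclusively
	 * the counter sits at INT_MIN, the increment leaves it negative,
	 * and we fall back to the wait + cmpxchg loop below. */
	if (atomic_fetch_add_explicit(&c->lock, 1, memory_order_relaxed) >= 0)
		return;

	do {
		/* Wait for the exclusive holder to release (value >= 0). */
		while ((val = atomic_load_explicit(&c->lock,
						   memory_order_relaxed)) < 0)
			;
	} while (!atomic_compare_exchange_weak_explicit(&c->lock, &val, val + 1,
							memory_order_relaxed,
							memory_order_relaxed));
}

static void cmdq_shared_unlock(cmdq_lock_t *c)
{
	atomic_fetch_sub_explicit(&c->lock, 1, memory_order_release);
}

static bool cmdq_shared_tryunlock(cmdq_lock_t *c)
{
	/* If we are the last shared holder, report failure so the caller
	 * can do extra work (e.g. update the cached cons) before the real
	 * unlock. */
	if (atomic_load_explicit(&c->lock, memory_order_relaxed) == 1)
		return false;

	cmdq_shared_unlock(c);
	return true;
}

static bool cmdq_exclusive_trylock(cmdq_lock_t *c)
{
	/* Exclusive ownership is claimed by swapping 0 -> INT_MIN, which
	 * keeps the counter negative no matter how many shared lockers
	 * optimistically increment it in the meantime. */
	int expected = 0;

	return atomic_compare_exchange_strong_explicit(&c->lock, &expected,
						       INT_MIN,
						       memory_order_relaxed,
						       memory_order_relaxed);
}

static void cmdq_exclusive_unlock(cmdq_lock_t *c)
{
	/* The plain store back to 0 also discards any optimistic shared
	 * increments made while we held the lock; those waiters re-take
	 * their reference via the cmpxchg loop above. */
	atomic_store_explicit(&c->lock, 0, memory_order_release);
}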
515 static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq, in __arm_smmu_cmdq_poll_set_valid_map() argument
520 .max_n_shift = cmdq->q.llq.max_n_shift, in __arm_smmu_cmdq_poll_set_valid_map()
535 ptr = &cmdq->valid_map[swidx]; in __arm_smmu_cmdq_poll_set_valid_map()
562 static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq, in arm_smmu_cmdq_set_valid_map() argument
565 __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true); in arm_smmu_cmdq_set_valid_map()
569 static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq, in arm_smmu_cmdq_poll_valid_map() argument
572 __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false); in arm_smmu_cmdq_poll_valid_map()
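The __arm_smmu_cmdq_poll_set_valid_map() entries above walk a per-slot bitmap in word-sized chunks, either XOR-toggling bits to publish newly written commands (set) or spinning until a range of bits matches the wrap phase encoded in prod (poll). The sketch below shows the core idea for a toy queue of at most 64 slots guarded by a single atomic word; the single-word limit, the no-wrap-within-range assumption and all toy_* names are simplifications, not the driver's layout.

/* Toy valid-map: one bit per command slot, XOR to publish, poll until the
 * bits match the expected phase. The real driver walks an array of words
 * and derives the index/wrap split from max_n_shift. */
#include <stdatomic.h>
#include <stdint.h>

#define TOY_Q_SHIFT	6			/* 64 slots: assumption */
#define TOY_Q_SLOTS	(1u << TOY_Q_SHIFT)
#define TOY_IDX(p)	((p) & (TOY_Q_SLOTS - 1))
#define TOY_WRP(p)	((p) & TOY_Q_SLOTS)	/* wrap bit above the index */

static _Atomic uint64_t toy_valid_map;

/* Bits [sidx, eidx); assumes 0 <= sidx < eidx <= 64, i.e. no wrap. */
static uint64_t toy_mask(uint32_t sidx, uint32_t eidx)
{
	uint64_t hi = (eidx == 64) ? UINT64_MAX : (UINT64_C(1) << eidx) - 1;

	return hi & ~((UINT64_C(1) << sidx) - 1);
}

/* Producer: publish slots [sprod, eprod) by toggling their valid bits.
 * The "valid" value is the inverse of the wrap bit, so a zero-initialised
 * map starts out all-invalid and flips meaning on every wrap. */
static void toy_set_valid(uint32_t sprod, uint32_t eprod)
{
	atomic_fetch_xor(&toy_valid_map,
			 toy_mask(TOY_IDX(sprod), TOY_IDX(eprod)));
}

/* Poller: wait until every slot in [sprod, eprod) has been published for
 * the wrap phase carried by sprod (range assumed not to cross a wrap). */
static void toy_poll_valid(uint32_t sprod, uint32_t eprod)
{
	uint64_t mask = toy_mask(TOY_IDX(sprod), TOY_IDX(eprod));
	uint64_t want = TOY_WRP(sprod) ? 0 : mask;

	while ((atomic_load(&toy_valid_map) & mask) != want)
		;
}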
581 struct arm_smmu_cmdq *cmdq = &smmu->cmdq; in arm_smmu_cmdq_poll_until_not_full() local
588 if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) { in arm_smmu_cmdq_poll_until_not_full()
589 WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg)); in arm_smmu_cmdq_poll_until_not_full()
590 arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags); in arm_smmu_cmdq_poll_until_not_full()
591 llq->val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_poll_until_not_full()
597 llq->val = READ_ONCE(smmu->cmdq.q.llq.val); in arm_smmu_cmdq_poll_until_not_full()
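arm_smmu_cmdq_poll_until_not_full() re-reads the software prod/cons copy until space appears, with a fast path that takes the exclusive lock so a single CPU can refresh cons straight from the hardware cons register. The space test behind it relies on prod and cons each carrying a wrap bit just above the index bits; a minimal sketch of that convention (64-slot toy queue, sizes and names are assumptions):

/* Full/empty tests for a ring whose prod/cons carry a wrap bit directly
 * above the index bits. */
#include <stdbool.h>
#include <stdint.h>

#define TOY_Q_SHIFT	6
#define TOY_Q_SLOTS	(1u << TOY_Q_SHIFT)
#define TOY_IDX(p)	((p) & (TOY_Q_SLOTS - 1))
#define TOY_WRP(p)	((p) & TOY_Q_SLOTS)

static bool toy_queue_full(uint32_t prod, uint32_t cons)
{
	/* Same index, opposite wrap phase: the producer is a full lap ahead. */
	return TOY_IDX(prod) == TOY_IDX(cons) &&
	       TOY_WRP(prod) != TOY_WRP(cons);
}

static bool toy_queue_empty(uint32_t prod, uint32_t cons)
{
	/* Same index and same wrap phase: nothing outstanding. */
	return TOY_IDX(prod) == TOY_IDX(cons) &&
	       TOY_WRP(prod) == TOY_WRP(cons);
}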
616 struct arm_smmu_cmdq *cmdq = &smmu->cmdq; in __arm_smmu_cmdq_poll_until_msi() local
617 u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod)); in __arm_smmu_cmdq_poll_until_msi()
639 struct arm_smmu_cmdq *cmdq = &smmu->cmdq; in __arm_smmu_cmdq_poll_until_consumed() local
644 llq->val = READ_ONCE(smmu->cmdq.q.llq.val); in __arm_smmu_cmdq_poll_until_consumed()
679 llq->cons = readl(cmdq->q.cons_reg); in __arm_smmu_cmdq_poll_until_consumed()
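The __arm_smmu_cmdq_poll_until_msi() and __arm_smmu_cmdq_poll_until_consumed() entries above are the two CMD_SYNC completion paths: either the SMMU's completion MSI overwrites the sync command's own first dword with zero, or cons is polled (software copy, then the cons register) until it passes the sync's slot. The sketch below only shows the shape of those two waits; the spin budget, the volatile pointers and the wrap-free comparison are simplifying assumptions, while the driver uses proper timeouts and wrap-aware helpers.

/* Sketch of the two CMD_SYNC completion waits. */
#include <stdint.h>

static int toy_poll_until_msi(volatile uint32_t *sync_dword0,
			      unsigned long spins)
{
	/* The CMD_SYNC is built with its MSI address pointing back at its
	 * own queue slot, so completion shows up as dword 0 becoming 0. */
	while (spins--) {
		if (*sync_dword0 == 0)
			return 0;
	}
	return -1;	/* timed out */
}

static int toy_poll_until_consumed(volatile uint32_t *cons,
				   uint32_t sync_prod, unsigned long spins)
{
	/* Completion is visible once cons has advanced past the sync's
	 * position; wrap handling is omitted here for brevity. */
	while (spins--) {
		if (*cons >= sync_prod)
			return 0;
	}
	return -1;	/* timed out */
}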
694 static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds, in arm_smmu_cmdq_write_entries() argument
699 .max_n_shift = cmdq->q.llq.max_n_shift, in arm_smmu_cmdq_write_entries()
707 queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS); in arm_smmu_cmdq_write_entries()
734 struct arm_smmu_cmdq *cmdq = &smmu->cmdq; in arm_smmu_cmdq_issue_cmdlist() local
736 .max_n_shift = cmdq->q.llq.max_n_shift, in arm_smmu_cmdq_issue_cmdlist()
742 llq.val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_issue_cmdlist()
757 old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val); in arm_smmu_cmdq_issue_cmdlist()
771 arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n); in arm_smmu_cmdq_issue_cmdlist()
775 queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); in arm_smmu_cmdq_issue_cmdlist()
783 arm_smmu_cmdq_shared_lock(cmdq); in arm_smmu_cmdq_issue_cmdlist()
788 arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod); in arm_smmu_cmdq_issue_cmdlist()
793 atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod); in arm_smmu_cmdq_issue_cmdlist()
797 &cmdq->q.llq.atomic.prod); in arm_smmu_cmdq_issue_cmdlist()
805 arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod); in arm_smmu_cmdq_issue_cmdlist()
811 writel_relaxed(prod, cmdq->q.prod_reg); in arm_smmu_cmdq_issue_cmdlist()
818 atomic_set_release(&cmdq->owner_prod, prod); in arm_smmu_cmdq_issue_cmdlist()
829 readl_relaxed(cmdq->q.prod_reg), in arm_smmu_cmdq_issue_cmdlist()
830 readl_relaxed(cmdq->q.cons_reg)); in arm_smmu_cmdq_issue_cmdlist()
837 if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) { in arm_smmu_cmdq_issue_cmdlist()
838 WRITE_ONCE(cmdq->q.llq.cons, llq.cons); in arm_smmu_cmdq_issue_cmdlist()
839 arm_smmu_cmdq_shared_unlock(cmdq); in arm_smmu_cmdq_issue_cmdlist()
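The arm_smmu_cmdq_issue_cmdlist() entries above are the lock-free core of the command queue: each CPU reserves a slice of the ring with a single cmpxchg on the combined prod/cons word, writes its commands, publishes them through the valid map, and only the elected "owner" advances the hardware prod register for everything gathered so far. The condensed sketch below covers the reservation and owner election only; the 64-slot geometry, the packing of prod/cons into one 64-bit word and all toy_* names are assumptions, and the driver's steps 2-5 are summarised in comments rather than implemented.

/* Condensed sketch of step 1 of issue_cmdlist: reserve n slots with one
 * CAS and elect an owner via a flag bit repurposed in prod. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TOY_Q_SHIFT	6			/* 64 slots: assumption */
#define TOY_Q_SLOTS	(1u << TOY_Q_SHIFT)
#define TOY_IDX(p)	((p) & (TOY_Q_SLOTS - 1))
#define TOY_WRP(p)	((p) & TOY_Q_SLOTS)
#define TOY_OWNED_FLAG	(1u << 31)		/* repurposed top bit of prod */

static _Atomic uint64_t toy_llq;	/* cons in high half, prod in low half */

static uint32_t toy_prod(uint64_t v) { return (uint32_t)v; }
static uint32_t toy_cons(uint64_t v) { return (uint32_t)(v >> 32); }
static uint64_t toy_pack(uint32_t prod, uint32_t cons)
{
	return ((uint64_t)cons << 32) | prod;
}

static bool toy_has_space(uint32_t prod, uint32_t cons, uint32_t n)
{
	uint32_t space;

	if (TOY_WRP(prod) == TOY_WRP(cons))
		space = TOY_Q_SLOTS - (TOY_IDX(prod) - TOY_IDX(cons));
	else
		space = TOY_IDX(cons) - TOY_IDX(prod);

	return space >= n;
}

static uint32_t toy_inc_prod(uint32_t prod, uint32_t n)
{
	/* Adding n to the wrap+index pair flips the wrap bit naturally when
	 * the index rolls over; the flag bit is carried across unchanged. */
	uint32_t p = (TOY_WRP(prod) | TOY_IDX(prod)) + n;

	return (prod & TOY_OWNED_FLAG) | TOY_WRP(p) | TOY_IDX(p);
}

/* Reserve n slots; returns the caller's start prod and whether it owns. */
static uint32_t toy_reserve(uint32_t n, bool *owner)
{
	uint64_t old = atomic_load_explicit(&toy_llq, memory_order_relaxed);
	uint64_t head_val;
	uint32_t prod, cons;

	do {
		prod = toy_prod(old);
		cons = toy_cons(old);

		while (!toy_has_space(prod, cons, n)) {
			/* Driver: poll until not full, refreshing cons from
			 * the hardware register under the exclusive lock. */
			old = atomic_load_explicit(&toy_llq,
						   memory_order_relaxed);
			prod = toy_prod(old);
			cons = toy_cons(old);
		}

		/* Claim our slice and set the OWNED flag; whoever observed
		 * the flag clear in the old value becomes the owner. */
		head_val = toy_pack(toy_inc_prod(prod, n) | TOY_OWNED_FLAG,
				    cons);
	} while (!atomic_compare_exchange_weak_explicit(&toy_llq, &old,
							head_val,
							memory_order_relaxed,
							memory_order_relaxed));

	*owner = !(prod & TOY_OWNED_FLAG);

	/* Driver steps 2-5: write the commands starting at prod, publish
	 * them through the valid map (taking the cmdq shared lock first if
	 * a CMD_SYNC was appended), then, if owner, wait for owner_prod to
	 * reach prod, clear the OWNED flag to stop gathering, poll the
	 * valid map up to the gathered prod, write it to the hardware prod
	 * register, and hand off by setting owner_prod. */
	return prod & ~TOY_OWNED_FLAG;
}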
2641 struct arm_smmu_cmdq *cmdq = &smmu->cmdq; in arm_smmu_cmdq_init() local
2642 unsigned int nents = 1 << cmdq->q.llq.max_n_shift; in arm_smmu_cmdq_init()
2645 atomic_set(&cmdq->owner_prod, 0); in arm_smmu_cmdq_init()
2646 atomic_set(&cmdq->lock, 0); in arm_smmu_cmdq_init()
2653 cmdq->valid_map = bitmap; in arm_smmu_cmdq_init()
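The arm_smmu_cmdq_init() entries above set up the software side of the queue: both atomics start at zero and a zeroed bitmap is allocated with one valid bit per command slot. A minimal userspace sketch, with calloc standing in for the kernel's bitmap allocation and the toy_cmdq layout as an assumption:

/* Sketch of the cmdq software-state init; error handling trimmed. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

struct toy_cmdq {
	atomic_int lock;		/* shared/exclusive lock counter */
	_Atomic uint32_t owner_prod;	/* prod published by the last owner */
	unsigned long *valid_map;	/* one bit per command slot */
	uint32_t max_n_shift;		/* log2 of the number of slots */
};

static int toy_cmdq_init(struct toy_cmdq *cmdq)
{
	size_t nents = (size_t)1 << cmdq->max_n_shift;
	size_t bits_per_word = 8 * sizeof(unsigned long);
	size_t nwords = (nents + bits_per_word - 1) / bits_per_word;

	atomic_init(&cmdq->owner_prod, 0);
	atomic_init(&cmdq->lock, 0);

	cmdq->valid_map = calloc(nwords, sizeof(unsigned long));
	return cmdq->valid_map ? 0 : -1;
}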
2665 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD, in arm_smmu_init_queues()
3051 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE); in arm_smmu_device_reset()
3052 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD); in arm_smmu_device_reset()
3053 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS); in arm_smmu_device_reset()
3256 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
3258 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) { in arm_smmu_device_hw_probe()
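The arm_smmu_device_hw_probe() entries above size the command queue by clamping the shift advertised in IDR1 to the driver's CMDQ_MAX_SZ_SHIFT and rejecting queues that cannot hold a full command batch plus its trailing CMD_SYNC. The sketch below mirrors that check; the field position, the 8-bit cap and the 64-entry batch size are illustrative assumptions rather than quoted constants.

/* Sketch of the probe-time cmdq sizing check. */
#include <stdint.h>
#include <stdio.h>

#define TOY_CMDQ_MAX_SZ_SHIFT	8	/* driver cap: assumption */
#define TOY_CMDQ_BATCH_ENTRIES	64	/* batch size: assumption */
#define TOY_IDR1_CMDQS_SHIFT	0	/* field layout: assumption */
#define TOY_IDR1_CMDQS_MASK	0x1f

static unsigned int toy_ilog2(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static int toy_probe_cmdq_shift(uint32_t idr1, uint32_t *out_shift)
{
	uint32_t shift = (idr1 >> TOY_IDR1_CMDQS_SHIFT) & TOY_IDR1_CMDQS_MASK;

	if (shift > TOY_CMDQ_MAX_SZ_SHIFT)
		shift = TOY_CMDQ_MAX_SZ_SHIFT;

	/* Batches are never split, so the ring must be strictly larger than
	 * one batch; the extra room also covers the trailing sync. */
	if (shift <= toy_ilog2(TOY_CMDQ_BATCH_ENTRIES)) {
		fprintf(stderr, "command queue too small: %u entries\n",
			1u << shift);
		return -1;
	}

	*out_shift = shift;
	return 0;
}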