/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * fence-chain: chain fences together in a timeline
 *
 * Copyright (C) 2018 Advanced Micro Devices, Inc.
 * Authors:
 *	Christian König <christian.koenig@amd.com>
 */

#ifndef __LINUX_DMA_FENCE_CHAIN_H
#define __LINUX_DMA_FENCE_CHAIN_H

#include <linux/dma-fence.h>
#include <linux/irq_work.h>

/**
 * struct dma_fence_chain - fence to represent a node of a fence chain
 * @base: fence base class
 * @lock: spinlock for fence handling
 * @prev: previous fence of the chain
 * @prev_seqno: original previous seqno before garbage collection
 * @fence: encapsulated fence
 * @cb: callback structure for signaling
 * @work: irq work item for signaling
 */
struct dma_fence_chain {
	struct dma_fence base;
	spinlock_t lock;
	struct dma_fence __rcu *prev;
	u64 prev_seqno;
	struct dma_fence *fence;
	struct dma_fence_cb cb;
	struct irq_work work;
};

extern const struct dma_fence_ops dma_fence_chain_ops;

/**
 * to_dma_fence_chain - cast a fence to a dma_fence_chain
 * @fence: fence to cast to a dma_fence_chain
 *
 * Returns NULL if the fence is not a dma_fence_chain,
 * or the dma_fence_chain otherwise.
 */
static inline struct dma_fence_chain *
to_dma_fence_chain(struct dma_fence *fence)
{
	if (!fence || fence->ops != &dma_fence_chain_ops)
		return NULL;

	return container_of(fence, struct dma_fence_chain, base);
}
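
/*
 * Usage sketch: a caller that only holds a struct dma_fence pointer can use
 * to_dma_fence_chain() to check whether it is a chain node and, if so, look
 * at the fence it encapsulates. The helper below is hypothetical and only
 * illustrates the cast:
 *
 *	static struct dma_fence *example_chain_payload(struct dma_fence *fence)
 *	{
 *		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
 *
 *		if (!chain)
 *			return fence;	// not a chain node, fence stands for itself
 *
 *		return chain->fence;	// chain node, return the encapsulated fence
 *	}
 */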

/**
 * dma_fence_chain_for_each - iterate over all fences in chain
 * @iter: current fence
 * @head: starting point
 *
 * Iterate over all fences in the chain. We keep a reference to the current
 * fence while inside the loop which must be dropped when breaking out.
 */
#define dma_fence_chain_for_each(iter, head)	\
	for (iter = dma_fence_get(head); iter; \
	     iter = dma_fence_chain_walk(iter))

struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence);
int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno);
void dma_fence_chain_init(struct dma_fence_chain *chain,
			  struct dma_fence *prev,
			  struct dma_fence *fence,
			  uint64_t seqno);

#endif /* __LINUX_DMA_FENCE_CHAIN_H */
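
/*
 * Usage sketch: building a chain node and walking the resulting chain. The
 * fences prev and fence, the sequence number seqno, and the predicate
 * stop_condition() are assumed to be provided by the caller; the sketch also
 * assumes dma_fence_chain_init() absorbs the caller's references to prev and
 * fence, and that <linux/slab.h> is included for kzalloc(). As noted above
 * for dma_fence_chain_for_each(), the reference held on the current fence
 * must be dropped when breaking out of the loop early.
 *
 *	struct dma_fence_chain *chain;
 *	struct dma_fence *iter;
 *
 *	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
 *	if (!chain)
 *		return -ENOMEM;
 *
 *	dma_fence_chain_init(chain, prev, fence, seqno);
 *
 *	dma_fence_chain_for_each(iter, &chain->base) {
 *		if (stop_condition(iter)) {
 *			dma_fence_put(iter);
 *			break;
 *		}
 *	}
 */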