/* kernel/drivers/gpu/drm/amd/amdkfd/kfd_events.c */
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include "kfd_iommu.h"
#include <linux/device.h>

/*
 * Wrapper around wait_queue_entry_t
 */
struct kfd_event_waiter {
	wait_queue_entry_t wait;
	struct kfd_event *event; /* Event to wait for */
	bool activated;		 /* Becomes true when event is signaled */
};

/*
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt. (This is needed because some interrupts
 * do not contain enough spare data bits to identify an event.)
 * We get whole pages and map them to the process VA.
 * Individual signal events use their event_id as slot index.
 */
struct kfd_signal_page {
	uint64_t *kernel_address;
	uint64_t __user *user_address;
	bool need_to_free_pages;
};
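
/*
 * Illustrative sketch (not part of the driver): seen from a user mode
 * signaler, a slot on the mapped signal page is simply the 64-bit word
 * indexed by the event ID. The names below (mapped_signal_page,
 * ring_doorbell) are hypothetical and only show the slot addressing; the
 * real signaling protocol is defined by the user mode runtime and the GPU.
 *
 *	volatile uint64_t *slots = mapped_signal_page;	// from mmap() on the KFD device
 *	slots[event_id] = 1;	// any value other than UNSIGNALED_EVENT_SLOT
 *	ring_doorbell();	// hypothetical: raise the signaling interrupt
 */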

static uint64_t *page_slots(struct kfd_signal_page *page)
{
	return page->kernel_address;
}

static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
{
	void *backing_store;
	struct kfd_signal_page *page;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return NULL;

	backing_store = (void *) __get_free_pages(GFP_KERNEL,
					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
	if (!backing_store)
		goto fail_alloc_signal_store;

	/* Initialize all events to unsignaled */
	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = backing_store;
	page->need_to_free_pages = true;
	pr_debug("Allocated new event signal page at %p, for process %p\n",
			page, p);

	return page;

fail_alloc_signal_store:
	kfree(page);
	return NULL;
}

static int allocate_event_notification_slot(struct kfd_process *p,
					    struct kfd_event *ev)
{
	int id;

	if (!p->signal_page) {
		p->signal_page = allocate_signal_page(p);
		if (!p->signal_page)
			return -ENOMEM;
		/* Oldest user mode expects 256 event slots */
		p->signal_mapped_size = 256*8;
	}

	/*
	 * Compatibility with old user mode: Only use signal slots
	 * user mode has mapped, may be less than
	 * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
	 * of the event limit without breaking user mode.
	 */
	id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
		       GFP_KERNEL);
	if (id < 0)
		return id;

	ev->event_id = id;
	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;

	return 0;
}

/*
 * Assumes that p->event_mutex is held and of course that p is not going
 * away (current or locked).
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	return idr_find(&p->event_idr, id);
}

/**
 * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
 * @p:     Pointer to struct kfd_process
 * @id:    ID to look up
 * @bits:  Number of valid bits in @id
 *
 * Finds the first signaled event with a matching partial ID. If no
 * matching signaled event is found, returns NULL. In that case the
 * caller should assume that the partial ID is invalid and do an
 * exhaustive search of all signaled events.
 *
 * If multiple events with the same partial ID signal at the same
 * time, they will be found one interrupt at a time, not necessarily
 * in the same order the interrupts occurred. As long as the number of
 * interrupts is correct, all signaled events will be seen by the
 * driver.
 */
static struct kfd_event *lookup_signaled_event_by_partial_id(
	struct kfd_process *p, uint32_t id, uint32_t bits)
{
	struct kfd_event *ev;

	if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
		return NULL;

	/* Fast path for the common case that @id is not a partial ID
	 * and we only need a single lookup.
	 */
	if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			return NULL;

		return idr_find(&p->event_idr, id);
	}

	/* General case for partial IDs: Iterate over all matching IDs
	 * and find the first one that has signaled.
	 */
	for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			continue;

		ev = idr_find(&p->event_idr, id);
	}

	return ev;
}
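
/*
 * Worked example for the partial ID iteration above (illustrative only):
 * if the interrupt payload carries bits = 8 valid low-order bits and the
 * partial ID is 5, the candidate full event IDs visited are
 *
 *	5, 5 + 256, 5 + 512, 5 + 768, ...
 *
 * i.e. id += (1U << 8) per step, until KFD_SIGNAL_EVENT_LIMIT is reached or
 * a candidate whose signal slot is not UNSIGNALED_EVENT_SLOT is found in
 * the IDR.
 */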

static int create_signal_event(struct file *devkfd,
				struct kfd_process *p,
				struct kfd_event *ev)
{
	int ret;

	if (p->signal_mapped_size &&
	    p->signal_event_count == p->signal_mapped_size / 8) {
		if (!p->signal_event_limit_reached) {
			pr_debug("Signal event wasn't created because limit was reached\n");
			p->signal_event_limit_reached = true;
		}
		return -ENOSPC;
	}

	ret = allocate_event_notification_slot(p, ev);
	if (ret) {
		pr_warn("Signal event wasn't created because out of kernel memory\n");
		return ret;
	}

	p->signal_event_count++;

	ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
	pr_debug("Signal event number %zu created with id %d, address %p\n",
			p->signal_event_count, ev->event_id,
			ev->user_signal_address);

	return 0;
}

static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
{
	/* Cast KFD_LAST_NONSIGNAL_EVENT_ID to uint32_t. This allows an
	 * intentional integer overflow to -1 without a compiler
	 * warning. idr_alloc treats a negative value as "maximum
	 * signed integer".
	 */
	int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
			   (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
			   GFP_KERNEL);

	if (id < 0)
		return id;
	ev->event_id = id;

	return 0;
}

void kfd_event_init_process(struct kfd_process *p)
{
	mutex_init(&p->event_mutex);
	idr_init(&p->event_idr);
	p->signal_page = NULL;
	p->signal_event_count = 0;
}

static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Wake up pending waiters. They will return failure */
	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->event = NULL;
	wake_up_all(&ev->wq);

	if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
	    ev->type == KFD_EVENT_TYPE_DEBUG)
		p->signal_event_count--;

	idr_remove(&p->event_idr, ev->event_id);
	kfree(ev);
}

static void destroy_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	uint32_t id;

	idr_for_each_entry(&p->event_idr, ev, id)
		destroy_event(p, ev);
	idr_destroy(&p->event_idr);
}

/*
 * We assume that the process is being destroyed and there is no need to
 * unmap the pages or keep bookkeeping data in order.
 */
static void shutdown_signal_page(struct kfd_process *p)
{
	struct kfd_signal_page *page = p->signal_page;

	if (page) {
		if (page->need_to_free_pages)
			free_pages((unsigned long)page->kernel_address,
				   get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
		kfree(page);
	}
}

void kfd_event_free_process(struct kfd_process *p)
{
	destroy_events(p);
	shutdown_signal_page(p);
}

static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
					ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
}

int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
		       uint64_t size)
{
	struct kfd_signal_page *page;

	if (p->signal_page)
		return -EBUSY;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* Initialize all events to unsignaled */
	memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = kernel_address;

	p->signal_page = page;
	p->signal_mapped_size = size;

	return 0;
}

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index)
{
	int ret = 0;
	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	init_waitqueue_head(&ev->wq);

	*event_page_offset = 0;

	mutex_lock(&p->event_mutex);

	switch (event_type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev);
		if (!ret) {
			*event_page_offset = KFD_MMAP_TYPE_EVENTS;
			*event_slot_index = ev->event_id;
		}
		break;
	default:
		ret = create_other_event(p, ev);
		break;
	}

	if (!ret) {
		*event_id = ev->event_id;
		*event_trigger_data = ev->event_id;
	} else {
		kfree(ev);
	}

	mutex_unlock(&p->event_mutex);

	return ret;
}
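
/*
 * Rough user space sketch of the create path above. This is illustrative
 * only: the exact argument structure and ioctl number live in
 * include/uapi/linux/kfd_ioctl.h and may differ between kernel versions;
 * kfd_fd is assumed to be an open file descriptor on the KFD device node.
 *
 *	struct kfd_ioctl_create_event_args args = {
 *		.event_type = KFD_IOC_EVENT_SIGNAL,
 *		.auto_reset = 1,
 *	};
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &args) == 0) {
 *		// args.event_id, args.event_slot_index and
 *		// args.event_page_offset are filled in from
 *		// kfd_event_create()'s output parameters.
 *	}
 */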

/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
	struct kfd_event *ev;
	int ret = 0;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev)
		destroy_event(p, ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void set_event(struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Auto reset if the list is non-empty and we're waking
	 * someone. waitqueue_active is safe here because we're
	 * protected by the p->event_mutex, which is also held when
	 * updating the wait queues in kfd_wait_on_events.
	 */
	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);

	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->activated = true;

	wake_up_all(&ev->wq);
}

/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		set_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void reset_event(struct kfd_event *ev)
{
	ev->signaled = false;
}

/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		reset_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
	page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
}

static void set_event_from_interrupt(struct kfd_process *p,
					struct kfd_event *ev)
{
	if (ev && event_can_be_gpu_signaled(ev)) {
		acknowledge_signal(p, ev);
		set_event(ev);
	}
}

void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
				uint32_t valid_id_bits)
{
	struct kfd_event *ev = NULL;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	if (valid_id_bits)
		ev = lookup_signaled_event_by_partial_id(p, partial_id,
							 valid_id_bits);
	if (ev) {
		set_event_from_interrupt(p, ev);
	} else if (p->signal_page) {
		/*
		 * Partial ID lookup failed. Assume that the event ID
		 * in the interrupt payload was invalid and do an
		 * exhaustive search of signaled events.
		 */
		uint64_t *slots = page_slots(p->signal_page);
		uint32_t id;

		if (valid_id_bits)
			pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
					     partial_id, valid_id_bits);

		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) {
			/* With relatively few events, it's faster to
			 * iterate over the event IDR
			 */
			idr_for_each_entry(&p->event_idr, ev, id) {
				if (id >= KFD_SIGNAL_EVENT_LIMIT)
					break;

				if (slots[id] != UNSIGNALED_EVENT_SLOT)
					set_event_from_interrupt(p, ev);
			}
		} else {
			/* With relatively many events, it's faster to
			 * iterate over the signal slots and lookup
			 * only signaled events from the IDR.
			 */
			for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
				if (slots[id] != UNSIGNALED_EVENT_SLOT) {
					ev = lookup_event_by_id(p, id);
					set_event_from_interrupt(p, ev);
				}
		}
	}

	mutex_unlock(&p->event_mutex);
	kfd_unref_process(p);
}
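
/*
 * Sizing note for the crossover above (illustrative, assuming
 * KFD_SIGNAL_EVENT_LIMIT is 4096 as defined in kfd_priv.h at the time of
 * writing): the IDR walk is chosen while the process has fewer than
 * 4096 / 64 = 64 signal events; beyond that, scanning all 4096 signal
 * slots linearly and looking up only the signaled ones in the IDR is
 * cheaper.
 */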

static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
	struct kfd_event_waiter *event_waiters;
	uint32_t i;

	event_waiters = kmalloc_array(num_events,
					sizeof(struct kfd_event_waiter),
					GFP_KERNEL);
	if (!event_waiters)
		return NULL;

	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
		init_wait(&event_waiters[i].wait);
		event_waiters[i].activated = false;
	}

	return event_waiters;
}

static int init_event_waiter_get_status(struct kfd_process *p,
		struct kfd_event_waiter *waiter,
		uint32_t event_id)
{
	struct kfd_event *ev = lookup_event_by_id(p, event_id);

	if (!ev)
		return -EINVAL;

	waiter->event = ev;
	waiter->activated = ev->signaled;
	ev->signaled = ev->signaled && !ev->auto_reset;

	return 0;
}

static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
{
	struct kfd_event *ev = waiter->event;

	/* Only add to the wait list if we actually need to
	 * wait on this event.
	 */
	if (!waiter->activated)
		add_wait_queue(&ev->wq, &waiter->wait);
}

/* test_event_condition - Test condition of events being waited for
 * @all:           Return completion only if all events have signaled
 * @num_events:    Number of events to wait for
 * @event_waiters: Array of event waiters, one per event
 *
 * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
 * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
 * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
 * the events have been destroyed.
 */
static uint32_t test_event_condition(bool all, uint32_t num_events,
				struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (!event_waiters[i].event)
			return KFD_IOC_WAIT_RESULT_FAIL;

		if (event_waiters[i].activated) {
			if (!all)
				return KFD_IOC_WAIT_RESULT_COMPLETE;

			activated_count++;
		}
	}

	return activated_count == num_events ?
		KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}

/*
 * Copy event specific data, if defined.
 * Currently only memory exception events have additional data to copy to user
 */
static int copy_signaled_event_data(uint32_t num_events,
		struct kfd_event_waiter *event_waiters,
		struct kfd_event_data __user *data)
{
	struct kfd_hsa_memory_exception_data *src;
	struct kfd_hsa_memory_exception_data __user *dst;
	struct kfd_event_waiter *waiter;
	struct kfd_event *event;
	uint32_t i;

	for (i = 0; i < num_events; i++) {
		waiter = &event_waiters[i];
		event = waiter->event;
		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
			dst = &data[i].memory_exception_data;
			src = &event->memory_exception_data;
			if (copy_to_user(dst, src,
				sizeof(struct kfd_hsa_memory_exception_data)))
				return -EFAULT;
		}
	}

	return 0;
}

static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
		return 0;

	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return MAX_SCHEDULE_TIMEOUT;

	/*
	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
	 * but we consider them finite.
	 * This hack is wrong, but nobody is likely to notice.
	 */
	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

	return msecs_to_jiffies(user_timeout_ms) + 1;
}
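
/*
 * Mapping sketch for the conversion above, assuming the uapi values
 * KFD_EVENT_TIMEOUT_IMMEDIATE == 0 and KFD_EVENT_TIMEOUT_INFINITE ==
 * 0xFFFFFFFF (their values in include/uapi/linux/kfd_ioctl.h at the time
 * of writing):
 *
 *	user_timeout_to_jiffies(0)          -> 0 (poll once, never sleep)
 *	user_timeout_to_jiffies(0xFFFFFFFF) -> MAX_SCHEDULE_TIMEOUT
 *	user_timeout_to_jiffies(1000)       -> msecs_to_jiffies(1000) + 1
 *
 * The "+ 1" biases the conversion upward so the wait is not cut short by
 * jiffy granularity.
 */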

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
{
	uint32_t i;

	for (i = 0; i < num_events; i++)
		if (waiters[i].event)
			remove_wait_queue(&waiters[i].event->wq,
					  &waiters[i].wait);

	kfree(waiters);
}

int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result)
{
	struct kfd_event_data __user *events =
			(struct kfd_event_data __user *) data;
	uint32_t i;
	int ret = 0;

	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(user_timeout_ms);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&p->event_mutex);

	for (i = 0; i < num_events; i++) {
		struct kfd_event_data event_data;

		if (copy_from_user(&event_data, &events[i],
				sizeof(struct kfd_event_data))) {
			ret = -EFAULT;
			goto out_unlock;
		}

		ret = init_event_waiter_get_status(p, &event_waiters[i],
				event_data.event_id);
		if (ret)
			goto out_unlock;
	}

	/* Check condition once. */
	*wait_result = test_event_condition(all, num_events, event_waiters);
	if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);
		goto out_unlock;
	} else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
		/* This should not happen. Events shouldn't be
		 * destroyed while we're holding the event_mutex
		 */
		goto out_unlock;
	}

	/* Add to wait lists if we need to wait. */
	for (i = 0; i < num_events; i++)
		init_event_waiter_add_to_waitlist(&event_waiters[i]);

	mutex_unlock(&p->event_mutex);

	while (true) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (signal_pending(current)) {
			/*
			 * This is wrong when a nonzero, non-infinite timeout
			 * is specified. We need to use
			 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
			 * contains a union with data for each user and it's
			 * in generic kernel code that I don't want to
			 * touch yet.
			 */
			ret = -ERESTARTSYS;
			break;
		}

		/* Set task state to interruptible sleep before
		 * checking wake-up conditions. A concurrent wake-up
		 * will put the task back into runnable state. In that
		 * case schedule_timeout will not put the task to
		 * sleep and we'll get a chance to re-check the
		 * updated conditions almost immediately. Otherwise,
		 * this race condition would lead to a soft hang or a
		 * very long sleep.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		*wait_result = test_event_condition(all, num_events,
						    event_waiters);
		if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
			break;

		if (timeout <= 0)
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	/* copy_signaled_event_data may sleep. So this has to happen
	 * after the task state is set back to RUNNING.
	 */
	if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);

	mutex_lock(&p->event_mutex);
out_unlock:
	free_waiters(num_events, event_waiters);
	mutex_unlock(&p->event_mutex);
out:
	if (ret)
		*wait_result = KFD_IOC_WAIT_RESULT_FAIL;
	else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
		ret = -EIO;

	return ret;
}
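
/*
 * The wait loop in kfd_wait_on_events() follows the kernel's canonical
 * "prepare to sleep, then re-check the condition" pattern, which closes the
 * race between testing the wake-up condition and going to sleep. A minimal
 * generic sketch of that pattern (wq, wait and condition are placeholders):
 *
 *	add_wait_queue(&wq, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		timeout = schedule_timeout(timeout);
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&wq, &wait);
 *
 * Here the add/remove steps are handled by init_event_waiter_add_to_waitlist()
 * and free_waiters(), and the condition is test_event_condition().
 */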

int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
	unsigned long pfn;
	struct kfd_signal_page *page;
	int ret;

	/* check required size doesn't exceed the allocated size */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
			get_order(vma->vm_end - vma->vm_start)) {
		pr_err("Event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page = p->signal_page;
	if (!page) {
		/* Probably KFD bug, but mmap is user-accessible. */
		pr_debug("Signal page could not be found\n");
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP;

	pr_debug("Mapping signal page\n");
	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
	pr_debug("     pfn                 == 0x%016lX\n", pfn);
	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size                == 0x%08lX\n",
			vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* mapping the page to user process */
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (!ret)
		p->signal_mapped_size = vma->vm_end - vma->vm_start;

	return ret;
}
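
/*
 * From user space the signal page set up above is obtained with a plain
 * mmap() on the KFD device file, using the mmap offset returned by the
 * create-event ioctl. A rough sketch (error handling omitted; kfd_fd,
 * page_size and event_page_offset are assumed to come from the caller):
 *
 *	uint64_t *slots = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, kfd_fd, event_page_offset);
 *
 *	// slots[event_slot_index] is this event's 64-bit signal slot.
 */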

/*
 * Assumes that p->event_mutex is held and of course
 * that p is not going away (current or locked).
 */
static void lookup_events_by_type_and_signal(struct kfd_process *p,
		int type, void *event_data)
{
	struct kfd_hsa_memory_exception_data *ev_data;
	struct kfd_event *ev;
	uint32_t id;
	bool send_signal = true;

	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id)
		if (ev->type == type) {
			send_signal = false;
			dev_dbg(kfd_device,
					"Event found: id %X type %d",
					ev->event_id, ev->type);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
		}

	if (type == KFD_EVENT_TYPE_MEMORY) {
		dev_warn(kfd_device,
			"Sending SIGSEGV to process %d (pasid 0x%x)",
				p->lead_thread->pid, p->pasid);
		send_sig(SIGSEGV, p->lead_thread, 0);
	}

	/* Send SIGTERM if no event of type "type" has been found */
	if (send_signal) {
		if (send_sigterm) {
			dev_warn(kfd_device,
				"Sending SIGTERM to process %d (pasid 0x%x)",
					p->lead_thread->pid, p->pasid);
			send_sig(SIGTERM, p->lead_thread, 0);
		} else {
			dev_err(kfd_device,
				"Process %d (pasid 0x%x) got unhandled exception",
				p->lead_thread->pid, p->pasid);
		}
	}
}

#ifdef KFD_SUPPORT_IOMMU_V2
void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid,
		unsigned long address, bool is_write_requested,
		bool is_execute_requested)
{
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct vm_area_struct *vma;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct mm_struct *mm;

	if (!p)
		return; /* Presumably process exited. */

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		kfd_unref_process(p);
		return; /* Process is exiting */
	}

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

	mmap_read_lock(mm);
	vma = find_vma(mm, address);

	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.va = address;
	/* Set failure reason */
	memory_exception_data.failure.NotPresent = 1;
	memory_exception_data.failure.NoExecute = 0;
	memory_exception_data.failure.ReadOnly = 0;
	if (vma && address >= vma->vm_start) {
		memory_exception_data.failure.NotPresent = 0;

		if (is_write_requested && !(vma->vm_flags & VM_WRITE))
			memory_exception_data.failure.ReadOnly = 1;
		else
			memory_exception_data.failure.ReadOnly = 0;

		if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
			memory_exception_data.failure.NoExecute = 1;
		else
			memory_exception_data.failure.NoExecute = 0;
	}

	mmap_read_unlock(mm);
	mmput(mm);

	pr_debug("notpresent %d, noexecute %d, readonly %d\n",
			memory_exception_data.failure.NotPresent,
			memory_exception_data.failure.NoExecute,
			memory_exception_data.failure.ReadOnly);

	/* Workaround on Raven to not kill the process when memory is freed
	 * before IOMMU is able to finish processing all the excessive PPRs
	 */
	if (dev->device_info->asic_family != CHIP_RAVEN &&
	    dev->device_info->asic_family != CHIP_RENOIR) {
		mutex_lock(&p->event_mutex);

		/* Lookup events by type and signal them */
		lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
				&memory_exception_data);

		mutex_unlock(&p->event_mutex);
	}

	kfd_unref_process(p);
}
#endif /* KFD_SUPPORT_IOMMU_V2 */

void kfd_signal_hw_exception_event(u32 pasid)
{
	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);

	mutex_unlock(&p->event_mutex);
	kfd_unref_process(p);
}

void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
				struct kfd_vm_fault_info *info)
{
	struct kfd_event *ev;
	uint32_t id;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct kfd_hsa_memory_exception_data memory_exception_data;

	if (!p)
		return; /* Presumably process exited. */
	memset(&memory_exception_data, 0, sizeof(memory_exception_data));
	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.failure.imprecise = true;
	/* Set failure reason */
	if (info) {
		memory_exception_data.va = (info->page_addr) << PAGE_SHIFT;
		memory_exception_data.failure.NotPresent =
			info->prot_valid ? 1 : 0;
		memory_exception_data.failure.NoExecute =
			info->prot_exec ? 1 : 0;
		memory_exception_data.failure.ReadOnly =
			info->prot_write ? 1 : 0;
		memory_exception_data.failure.imprecise = 0;
	}
	mutex_lock(&p->event_mutex);

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id)
		if (ev->type == KFD_EVENT_TYPE_MEMORY) {
			ev->memory_exception_data = memory_exception_data;
			set_event(ev);
		}

	mutex_unlock(&p->event_mutex);
	kfd_unref_process(p);
}

void kfd_signal_reset_event(struct kfd_dev *dev)
{
	struct kfd_hsa_hw_exception_data hw_exception_data;
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct kfd_process *p;
	struct kfd_event *ev;
	unsigned int temp;
	uint32_t id, idx;
	int reset_cause = atomic_read(&dev->sram_ecc_flag) ?
			KFD_HW_EXCEPTION_ECC :
			KFD_HW_EXCEPTION_GPU_HANG;

	/* Whole gpu reset caused by GPU hang and memory is lost */
	memset(&hw_exception_data, 0, sizeof(hw_exception_data));
	hw_exception_data.gpu_id = dev->id;
	hw_exception_data.memory_lost = 1;
	hw_exception_data.reset_cause = reset_cause;

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));
	memory_exception_data.ErrorType = KFD_MEM_ERR_SRAM_ECC;
	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.failure.imprecise = true;

	idx = srcu_read_lock(&kfd_processes_srcu);
	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->event_mutex);
		id = KFD_FIRST_NONSIGNAL_EVENT_ID;
		idr_for_each_entry_continue(&p->event_idr, ev, id) {
			if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
				ev->hw_exception_data = hw_exception_data;
				set_event(ev);
			}
			if (ev->type == KFD_EVENT_TYPE_MEMORY &&
			    reset_cause == KFD_HW_EXCEPTION_ECC) {
				ev->memory_exception_data = memory_exception_data;
				set_event(ev);
			}
		}
		mutex_unlock(&p->event_mutex);
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}