/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25*4882a593Smuzhiyun * 26*4882a593Smuzhiyun **************************************************************************/ 27*4882a593Smuzhiyun /* 28*4882a593Smuzhiyun * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 29*4882a593Smuzhiyun */ 30*4882a593Smuzhiyun 31*4882a593Smuzhiyun #ifndef _TTM_EXECBUF_UTIL_H_ 32*4882a593Smuzhiyun #define _TTM_EXECBUF_UTIL_H_ 33*4882a593Smuzhiyun 34*4882a593Smuzhiyun #include <linux/list.h> 35*4882a593Smuzhiyun 36*4882a593Smuzhiyun #include "ttm_bo_api.h" 37*4882a593Smuzhiyun 38*4882a593Smuzhiyun /** 39*4882a593Smuzhiyun * struct ttm_validate_buffer 40*4882a593Smuzhiyun * 41*4882a593Smuzhiyun * @head: list head for thread-private list. 42*4882a593Smuzhiyun * @bo: refcounted buffer object pointer. 43*4882a593Smuzhiyun * @num_shared: How many shared fences we want to add. 44*4882a593Smuzhiyun */ 45*4882a593Smuzhiyun 46*4882a593Smuzhiyun struct ttm_validate_buffer { 47*4882a593Smuzhiyun struct list_head head; 48*4882a593Smuzhiyun struct ttm_buffer_object *bo; 49*4882a593Smuzhiyun unsigned int num_shared; 50*4882a593Smuzhiyun }; 51*4882a593Smuzhiyun 52*4882a593Smuzhiyun /** 53*4882a593Smuzhiyun * function ttm_eu_backoff_reservation 54*4882a593Smuzhiyun * 55*4882a593Smuzhiyun * @ticket: ww_acquire_ctx from reserve call 56*4882a593Smuzhiyun * @list: thread private list of ttm_validate_buffer structs. 57*4882a593Smuzhiyun * 58*4882a593Smuzhiyun * Undoes all buffer validation reservations for bos pointed to by 59*4882a593Smuzhiyun * the list entries. 60*4882a593Smuzhiyun */ 61*4882a593Smuzhiyun void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, 62*4882a593Smuzhiyun struct list_head *list); 63*4882a593Smuzhiyun 64*4882a593Smuzhiyun /** 65*4882a593Smuzhiyun * function ttm_eu_reserve_buffers 66*4882a593Smuzhiyun * 67*4882a593Smuzhiyun * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only 68*4882a593Smuzhiyun * non-blocking reserves should be tried. 
69*4882a593Smuzhiyun * @list: thread private list of ttm_validate_buffer structs. 70*4882a593Smuzhiyun * @intr: should the wait be interruptible 71*4882a593Smuzhiyun * @dups: [out] optional list of duplicates. 72*4882a593Smuzhiyun * @del_lru: true if BOs should be removed from the LRU. 73*4882a593Smuzhiyun * 74*4882a593Smuzhiyun * Tries to reserve bos pointed to by the list entries for validation. 75*4882a593Smuzhiyun * If the function returns 0, all buffers are marked as "unfenced", 76*4882a593Smuzhiyun * taken off the lru lists and are not synced for write CPU usage. 77*4882a593Smuzhiyun * 78*4882a593Smuzhiyun * If the function detects a deadlock due to multiple threads trying to 79*4882a593Smuzhiyun * reserve the same buffers in reverse order, all threads except one will 80*4882a593Smuzhiyun * back off and retry. This function may sleep while waiting for 81*4882a593Smuzhiyun * CPU write reservations to be cleared, and for other threads to 82*4882a593Smuzhiyun * unreserve their buffers. 83*4882a593Smuzhiyun * 84*4882a593Smuzhiyun * If intr is set to true, this function may return -ERESTARTSYS if the 85*4882a593Smuzhiyun * calling process receives a signal while waiting. In that case, no 86*4882a593Smuzhiyun * buffers on the list will be reserved upon return. 87*4882a593Smuzhiyun * 88*4882a593Smuzhiyun * If dups is non NULL all buffers already reserved by the current thread 89*4882a593Smuzhiyun * (e.g. duplicates) are added to this list, otherwise -EALREADY is returned 90*4882a593Smuzhiyun * on the first already reserved buffer and all buffers from the list are 91*4882a593Smuzhiyun * unreserved again. 92*4882a593Smuzhiyun * 93*4882a593Smuzhiyun * Buffers reserved by this function should be unreserved by 94*4882a593Smuzhiyun * a call to either ttm_eu_backoff_reservation() or 95*4882a593Smuzhiyun * ttm_eu_fence_buffer_objects() when command submission is complete or 96*4882a593Smuzhiyun * has failed. 
97*4882a593Smuzhiyun */ 98*4882a593Smuzhiyun int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, 99*4882a593Smuzhiyun struct list_head *list, bool intr, 100*4882a593Smuzhiyun struct list_head *dups); 101*4882a593Smuzhiyun 102*4882a593Smuzhiyun /** 103*4882a593Smuzhiyun * function ttm_eu_fence_buffer_objects. 104*4882a593Smuzhiyun * 105*4882a593Smuzhiyun * @ticket: ww_acquire_ctx from reserve call 106*4882a593Smuzhiyun * @list: thread private list of ttm_validate_buffer structs. 107*4882a593Smuzhiyun * @fence: The new exclusive fence for the buffers. 108*4882a593Smuzhiyun * 109*4882a593Smuzhiyun * This function should be called when command submission is complete, and 110*4882a593Smuzhiyun * it will add a new sync object to bos pointed to by entries on @list. 111*4882a593Smuzhiyun * It also unreserves all buffers, putting them on lru lists. 112*4882a593Smuzhiyun * 113*4882a593Smuzhiyun */ 114*4882a593Smuzhiyun void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, 115*4882a593Smuzhiyun struct list_head *list, 116*4882a593Smuzhiyun struct dma_fence *fence); 117*4882a593Smuzhiyun 118*4882a593Smuzhiyun #endif 119