Lines matching refs: dma_map
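
All of the matches below fall in the xp_* DMA-mapping helpers of the AF_XDP buffer pool (net/xdp/xsk_buff_pool.c in the Linux tree). After each group of matches, a short note explains the function and sketches its full body; lines that do not contain dma_map are not quoted source and are filled in as assumptions.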
273 struct xsk_dma_map *dma_map; in xp_find_dma_map() local
275 list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) { in xp_find_dma_map()
276 if (dma_map->netdev == pool->netdev) in xp_find_dma_map()
277 return dma_map; in xp_find_dma_map()
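
Lines 273-277 are xp_find_dma_map(): the umem keeps one xsk_dma_map per device on its xsk_dma_list, and this helper returns the entry whose netdev matches the pool's, or NULL. A sketch, assuming the usual declaration and fallthrough return around the visible lines:

static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
        struct xsk_dma_map *dma_map;

        /* One mapping per (umem, netdev) pair; reuse it if it exists. */
        list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
                if (dma_map->netdev == pool->netdev)
                        return dma_map;
        }

        return NULL;    /* assumed: no mapping for this netdev yet */
}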
286 struct xsk_dma_map *dma_map; in xp_create_dma_map() local
288 dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL); in xp_create_dma_map()
289 if (!dma_map) in xp_create_dma_map()
292 dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL); in xp_create_dma_map()
293 if (!dma_map->dma_pages) { in xp_create_dma_map()
294 kfree(dma_map); in xp_create_dma_map()
298 dma_map->netdev = netdev; in xp_create_dma_map()
299 dma_map->dev = dev; in xp_create_dma_map()
300 dma_map->dma_need_sync = false; in xp_create_dma_map()
301 dma_map->dma_pages_cnt = nr_pages; in xp_create_dma_map()
302 refcount_set(&dma_map->users, 1); in xp_create_dma_map()
303 list_add(&dma_map->list, &umem->xsk_dma_list); in xp_create_dma_map()
304 return dma_map; in xp_create_dma_map()
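
Lines 286-304 are xp_create_dma_map(). It allocates the tracking struct with kzalloc() and the per-page address array with kvcalloc() (which can fall back to vmalloc for large page counts), seeds the refcount at 1, and links the new map onto the umem's list. The parameter order follows the call on line 400; the error returns on the elided lines are assumptions:

static struct xsk_dma_map *xp_create_dma_map(struct device *dev,
                                             struct net_device *netdev,
                                             u32 nr_pages, struct xdp_umem *umem)
{
        struct xsk_dma_map *dma_map;

        dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
        if (!dma_map)
                return NULL;    /* assumed error path */

        dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages),
                                      GFP_KERNEL);
        if (!dma_map->dma_pages) {
                kfree(dma_map);
                return NULL;    /* assumed error path */
        }

        dma_map->netdev = netdev;
        dma_map->dev = dev;
        dma_map->dma_need_sync = false;
        dma_map->dma_pages_cnt = nr_pages;
        refcount_set(&dma_map->users, 1);
        list_add(&dma_map->list, &umem->xsk_dma_list);
        return dma_map;
}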
307 static void xp_destroy_dma_map(struct xsk_dma_map *dma_map) in xp_destroy_dma_map() argument
309 list_del(&dma_map->list); in xp_destroy_dma_map()
310 kvfree(dma_map->dma_pages); in xp_destroy_dma_map()
311 kfree(dma_map); in xp_destroy_dma_map()
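
xp_destroy_dma_map() (lines 307-311) is visible in full apart from its braces: it unlinks the map from the umem's list, then frees the kvcalloc'ed address array before the struct itself, exactly undoing xp_create_dma_map().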
314 static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs) in __xp_dma_unmap() argument
319 for (i = 0; i < dma_map->dma_pages_cnt; i++) { in __xp_dma_unmap()
320 dma = &dma_map->dma_pages[i]; in __xp_dma_unmap()
323 dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE, in __xp_dma_unmap()
329 xp_destroy_dma_map(dma_map); in __xp_dma_unmap()
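
__xp_dma_unmap() (lines 314-329) walks the stored addresses, unmaps each page with dma_unmap_page_attrs(), and then destroys the map. The elided lines between 320 and 323 presumably skip never-mapped entries and strip the contiguity flag before unmapping; the DMA_BIDIRECTIONAL direction is likewise an assumption:

static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
        dma_addr_t *dma;
        u32 i;

        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
                dma = &dma_map->dma_pages[i];
                if (*dma) {     /* assumed: skip pages never mapped */
                        /* assumed: drop the flag bit so the raw address goes back */
                        *dma &= ~XSK_NEXT_PG_CONTIG_MASK;
                        dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
                                             DMA_BIDIRECTIONAL, attrs);
                        *dma = 0;
                }
        }

        xp_destroy_dma_map(dma_map);
}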
334 struct xsk_dma_map *dma_map; in xp_dma_unmap() local
339 dma_map = xp_find_dma_map(pool); in xp_dma_unmap()
340 if (!dma_map) { in xp_dma_unmap()
345 if (!refcount_dec_and_test(&dma_map->users)) in xp_dma_unmap()
348 __xp_dma_unmap(dma_map, attrs); in xp_dma_unmap()
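
xp_dma_unmap() (lines 334-348) is the teardown entry point. It finds the shared map for the pool's netdev, drops one reference, and only tears the mapping down once the last user is gone; refcount_dec_and_test() returns true only for the final put. The early returns and the pool-side cleanup after line 348 are not part of the match and are only hinted at here:

void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
        struct xsk_dma_map *dma_map;

        dma_map = xp_find_dma_map(pool);
        if (!dma_map)
                return;         /* assumed: nothing was mapped for this netdev */

        if (!refcount_dec_and_test(&dma_map->users))
                return;         /* other pools still share this mapping */

        __xp_dma_unmap(dma_map, attrs);
        /* assumed: the pool's private copy of the DMA info is released here */
}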
355 static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map) in xp_check_dma_contiguity() argument
359 for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) { in xp_check_dma_contiguity()
360 if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1]) in xp_check_dma_contiguity()
361 dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK; in xp_check_dma_contiguity()
363 dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK; in xp_check_dma_contiguity()
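
xp_check_dma_contiguity() (lines 355-363) encodes adjacency in the address array itself: when page i+1 starts exactly PAGE_SIZE after page i in DMA space, the XSK_NEXT_PG_CONTIG_MASK flag is set on entry i (page-aligned addresses leave the low bits free for this), so fast-path address lookups can cross a page boundary without extra bookkeeping. Only the else keyword sits on the unmatched line 362:

static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
        u32 i;

        for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
                if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
                        dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
                else
                        dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
        }
}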
367 static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map) in xp_init_dma_info() argument
369 pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL); in xp_init_dma_info()
373 pool->dev = dma_map->dev; in xp_init_dma_info()
374 pool->dma_pages_cnt = dma_map->dma_pages_cnt; in xp_init_dma_info()
375 pool->dma_need_sync = dma_map->dma_need_sync; in xp_init_dma_info()
376 memcpy(pool->dma_pages, dma_map->dma_pages, in xp_init_dma_info()
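
xp_init_dma_info() (lines 367-376) publishes the shared map into the pool: it allocates a private dma_pages array of the same size, then copies the device pointer, page count, sync flag, and the addresses themselves. The -ENOMEM check, the memcpy length, and the return 0 are assumptions for the elided lines:

static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
        pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt,
                                   sizeof(*pool->dma_pages), GFP_KERNEL);
        if (!pool->dma_pages)
                return -ENOMEM;         /* assumed */

        pool->dev = dma_map->dev;
        pool->dma_pages_cnt = dma_map->dma_pages_cnt;
        pool->dma_need_sync = dma_map->dma_need_sync;
        memcpy(pool->dma_pages, dma_map->dma_pages,
               pool->dma_pages_cnt * sizeof(*pool->dma_pages)); /* assumed length */

        return 0;       /* assumed */
}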
385 struct xsk_dma_map *dma_map; in xp_dma_map() local
390 dma_map = xp_find_dma_map(pool); in xp_dma_map()
391 if (dma_map) { in xp_dma_map()
392 err = xp_init_dma_info(pool, dma_map); in xp_dma_map()
396 refcount_inc(&dma_map->users); in xp_dma_map()
400 dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem); in xp_dma_map()
401 if (!dma_map) in xp_dma_map()
404 for (i = 0; i < dma_map->dma_pages_cnt; i++) { in xp_dma_map()
408 __xp_dma_unmap(dma_map, attrs); in xp_dma_map()
412 dma_map->dma_need_sync = true; in xp_dma_map()
413 dma_map->dma_pages[i] = dma; in xp_dma_map()
417 xp_check_dma_contiguity(dma_map); in xp_dma_map()
419 err = xp_init_dma_info(pool, dma_map); in xp_dma_map()
421 __xp_dma_unmap(dma_map, attrs); in xp_dma_map()
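
Lines 385-421 belong to xp_dma_map(), the entry point that ties everything together: reuse an existing map for this netdev if there is one (taking a reference), otherwise create a map, DMA-map every umem page into it, record whether any page needs CPU syncing, mark contiguous runs, and publish the result into the pool. More of this function is elided than shown, so the sketch below leans on assumptions: the mapping call feeding line 408 is presumably dma_map_page_attrs() guarded by dma_mapping_error(), a dma_need_sync()-style check guards line 412, and the pages/nr_pages parameters and DMA_BIDIRECTIONAL direction are inferred rather than quoted:

int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
               unsigned long attrs, struct page **pages, u32 nr_pages)
{
        struct xsk_dma_map *dma_map;
        dma_addr_t dma;
        int err;
        u32 i;

        dma_map = xp_find_dma_map(pool);
        if (dma_map) {
                /* Another pool on the same umem+netdev already mapped it. */
                err = xp_init_dma_info(pool, dma_map);
                if (err)
                        return err;             /* assumed */

                refcount_inc(&dma_map->users);
                return 0;                       /* assumed */
        }

        dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
        if (!dma_map)
                return -ENOMEM;                 /* assumed */

        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
                dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
                                         DMA_BIDIRECTIONAL, attrs); /* assumed */
                if (dma_mapping_error(dev, dma)) {                  /* assumed */
                        __xp_dma_unmap(dma_map, attrs);
                        return -ENOMEM;         /* assumed */
                }
                if (dma_need_sync(dev, dma))    /* assumed guard */
                        dma_map->dma_need_sync = true;
                dma_map->dma_pages[i] = dma;
        }

        xp_check_dma_contiguity(dma_map);       /* line 417; guard condition elided */

        err = xp_init_dma_info(pool, dma_map);
        if (err) {
                __xp_dma_unmap(dma_map, attrs);
                return err;                     /* assumed */
        }

        return 0;                               /* assumed */
}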