xref: /optee_os/core/drivers/crypto/caam/utils/utils_dmaobj.c (revision 76d6685e5f3b91d66dc2091b9d61601c050298bb)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright 2020-2021, 2023 NXP
4  *
5  * CAAM DMA data object utilities.
6  */
7 
8 #include <caam_trace.h>
9 #include <caam_utils_dmaobj.h>
10 #include <caam_utils_mem.h>
11 #include <caam_utils_sgt.h>
12 #include <caam_utils_status.h>
13 #include <kernel/cache_helpers.h>
14 #include <kernel/spinlock.h>
15 #include <mm/core_memprot.h>
16 #include <string.h>
17 #include <tee/cache.h>
18 
19 #define IS_DMA_OVERFLOW(addr) ((addr) > UINT32_MAX)
20 #define MAX_BUFFER_ALLOC_SIZE ((size_t)(8 * 1024))
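/*
 * Note: IS_DMA_OVERFLOW() flags physical addresses above the 32-bit
 * range, which the CAAM DMA engine is assumed not to reach; such areas
 * are bounced through a reallocated DMA buffer (see
 * check_buffer_boundary()). MAX_BUFFER_ALLOC_SIZE caps that bounce
 * buffer allocation at 8 KB.
 */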
21 
22 /*
23  * Local defines used to identify Object type as:
24  *  - input or output data
25  *  - SGT object created because buffer is not physically contiguous
26  *  - derived object (not buffer reallocation)
27  *  - allocated origin buffer
28  */
29 #define DMAOBJ_INPUT	  BIT(0)
30 #define DMAOBJ_OUTPUT	  BIT(1)
31 #define DMAOBJ_ALLOC_ORIG BIT(2)
32 #define DMAOBJ_DONT_COPY  BIT(3)
33 
34 /*
35  * DMA Buffer
36  *
37  * @require    DMA Buffer size required
38  * @allocated  Size of the buffer allocated
39  * @remind     Size available in the buffer
40  * @buf        CAAM Buffer
41  */
42 struct caamdmabuf {
43 	size_t require;
44 	size_t allocated;
45 	size_t remind;
46 	struct caambuf buf;
47 };
48 
49 /*
50  * DMA Object buffer entry
51  *
52  * @newbuf        True if list entry is a new DMA Buffer
53  * @nodma_access  Buffer is not accessible from CAAM DMA
54  * @nocopy        Buffer doesn't have to be copied back to the origin
55  * @origbuf       Original buffer reference
56  * @link          Pointer to next entry
57  */
58 struct dmaentry {
59 	bool newbuf;
60 	bool nodma_access;
61 	bool nocopy;
62 
63 	struct caambuf origbuf;
64 
65 	TAILQ_ENTRY(dmaentry) link;
66 };
67 
68 /*
69  * SGT/Buffer Data currently handled
70  *
71  * @orig    Original buffer reference
72  * @dma     DMA Buffer (new or original)
73  * @length  Buffer length
74  */
75 struct sgtdata {
76 	uint8_t *orig;
77 	uint8_t *dma;
78 	size_t length;
79 };
80 
81 /*
82  * CAAM DMA private Object data
83  * @type         Type of DMA Object
84  * @nb_sgtbuf    Number of SGT/Buffer entries allocated
85  * @dmabuf       DMA Buffer allocated
86  * @sgtdata      Reference to SGT/Buffer list in use
87  * @list         List of the DMA Object buffer entry
88  */
89 struct priv_dmaobj {
90 	unsigned int type;
91 	unsigned int nb_sgtbuf;
92 
93 	struct caamdmabuf dmabuf;
94 	struct sgtdata *sgtdata;
95 
96 	TAILQ_HEAD(dmalist, dmaentry) list;
97 };
98 
99 /*
100  * Spinlock protecting memory allocation and free, ensuring that in
101  * case of a big buffer reallocation the memory in use is freed first
102  */
103 static unsigned int memlock;
104 
105 /*
106  * Try to allocate a DMA Buffer of type input or output data of @size bytes.
107  * On success, set up the DMA Buffer settings; otherwise
108  * return an error.
109  *
110  * @priv  CAAM DMA object private data
111  * @size  Size of the DMA Buffer to allocate
112  */
113 static TEE_Result try_allocate_dmabuf(struct priv_dmaobj *priv, size_t size)
114 {
115 	enum caam_status retstatus = CAAM_FAILURE;
116 
117 	if (priv->dmabuf.allocated) {
118 		caam_free_buf(&priv->dmabuf.buf);
119 		priv->dmabuf.allocated = 0;
120 	}
121 
122 	if (priv->type & DMAOBJ_INPUT)
123 		retstatus = caam_alloc_buf(&priv->dmabuf.buf, size);
124 	else
125 		retstatus = caam_alloc_align_buf(&priv->dmabuf.buf, size);
126 
127 	DMAOBJ_TRACE("Alloc %s DMA buffer (%zu) ret 0x%" PRIx32,
128 		     (priv->type & DMAOBJ_INPUT) ? "Input" : "Output", size,
129 		     retstatus);
130 
131 	if (retstatus == CAAM_NO_ERROR) {
132 		DMAOBJ_TRACE("DMA buffer Allocation Success");
133 		/* Set the Object's DMA Buffer settings */
134 		priv->dmabuf.allocated = size;
135 		priv->dmabuf.remind = size;
136 		priv->dmabuf.buf.length = 0;
137 		return TEE_SUCCESS;
138 	}
139 
140 	DMAOBJ_TRACE("DMA buffer Allocation Failure");
141 	return TEE_ERROR_OUT_OF_MEMORY;
142 }
143 
144 /*
145  * Allocate and initialize the CAAM DMA object's private data.
146  *
147  * @obj   CAAM DMA Object
148  * @type  Type of the CAAM DMA Object (i.e. Input or Output)
149  */
150 static TEE_Result allocate_private(struct caamdmaobj *obj, unsigned int type)
151 {
152 	struct priv_dmaobj *priv = NULL;
153 
154 	priv = caam_calloc(sizeof(*priv));
155 	if (!priv)
156 		return TEE_ERROR_OUT_OF_MEMORY;
157 
158 	obj->priv = priv;
159 
160 	/* Set the object type */
161 	priv->type = type;
162 
163 	TAILQ_INIT(&priv->list);
164 
165 	return TEE_SUCCESS;
166 }
167 
168 /*
169  * Fill the @sgtdata object to record the current input/output data
170  * handled in the DMA SGT/Buffer object.
171  * Increment the SGT/Buffer length accordingly.
172  *
173  * @obj      CAAM DMA object
174  * @sgtdata  [out] SGT Data handled
175  * @entry    DMA Object buffer entry
176  * @dma      DMA SGT/Buffer object
177  * @offset   Start offset of the DMA Object buffer
178  */
179 static void add_sgtdata_entry(struct caamdmaobj *obj, struct sgtdata *sgtdata,
180 			      struct dmaentry *entry, struct caambuf *dma,
181 			      size_t offset)
182 {
183 	if (entry->nocopy) {
184 		sgtdata->orig = 0;
185 		sgtdata->length = 0;
186 		sgtdata->dma = 0;
187 	} else {
188 		sgtdata->orig = entry->origbuf.data + offset;
189 		sgtdata->length = dma->length;
190 		sgtdata->dma = dma->data;
191 	}
192 
193 	obj->sgtbuf.length += dma->length;
194 }
195 
196 /*
197  * Add a new DMA Buffer entry as first element of the list.
198  * Return NULL on error, otherwise the new entry in the list
199  *
200  * @priv    DMA Object private data
201  * @orig    Original buffer reference
202  */
203 static struct dmaentry *dmalist_add_entry_head(struct priv_dmaobj *priv,
204 					       struct caambuf *orig)
205 {
206 	struct dmaentry *entry = NULL;
207 
208 	entry = caam_calloc(sizeof(*entry));
209 	if (entry) {
210 		/* Save the original buffer reference */
211 		memcpy(&entry->origbuf, orig, sizeof(entry->origbuf));
212 		DMAOBJ_TRACE("entry %p - insert head entry of %zu bytes", entry,
213 			     orig->length);
214 		TAILQ_INSERT_HEAD(&priv->list, entry, link);
215 	}
216 
217 	return entry;
218 }
219 
220 /*
221  * Add a new DMA Buffer entry in the list.
222  * Return NULL on error, otherwise the new entry in the list
223  *
224  * @priv    DMA Object private data
225  * @orig    Original buffer reference
226  */
227 static struct dmaentry *dmalist_add_entry(struct priv_dmaobj *priv,
228 					  struct caambuf *orig)
229 {
230 	struct dmaentry *entry = NULL;
231 
232 	entry = caam_calloc(sizeof(*entry));
233 	if (entry) {
234 		/* Save the original buffer reference */
235 		memcpy(&entry->origbuf, orig, sizeof(entry->origbuf));
236 		DMAOBJ_TRACE("entry %p - insert entry of %zu bytes", entry,
237 			     orig->length);
238 		if (TAILQ_EMPTY(&priv->list))
239 			TAILQ_INSERT_HEAD(&priv->list, entry, link);
240 		else
241 			TAILQ_INSERT_TAIL(&priv->list, entry, link);
242 	}
243 
244 	return entry;
245 }
246 
247 /*
248  * Allocate a DMA entry and insert it in the list before the given entry.
249  * Return the allocated DMA entry.
250  *
251  * @priv   DMA Object private data
252  * @before DMA entry after the new DMA entry
253  * @new    CAAM buffer of the new DMA entry
254  */
255 static struct dmaentry *dmalist_insert_before_entry(struct priv_dmaobj *priv,
256 						    struct dmaentry *before,
257 						    struct caambuf *new)
258 {
259 	struct dmaentry *entry = NULL;
260 
261 	entry = caam_calloc(sizeof(*entry));
262 	if (entry) {
263 		/* Save the original buffer reference */
264 		memcpy(&entry->origbuf, new, sizeof(entry->origbuf));
265 		DMAOBJ_TRACE("entry %p - insert entry of %zu bytes", entry,
266 			     new->length);
267 		if (TAILQ_FIRST(&priv->list) == before)
268 			TAILQ_INSERT_HEAD(&priv->list, entry, link);
269 		else
270 			TAILQ_INSERT_BEFORE(before, entry, link);
271 	}
272 
273 	return entry;
274 }
275 
276 /*
277  * Allocate a DMA entry and insert it in the list after the given entry.
278  * Return the allocated DMA entry.
279  *
280  * @priv   DMA Object private data
281  * @after  DMA entry before the new DMA entry
282  * @new    CAAM buffer of the new DMA entry
283  */
284 static struct dmaentry *dmalist_insert_after_entry(struct priv_dmaobj *priv,
285 						   struct dmaentry *after,
286 						   struct caambuf *new)
287 {
288 	struct dmaentry *entry = NULL;
289 
290 	entry = caam_calloc(sizeof(*entry));
291 	if (entry) {
292 		/* Save the original buffer reference */
293 		memcpy(&entry->origbuf, new, sizeof(entry->origbuf));
294 		DMAOBJ_TRACE("entry %p - insert entry of %zu bytes", entry,
295 			     new->length);
296 		TAILQ_INSERT_AFTER(&priv->list, after, entry, link);
297 	}
298 
299 	return entry;
300 }
301 
302 /*
303  * Apply the cache operation @op to the DMA Object (SGT or buffer)
304  *
305  * @op    Cache operation
306  * @obj   CAAM DMA object
307  */
308 static inline void dmaobj_cache_operation(enum utee_cache_operation op,
309 					  struct caamdmaobj *obj)
310 {
311 	if (!obj->sgtbuf.length)
312 		return;
313 
314 	if (obj->sgtbuf.sgt_type)
315 		caam_sgt_cache_op(op, &obj->sgtbuf, obj->sgtbuf.length);
316 	else if (!obj->sgtbuf.buf->nocache)
317 		cache_operation(op, obj->sgtbuf.buf->data, obj->sgtbuf.length);
318 }
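
/*
 * Note: for an SGT-mapped object the operation is applied to every
 * chained buffer through caam_sgt_cache_op(); a single contiguous
 * buffer is handled directly, unless it lives in non-cached memory.
 */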
319 
320 /*
321  * Set the required allocation size for the DMA buffer.
322  *
323  * @priv   DMA Object private data
324  * @length Required buffer size
325  */
326 static inline void add_dma_require(struct priv_dmaobj *priv, size_t length)
327 {
328 	size_t tmp = 0;
329 
330 	if (ADD_OVERFLOW(priv->dmabuf.require, length, &tmp))
331 		priv->dmabuf.require = SIZE_MAX;
332 	else
333 		priv->dmabuf.require = tmp;
334 }
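
/*
 * Worked example (illustrative): if require is SIZE_MAX - 8 and 16 more
 * bytes are requested, ADD_OVERFLOW() reports the wrap-around and the
 * requirement saturates to SIZE_MAX instead of wrapping to 7.
 */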
335 
336 /*
337  * Check if the buffer start/end addresses are aligned on a cache line.
338  * If not, split off the unaligned head and/or tail into separate list
339  * entries, except when the remaining aligned length would not exceed a
340  * cache line; in that case, flag the entry so a new buffer is allocated.
341  *
342  * @priv    DMA Object private data
343  * @maxlen  Maximum length to use
344  */
345 static TEE_Result check_buffer_alignment(struct priv_dmaobj *priv,
346 					 size_t maxlen)
347 {
348 	unsigned int cacheline_size = 0;
349 	struct dmaentry *entry = NULL;
350 	struct dmaentry *new_entry = NULL;
351 	struct caambuf newbuf = {};
352 	vaddr_t va_start = 0;
353 	vaddr_t va_end = 0;
354 	vaddr_t va_end_align = 0;
355 	vaddr_t va_start_align = 0;
356 	size_t remlen = 0;
357 	size_t acclen = 0;
358 
359 	cacheline_size = dcache_get_line_size();
360 
361 	TAILQ_FOREACH(entry, &priv->list, link) {
362 		DMAOBJ_TRACE("Entry %p: start %p len %zu (%zu >= %zu)", entry,
363 			     entry->origbuf.data, entry->origbuf.length, acclen,
364 			     maxlen);
365 
366 		/* No need to continue if we already covered the needed length */
367 		if (acclen >= maxlen)
368 			return TEE_SUCCESS;
369 
370 		acclen += entry->origbuf.length;
371 
372 		if (entry->nodma_access || entry->newbuf)
373 			continue;
374 
375 		if (entry->origbuf.length < cacheline_size) {
376 			/*
377 			 * Length of the entry is smaller than a cache line.
378 			 * Require a fully aligned buffer.
379 			 */
380 			DMAOBJ_TRACE("Length %zu vs cache line %u",
381 				     entry->origbuf.length, cacheline_size);
382 
383 			entry->newbuf = true;
384 			add_dma_require(priv, entry->origbuf.length);
385 			continue;
386 		}
387 
388 		va_start = (vaddr_t)entry->origbuf.data;
389 		va_start_align = ROUNDUP2(va_start, cacheline_size);
390 
391 		if (va_start_align != va_start) {
392 			DMAOBJ_TRACE("Start 0x%" PRIxVA " vs align 0x%" PRIxVA,
393 				     va_start, va_start_align);
394 
395 			remlen = entry->origbuf.length -
396 				 (va_start_align - va_start);
397 			if (remlen <= cacheline_size) {
398 				/*
399 				 * Start address is not aligned and the
400 				 * length remaining after re-alignment is
401 				 * not bigger than a cache line.
402 				 * Require a fully aligned buffer.
403 				 */
404 				DMAOBJ_TRACE("Rem length %zu vs cache line %u",
405 					     remlen, cacheline_size);
406 				entry->newbuf = true;
407 				add_dma_require(priv, entry->origbuf.length);
408 				continue;
409 			}
410 
411 			/*
412 			 * Insert a new entry so the buffer starts on a cache line.
413 			 */
414 			newbuf.data = entry->origbuf.data;
415 			newbuf.length = va_start_align - va_start;
416 			newbuf.paddr = entry->origbuf.paddr;
417 			newbuf.nocache = entry->origbuf.nocache;
418 
419 			add_dma_require(priv, newbuf.length);
420 			new_entry = dmalist_insert_before_entry(priv, entry,
421 								&newbuf);
422 			if (!new_entry)
423 				return TEE_ERROR_OUT_OF_MEMORY;
424 
425 			new_entry->newbuf = true;
426 
427 			/*
428 			 * Update current entry with align address and new
429 			 * length.
430 			 */
431 			entry->origbuf.data = (uint8_t *)va_start_align;
432 			entry->origbuf.length -= newbuf.length;
433 			entry->origbuf.paddr += newbuf.length;
434 
435 			/*
436 			 * Set the current entry to the new entry so the
437 			 * FOREACH loop continues from new_entry and then
438 			 * verifies the rest of the modified entry.
439 			 */
440 			entry = new_entry;
441 			acclen -= entry->origbuf.length;
442 			continue;
443 		}
444 
445 		va_end = (vaddr_t)entry->origbuf.data + entry->origbuf.length;
446 		va_end_align = ROUNDUP2(va_end, cacheline_size);
447 
448 		if (va_end != va_end_align) {
449 			DMAOBJ_TRACE("End 0x%" PRIxVA " vs align 0x%" PRIxVA,
450 				     va_end, va_end_align);
451 
452 			va_end_align = ROUNDDOWN2(va_end, cacheline_size);
453 			remlen = entry->origbuf.length - (va_end - va_end_align);
454 
455 			if (remlen <= cacheline_size) {
456 				/*
457 				 * End address is not aligned and the length
458 				 * remaining after re-alignment is not bigger
459 				 * than a cache line.
460 				 * Require a fully aligned buffer.
461 				 */
462 				DMAOBJ_TRACE("Rem length %zu vs cache line %u",
463 					     remlen, cacheline_size);
464 				entry->newbuf = true;
465 				add_dma_require(priv, entry->origbuf.length);
466 				continue;
467 			}
468 
469 			/*
470 			 * Insert a new entry so the buffer ends on a cache line.
471 			 */
472 			newbuf.data = (uint8_t *)va_end_align;
473 			newbuf.length = va_end - va_end_align;
474 			newbuf.paddr = entry->origbuf.paddr + entry->origbuf.length - newbuf.length;
475 			newbuf.nocache = entry->origbuf.nocache;
476 
477 			add_dma_require(priv, newbuf.length);
478 
479 			new_entry = dmalist_insert_after_entry(priv, entry,
480 							       &newbuf);
481 			if (!new_entry)
482 				return TEE_ERROR_OUT_OF_MEMORY;
483 
484 			new_entry->newbuf = true;
485 
486 			/* Update current entry with new length */
487 			entry->origbuf.length -= newbuf.length;
488 
489 			/*
490 			 * Set the current entry to the new entry so the
491 			 * FOREACH loop continues from new_entry and then
492 			 * verifies the rest of the modified entry.
493 			 */
494 			entry = new_entry;
495 			acclen -= newbuf.length;
496 			continue;
497 		}
498 	}
499 
500 	return TEE_SUCCESS;
501 }
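
/*
 * Worked example (illustrative, assuming a 64-byte cache line): an
 * output entry of 200 bytes at VA 0x1030 ends at VA 0x10F8. The
 * 16-byte head (0x1030-0x103F) and the 56-byte tail (0x10C0-0x10F7)
 * are split off into entries reallocated in the DMA buffer, leaving a
 * 128-byte cache-aligned middle entry the CAAM can write directly.
 */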
502 
503 /*
504  * Go through the whole @orig space to extract all physical areas used
505  * to map the buffer.
506  * If one of the physical areas is not accessible by the CAAM DMA, flag
507  * it to be reallocated into a DMA-accessible buffer.
508  * If the DMA Object is an output buffer, check and flag the start/end
509  * address of the buffer to be aligned on a cache line.
510  *
511  * @obj     CAAM DMA object
512  * @orig    Original Data
513  * @maxlen  Maximum length to use
514  */
515 static TEE_Result check_buffer_boundary(struct caamdmaobj *obj,
516 					struct caambuf *orig, size_t maxlen)
517 {
518 	TEE_Result ret = TEE_ERROR_OUT_OF_MEMORY;
519 	struct priv_dmaobj *priv = obj->priv;
520 	struct dmaentry *entry = NULL;
521 	struct caambuf *pabufs = NULL;
522 	int nb_pa_area = -1;
523 	int idx = 0;
524 	paddr_t last_pa = 0;
525 	size_t remlen = maxlen;
526 	size_t tmp = 0;
527 
528 	/*
529 	 * Get the number of physical areas used by the
530 	 * DMA Buffer
531 	 */
532 	nb_pa_area = caam_mem_get_pa_area(orig, &pabufs);
533 	DMAOBJ_TRACE("Number of pa areas = %d (for max length %zu bytes)",
534 		     nb_pa_area, remlen);
535 	if (nb_pa_area == -1)
536 		goto out;
537 
538 	for (idx = 0; idx < nb_pa_area && remlen; idx++) {
539 		DMAOBJ_TRACE("Remaining length = %zu", remlen);
540 		if (ADD_OVERFLOW(pabufs[idx].paddr, pabufs[idx].length,
541 				 &last_pa))
542 			goto out;
543 
544 		DMAOBJ_TRACE("PA 0x%" PRIxPA " = 0x%" PRIxPA " + %zu", last_pa,
545 			     pabufs[idx].paddr, pabufs[idx].length);
546 
547 		entry = dmalist_add_entry(priv, &pabufs[idx]);
548 		if (!entry)
549 			goto out;
550 
551 		if (IS_DMA_OVERFLOW(last_pa)) {
552 			entry->nodma_access = true;
553 			if (ADD_OVERFLOW(priv->dmabuf.require,
554 					 pabufs[idx].length, &tmp))
555 				priv->dmabuf.require = SIZE_MAX;
556 			else
557 				priv->dmabuf.require = tmp;
558 		}
559 
560 		if (remlen > pabufs[idx].length)
561 			remlen -= pabufs[idx].length;
562 		else
563 			remlen = 0;
564 	}
565 
566 	/*
567 	 * Check the buffer alignment if the buffer is cacheable and
568 	 * an output buffer.
569 	 */
570 	if (priv->type & DMAOBJ_OUTPUT && !orig->nocache) {
571 		ret = check_buffer_alignment(priv, maxlen);
572 		if (ret)
573 			goto out;
574 	}
575 
576 	orig->length = maxlen;
577 
578 	ret = TEE_SUCCESS;
579 out:
580 	caam_free(pabufs);
581 	return ret;
582 }
583 
584 /*
585  * Re-map a DMA entry into a CAAM DMA accessible buffer.
586  * Create the SGT/Buffer entry to be used in the CAAM Descriptor.
587  * Record this entry in the SGT/Buffer Data to keep track of the current
588  * working data.
589  *
590  * @obj         CAAM DMA object
591  * @entry       DMA entry to re-map
592  * @index       Index in the SGT/Buffer table
593  * @off         Start offset of the DMA entry data
594  */
595 static enum caam_status entry_sgtbuf_dmabuf(struct caamdmaobj *obj,
596 					    struct dmaentry *entry,
597 					    unsigned int index, size_t off)
598 {
599 	struct priv_dmaobj *priv = obj->priv;
600 	struct caambuf *sgtbuf = &obj->sgtbuf.buf[index];
601 	struct caamdmabuf *dmabuf = &priv->dmabuf;
602 
603 	if (!priv->dmabuf.allocated)
604 		return CAAM_OUT_MEMORY;
605 
606 	sgtbuf->data = dmabuf->buf.data + dmabuf->buf.length;
607 	sgtbuf->length = MIN(dmabuf->remind, entry->origbuf.length - off);
608 	sgtbuf->paddr = dmabuf->buf.paddr + dmabuf->buf.length;
609 	sgtbuf->nocache = dmabuf->buf.nocache;
610 	dmabuf->remind -= sgtbuf->length;
611 	dmabuf->buf.length += sgtbuf->length;
612 
613 	if (priv->type & DMAOBJ_INPUT)
614 		memcpy(sgtbuf->data, &entry->origbuf.data[off], sgtbuf->length);
615 	else
616 		entry->newbuf = true;
617 
618 	add_sgtdata_entry(obj, &priv->sgtdata[index], entry, sgtbuf, off);
619 
620 	return CAAM_NO_ERROR;
621 }
622 
623 /*
624  * Create the SGT/Buffer entry mapping the DMA @entry.
625  * Record this entry in the SGT/Buffer Data to keep track of the current
626  * working data.
627  *
628  * @obj         CAAM DMA object
629  * @entry       DMA entry to re-map
630  * @index       Index in the SGT/Buffer table
631  * @off         Start offset of the DMA entry data
632  */
633 static enum caam_status entry_sgtbuf(struct caamdmaobj *obj,
634 				     struct dmaentry *entry, unsigned int index,
635 				     size_t off)
636 {
637 	struct priv_dmaobj *priv = obj->priv;
638 	struct caambuf *sgtbuf = &obj->sgtbuf.buf[index];
639 	struct sgtdata *sgtdata = &priv->sgtdata[index];
640 
641 	memcpy(sgtbuf, &entry->origbuf, sizeof(*sgtbuf));
642 	sgtbuf->data += off;
643 	sgtbuf->paddr += off;
644 	sgtbuf->length -= off;
645 
646 	DMAOBJ_TRACE("DMA buffer %p - %zu", sgtbuf->data, sgtbuf->length);
647 	add_sgtdata_entry(obj, sgtdata, entry, sgtbuf, off);
648 
649 	return CAAM_NO_ERROR;
650 }
651 
652 TEE_Result caam_dmaobj_init_input(struct caamdmaobj *obj, const void *data,
653 				  size_t length)
654 {
655 	TEE_Result ret = TEE_ERROR_GENERIC;
656 
657 	DMAOBJ_TRACE("Input object with data @%p of %zu bytes", data, length);
658 
659 	if (!data || !length || !obj) {
660 		ret = TEE_ERROR_BAD_PARAMETERS;
661 		goto err;
662 	}
663 
664 	obj->orig.paddr = virt_to_phys((void *)data);
665 	if (!obj->orig.paddr) {
666 		DMAOBJ_TRACE("Object virtual address error");
667 		ret = TEE_ERROR_BAD_PARAMETERS;
668 		goto err;
669 	}
670 
671 	obj->orig.data = (void *)data;
672 	obj->orig.length = length;
673 	if (!caam_mem_is_cached_buf((void *)data, length))
674 		obj->orig.nocache = 1;
675 
676 	ret = allocate_private(obj, DMAOBJ_INPUT);
677 	if (ret)
678 		goto err;
679 
680 	ret = check_buffer_boundary(obj, &obj->orig, obj->orig.length);
681 
682 	goto out;
683 err:
684 	caam_dmaobj_free(obj);
685 out:
686 	DMAOBJ_TRACE("Object returns 0x%" PRIx32, ret);
687 	return ret;
688 }
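
/*
 * Note: callers that also need the SGT/Buffer mapping built right away
 * can use caam_dmaobj_input_sgtbuf() below, which chains this function
 * with caam_dmaobj_prepare() and caam_dmaobj_sgtbuf_build().
 */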
689 
690 TEE_Result caam_dmaobj_input_sgtbuf(struct caamdmaobj *obj, const void *data,
691 				    size_t length)
692 {
693 	TEE_Result ret = TEE_ERROR_GENERIC;
694 	size_t size_done = length;
695 
696 	ret = caam_dmaobj_init_input(obj, data, length);
697 	if (ret)
698 		goto err;
699 
700 	ret = caam_dmaobj_prepare(obj, NULL, length);
701 	if (ret)
702 		goto err;
703 
704 	ret = caam_dmaobj_sgtbuf_build(obj, &size_done, 0, length);
705 	if (ret)
706 		goto err;
707 
708 	if (size_done != length) {
709 		ret = TEE_ERROR_OUT_OF_MEMORY;
710 		goto err;
711 	}
712 
713 	return TEE_SUCCESS;
714 err:
715 	caam_dmaobj_free(obj);
716 	return ret;
717 }
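
/*
 * Minimal caller sketch (illustrative only, error handling elided):
 *
 *	struct caamdmaobj msg = { };
 *
 *	ret = caam_dmaobj_input_sgtbuf(&msg, data, size);
 *	caam_dmaobj_cache_push(&msg);
 *	... run the CAAM job reading from msg.sgtbuf ...
 *	caam_dmaobj_free(&msg);
 */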
718 
719 TEE_Result caam_dmaobj_init_output(struct caamdmaobj *obj, void *data,
720 				   size_t length, size_t min_length)
721 {
722 	TEE_Result ret = TEE_ERROR_GENERIC;
723 	struct dmaentry *entry = NULL;
724 	struct caambuf newbuf = {};
725 
726 	DMAOBJ_TRACE("Output object with data @%p of %zu bytes (%zu)", data,
727 		     length, min_length);
728 
729 	if (!obj) {
730 		ret = TEE_ERROR_BAD_PARAMETERS;
731 		goto out;
732 	}
733 
734 	ret = allocate_private(obj, DMAOBJ_OUTPUT);
735 	if (ret)
736 		goto err;
737 
738 	if (data) {
739 		obj->orig.paddr = virt_to_phys((void *)data);
740 		if (!obj->orig.paddr) {
741 			DMAOBJ_TRACE("Object virtual address error");
742 			ret = TEE_ERROR_BAD_PARAMETERS;
743 			goto err;
744 		}
745 
746 		obj->orig.data = (void *)data;
747 		obj->orig.length = length;
748 		if (!caam_mem_is_cached_buf((void *)data, length))
749 			obj->orig.nocache = 1;
750 
751 		ret = check_buffer_boundary(obj, &obj->orig,
752 					    MIN(min_length, obj->orig.length));
753 		if (ret)
754 			goto err;
755 	}
756 
757 	if (length < min_length || !data) {
758 		DMAOBJ_TRACE("Output buffer too short need %zu bytes (+%zu)",
759 			     min_length, min_length - length);
760 		newbuf.length = min_length - length;
761 
762 		entry = dmalist_add_entry(obj->priv, &newbuf);
763 		if (!entry) {
764 			ret = TEE_ERROR_OUT_OF_MEMORY;
765 			goto err;
766 		}
767 
768 		/* Add the additional size in the DMA buffer length */
769 		add_dma_require(obj->priv, newbuf.length);
770 
771 		entry->nocopy = true;
772 		entry->newbuf = true;
773 	}
774 
775 	ret = TEE_SUCCESS;
776 	goto out;
777 
778 err:
779 	caam_dmaobj_free(obj);
780 out:
781 	DMAOBJ_TRACE("Object returns 0x%" PRIx32, ret);
782 	return ret;
783 }
784 
785 TEE_Result caam_dmaobj_output_sgtbuf(struct caamdmaobj *obj, void *data,
786 				     size_t length, size_t min_length)
787 {
788 	enum caam_status retstatus = CAAM_FAILURE;
789 	TEE_Result ret = TEE_ERROR_GENERIC;
790 	struct priv_dmaobj *priv = NULL;
791 	size_t size = 0;
792 	struct caambuf buf = {};
793 
794 	if (!data && !length && min_length) {
795 		/*
796 		 * Make sure the allocated buffer is at least a cache
797 		 * line, so that its start/end addresses are cache
798 		 * aligned.
799 		 * If @min_length is less than a cache line size,
800 		 * allocate the output buffer with the cache line size
801 		 * to prevent end-of-buffer misalignment and avoid
802 		 * reallocating an unused buffer.
803 		 */
804 		size = MAX(min_length, dcache_get_line_size());
805 
806 		/* Allocate a new cache aligned buffer */
807 		retstatus = caam_alloc_align_buf(&buf, size);
808 		DMAOBJ_TRACE("New output buffer of %zu bytes ret 0x%" PRIx32,
809 			     min_length, retstatus);
810 		if (retstatus != CAAM_NO_ERROR)
811 			return caam_status_to_tee_result(retstatus);
812 
813 		ret = caam_dmaobj_init_output(obj, buf.data, buf.length, size);
814 		if (ret)
815 			return ret;
816 
817 		/* Set the origin buffer length to the requested size */
818 		obj->orig.length = min_length;
819 
820 		/* Flag origin buffer as new allocation to free it */
821 		priv = obj->priv;
822 		priv->type |= DMAOBJ_ALLOC_ORIG;
823 	} else {
824 		ret = caam_dmaobj_init_output(obj, data, length, min_length);
825 		if (ret)
826 			return ret;
827 	}
828 
829 	ret = caam_dmaobj_prepare(NULL, obj, min_length);
830 	if (ret)
831 		return ret;
832 
833 	size = min_length;
834 	ret = caam_dmaobj_sgtbuf_build(obj, &size, 0, min_length);
835 	if (ret)
836 		return ret;
837 
838 	if (size != min_length)
839 		return TEE_ERROR_OUT_OF_MEMORY;
840 
841 	return TEE_SUCCESS;
842 }
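
/*
 * Output objects are typically paired with caam_dmaobj_copy_to_orig()
 * once the CAAM job completes (illustrative sketch, error handling
 * elided):
 *
 *	struct caamdmaobj out = { };
 *
 *	ret = caam_dmaobj_output_sgtbuf(&out, buf, len, min_len);
 *	... run the CAAM job writing into out.sgtbuf ...
 *	len = caam_dmaobj_copy_to_orig(&out);
 *	caam_dmaobj_free(&out);
 */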
843 
844 void caam_dmaobj_cache_push(struct caamdmaobj *obj)
845 {
846 	struct priv_dmaobj *priv = NULL;
847 	enum utee_cache_operation op = TEE_CACHECLEAN;
848 
849 	if (!obj || !obj->priv)
850 		return;
851 
852 	priv = obj->priv;
853 	if (priv->type & DMAOBJ_OUTPUT)
854 		op = TEE_CACHEFLUSH;
855 
856 	dmaobj_cache_operation(op, obj);
857 }
858 
859 size_t caam_dmaobj_copy_to_orig(struct caamdmaobj *obj)
860 {
861 	struct priv_dmaobj *priv = NULL;
862 	unsigned int idx = 0;
863 	size_t length = 0;
864 	size_t dst_rlen = 0;
865 	size_t copy_size = 0;
866 
867 	if (!obj || !obj->orig.data || !obj->priv)
868 		return 0;
869 
870 	dmaobj_cache_operation(TEE_CACHEINVALIDATE, obj);
871 
872 	priv = obj->priv;
873 
874 	/*
875 	 * The maximum data size to copy cannot exceed the output buffer size
876 	 * (obj->orig.length) and cannot exceed the data processed by the
877 	 * CAAM (obj->sgtbuf.length).
878 	 */
879 	dst_rlen = MIN(obj->orig.length, obj->sgtbuf.length);
880 
881 	DMAOBJ_TRACE("Copy (len=%zu)", dst_rlen);
882 
883 	for (idx = 0; idx < obj->sgtbuf.number; idx++) {
884 		struct sgtdata *sgtdata = &priv->sgtdata[idx];
885 
886 		copy_size = MIN(dst_rlen, sgtdata->length);
887 		if (sgtdata->orig != sgtdata->dma && sgtdata->orig) {
888 			/* Copy the data processed by the CAAM to the origin */
889 			memcpy(sgtdata->orig, sgtdata->dma, copy_size);
890 		}
891 
892 		length += copy_size;
893 		dst_rlen -= copy_size;
894 	}
895 
896 	return length;
897 }
898 
899 size_t caam_dmaobj_copy_ltrim_to_orig(struct caamdmaobj *obj)
900 {
901 	struct priv_dmaobj *priv = NULL;
902 	uint8_t *dst = NULL;
903 	size_t off = 0;
904 	size_t offset = 0;
905 	size_t dst_rlen = 0;
906 	size_t copy_size = 0;
907 	unsigned int idx = 0;
908 	size_t length = 0;
909 
910 	if (!obj || !obj->orig.data || !obj->priv)
911 		return 0;
912 
913 	dmaobj_cache_operation(TEE_CACHEINVALIDATE, obj);
914 
915 	priv = obj->priv;
916 
917 	/* Parse the SGT data list to discard leading zeros */
918 	for (idx = 0; idx < obj->sgtbuf.number; idx++) {
919 		struct sgtdata *sgtdata = &priv->sgtdata[idx];
920 
921 		if (!sgtdata->orig)
922 			continue;
923 
924 		for (offset = 0; offset < sgtdata->length; off++, offset++) {
925 			if (sgtdata->dma[offset])
926 				goto do_copy;
927 		}
928 	}
929 
930 do_copy:
931 	if (off < obj->orig.length)
932 		dst_rlen = obj->orig.length - off;
933 
934 	dst = obj->orig.data;
935 
936 	DMAOBJ_TRACE("Copy/Move Offset=%zu (len=%zu) TYPE=%d", off, dst_rlen,
937 		     obj->sgtbuf.sgt_type);
938 
939 	if (!dst_rlen) {
940 		dst[0] = 0;
941 		return 1;
942 	}
943 
944 	/*
945 	 * After discarding leading zeros in the SGT data list, start the copy
946 	 * operation on the remaining elements of the data list.
947 	 * List index must not be re-initialized before entering this loop.
948 	 */
949 	for (; idx < obj->sgtbuf.number; idx++) {
950 		struct sgtdata *sgtdata = &priv->sgtdata[idx];
951 
952 		if (!sgtdata->orig)
953 			continue;
954 
955 		if (offset) {
956 			copy_size = MIN(dst_rlen, sgtdata->length - offset);
957 			memmove(dst, &sgtdata->dma[offset], copy_size);
958 			offset = 0;
959 		} else {
960 			copy_size = MIN(dst_rlen, sgtdata->length);
961 			if (dst != sgtdata->dma)
962 				memmove(dst, sgtdata->dma, copy_size);
963 		}
964 
965 		dst += copy_size;
966 		dst_rlen -= copy_size;
967 		length += copy_size;
968 	}
969 
970 	return length;
971 }
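
/*
 * Example (illustrative): for a 4-byte result { 0x00, 0x00, 0x12, 0x34 }
 * the two leading zeros are discarded and { 0x12, 0x34 } is moved to the
 * start of the origin buffer, returning a length of 2. An all-zero
 * result is reduced to a single zero byte and a returned length of 1.
 */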
972 
973 void caam_dmaobj_free(struct caamdmaobj *obj)
974 {
975 	struct priv_dmaobj *priv = NULL;
976 	struct dmaentry *entry = NULL;
977 	struct dmaentry *next = NULL;
978 	uint32_t exceptions = 0;
979 
980 	if (!obj)
981 		return;
982 
983 	exceptions = cpu_spin_lock_xsave(&memlock);
984 	priv = obj->priv;
985 	if (!priv)
986 		goto out;
987 
988 	DMAOBJ_TRACE("Free %s object with data @%p of %zu bytes",
989 		     priv->type & DMAOBJ_INPUT ? "Input" : "Output",
990 		     obj->orig.data, obj->orig.length);
991 
992 	TAILQ_FOREACH_SAFE(entry, &priv->list, link, next) {
993 		DMAOBJ_TRACE("Is type 0x%" PRIx8 " newbuf %s", priv->type,
994 			     entry->newbuf ? "true" : "false");
995 
996 		DMAOBJ_TRACE("Free entry %p", entry);
997 		caam_free(entry);
998 	}
999 
1000 	if (priv->nb_sgtbuf) {
1001 		DMAOBJ_TRACE("Free #%d SGT data %p", priv->nb_sgtbuf,
1002 			     priv->sgtdata);
1003 		caam_free(priv->sgtdata);
1004 
1005 		obj->sgtbuf.number = priv->nb_sgtbuf;
1006 		obj->sgtbuf.sgt_type = (priv->nb_sgtbuf > 1) ? true : false;
1007 	}
1008 
1009 	if (priv->dmabuf.allocated) {
1010 		DMAOBJ_TRACE("Free CAAM DMA buffer");
1011 		caam_free_buf(&priv->dmabuf.buf);
1012 	}
1013 
1014 	if (priv->type & DMAOBJ_ALLOC_ORIG) {
1015 		DMAOBJ_TRACE("Free Allocated origin");
1016 		caam_free_buf(&obj->orig);
1017 	}
1018 
1019 	DMAOBJ_TRACE("Free private object %p", priv);
1020 	caam_free(priv);
1021 
1022 out:
1023 	if (obj->sgtbuf.number) {
1024 		DMAOBJ_TRACE("Free #%d SGT/Buffer %p", obj->sgtbuf.number,
1025 			     &obj->sgtbuf);
1026 		caam_sgtbuf_free(&obj->sgtbuf);
1027 	}
1028 
1029 	memset(obj, 0, sizeof(*obj));
1030 
1031 	cpu_spin_unlock_xrestore(&memlock, exceptions);
1032 }
1033 
1034 TEE_Result caam_dmaobj_add_first_block(struct caamdmaobj *obj,
1035 				       struct caamblock *block)
1036 {
1037 	struct priv_dmaobj *priv = NULL;
1038 	struct caambuf newbuf = {};
1039 	struct dmaentry *entry = NULL;
1040 
1041 	if (!obj || !obj->priv || !block)
1042 		return TEE_ERROR_BAD_PARAMETERS;
1043 
1044 	priv = obj->priv;
1045 
1046 	/* Save the block buffer reference and insert it at the head list */
1047 	newbuf.data = block->buf.data;
1048 	newbuf.length = block->filled;
1049 	newbuf.paddr = block->buf.paddr;
1050 	newbuf.nocache = block->buf.nocache;
1051 
1052 	entry = dmalist_add_entry_head(priv, &newbuf);
1053 
1054 	if (!entry)
1055 		return TEE_ERROR_OUT_OF_MEMORY;
1056 
1057 	/*
1058 	 * Block buffer added in the output DMA buffer doesn't have to
1059 	 * be part of the output copy to origin buffer.
1060 	 */
1061 	if (priv->type & DMAOBJ_OUTPUT)
1062 		entry->nocopy = true;
1063 
1064 	return TEE_SUCCESS;
1065 }
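
/*
 * Note (assumption): this is typically used by block-based algorithms
 * to re-inject a previously saved partial block at the head of the
 * data list before processing new input data.
 */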
1066 
1067 TEE_Result caam_dmaobj_derive_sgtbuf(struct caamdmaobj *obj,
1068 				     const struct caamdmaobj *from,
1069 				     size_t offset, size_t length)
1070 {
1071 	TEE_Result ret = TEE_ERROR_GENERIC;
1072 	enum caam_status retstatus = CAAM_FAILURE;
1073 	struct priv_dmaobj *priv = NULL;
1074 
1075 	DMAOBJ_TRACE("Derive object %p - offset %zu - length %zu bytes", from,
1076 		     offset, length);
1077 
1078 	if (!obj || !from || !length || !from->priv) {
1079 		ret = TEE_ERROR_BAD_PARAMETERS;
1080 		goto out;
1081 	}
1082 
1083 	if (!from->orig.data || !from->orig.length) {
1084 		DMAOBJ_TRACE("No data/length to derive from");
1085 		ret = TEE_ERROR_NO_DATA;
1086 		goto out;
1087 	}
1088 
1089 	priv = from->priv;
1090 	if (!priv->nb_sgtbuf) {
1091 		DMAOBJ_TRACE("From SGT/Buffer not prepared");
1092 		ret = TEE_ERROR_NO_DATA;
1093 		goto out;
1094 	}
1095 
1096 	retstatus = caam_sgt_derive(&obj->sgtbuf, &from->sgtbuf, offset,
1097 				    length);
1098 
1099 	ret = caam_status_to_tee_result(retstatus);
1100 
1101 out:
1102 	DMAOBJ_TRACE("Object returns 0x%" PRIx32, ret);
1103 	return ret;
1104 }
1105 
1106 /*
1107  * Get the maximum allocation size for the given CAAM DMA object.
1108  * Return the maximum allocation size.
1109  *
1110  * @obj CAAM DMA object
1111  */
1112 static size_t get_dma_max_alloc_size(struct caamdmaobj *obj)
1113 {
1114 	size_t alloc_size = 0;
1115 	struct priv_dmaobj *priv = NULL;
1116 
1117 	if (!obj)
1118 		return 0;
1119 
1120 	priv = obj->priv;
1121 
1122 	DMAOBJ_TRACE("DMA buffer size require %zu", priv->dmabuf.require);
1123 	alloc_size = MIN(priv->dmabuf.require, MAX_BUFFER_ALLOC_SIZE);
1124 	if (alloc_size > 1024)
1125 		alloc_size = ROUNDDOWN(alloc_size, 1024);
1126 
1127 	return alloc_size;
1128 }
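
/*
 * Worked example (illustrative): a requirement of 5000 bytes is capped
 * at MIN(5000, 8192) = 5000, then rounded down to a 1 KB multiple,
 * giving a 4096-byte allocation. A requirement of 600 bytes is kept
 * as-is.
 */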
1129 
1130 /*
1131  * Allocate the CAAM DMA buffer.
1132  * First, try to allocate with the maximum size. If it fails, retry with
1133  * the size halved until the minimum size is reached. If the allocation
1134  * cannot be done even with the minimum size, return
1135  * TEE_ERROR_OUT_OF_MEMORY, TEE_SUCCESS otherwise.
1136  *
1137  * @obj       CAAM DMA object
1138  * @min_size  minimum size allocation
1139  * @size[out] successful allocation size
1140  */
1141 static TEE_Result try_allocate_dmabuf_max_size(struct caamdmaobj *obj,
1142 					       size_t min_size,
1143 					       size_t *size)
1144 {
1145 	TEE_Result ret = TEE_ERROR_GENERIC;
1146 	size_t alloc_size = 0;
1147 	struct priv_dmaobj *priv = NULL;
1148 	bool try_alloc = false;
1149 	uint32_t exceptions = 0;
1150 
1151 	alloc_size = get_dma_max_alloc_size(obj);
1152 	if (alloc_size) {
1153 		try_alloc = true;
1154 	} else {
1155 		ret = TEE_SUCCESS;
1156 		goto out;
1157 	}
1158 
1159 	priv = obj->priv;
1160 
1161 	exceptions = cpu_spin_lock_xsave(&memlock);
1162 
1163 	while (try_alloc) {
1164 		ret = try_allocate_dmabuf(priv, alloc_size);
1165 		if (!ret) {
1166 			try_alloc = false;
1167 		} else {
1168 			if (alloc_size > min_size)
1169 				alloc_size = MAX(min_size, alloc_size / 2);
1170 			else
1171 				try_alloc = false;
1172 		}
1173 	}
1174 
1175 	cpu_spin_unlock_xrestore(&memlock, exceptions);
1176 
1177 out:
1178 	*size = alloc_size;
1179 
1180 	return ret;
1181 }
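
/*
 * Example (illustrative): with an 8 KB requirement and a 1 KB minimum,
 * the allocation is attempted at 8192, 4096, 2048 and finally 1024
 * bytes; only if the 1024-byte attempt also fails is
 * TEE_ERROR_OUT_OF_MEMORY returned.
 */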
1182 
1183 TEE_Result caam_dmaobj_prepare(struct caamdmaobj *input,
1184 			       struct caamdmaobj *output, size_t min_size)
1185 {
1186 	TEE_Result ret = TEE_ERROR_GENERIC;
1187 	size_t alloc_input = 0;
1188 	size_t alloc_output = 0;
1189 
1190 	if (!input && !output) {
1191 		ret = TEE_ERROR_BAD_PARAMETERS;
1192 		goto out;
1193 	}
1194 
1195 	if ((input && !input->priv) || (output && !output->priv)) {
1196 		ret = TEE_ERROR_BAD_PARAMETERS;
1197 		goto out;
1198 	}
1199 
1200 	DMAOBJ_TRACE("input=%p - output=%p - min=%zu", input, output, min_size);
1201 
1202 	ret = try_allocate_dmabuf_max_size(input, min_size, &alloc_input);
1203 	if (ret)
1204 		goto out;
1205 
1206 	ret = try_allocate_dmabuf_max_size(output, min_size, &alloc_output);
1207 	if (ret)
1208 		goto out;
1209 
1210 out:
1211 	DMAOBJ_TRACE("Allocation (input %zu, output %zu) returns 0x%" PRIx32,
1212 		     input ? alloc_input : 0, output ? alloc_output : 0,
1213 		     ret);
1214 
1215 	return ret;
1216 }
1217 
1218 TEE_Result caam_dmaobj_sgtbuf_inout_build(struct caamdmaobj *input,
1219 					  struct caamdmaobj *output,
1220 					  size_t *length, size_t off,
1221 					  size_t align)
1222 {
1223 	TEE_Result ret = TEE_ERROR_GENERIC;
1224 	size_t len = 0;
1225 
1226 	DMAOBJ_TRACE("input=%p/output=%p %zu bytes (offset=%zu, align=%zu)",
1227 		     input, output, *length, off, align);
1228 
1229 	if (!input || !output || !length || !input->priv || !output->priv ||
1230 	    !*length) {
1231 		ret = TEE_ERROR_BAD_PARAMETERS;
1232 		goto out;
1233 	}
1234 
1235 	/*
1236 	 * First build the input SGT/Buffer
1237 	 */
1238 	ret = caam_dmaobj_sgtbuf_build(input, length, off, align);
1239 	if (ret)
1240 		goto out;
1241 
1242 	/*
1243 	 * Next build the output SGT/Buffer.
1244 	 * If the returned length differs from the input one, redo the input
1245 	 * SGT/Buffer with the same length as the output.
1246 	 */
1247 	len = *length;
1248 	ret = caam_dmaobj_sgtbuf_build(output, &len, off, *length);
1249 	if (ret)
1250 		goto out;
1251 
1252 	if (len != *length) {
1253 		DMAOBJ_TRACE("Retry In %zu bytes vs Out %zu bytes", *length,
1254 			     len);
1255 
1256 		/* Redo the input with the output length */
1257 		*length = len;
1258 		ret = caam_dmaobj_sgtbuf_build(input, length, off, len);
1259 		if (!ret && *length != len) {
1260 			DMAOBJ_TRACE("Error In %zu bytes vs Out %zu bytes",
1261 				     *length, len);
1262 			ret = TEE_ERROR_OUT_OF_MEMORY;
1263 		}
1264 	}
1265 
1266 out:
1267 	DMAOBJ_TRACE("Input/Output SGTBUF returns 0x%" PRIx32, ret);
1268 
1269 	return ret;
1270 }
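
/*
 * Streaming sketch (illustrative only, error handling elided): process
 * @size bytes in chunks bounded by the DMA buffers actually allocated,
 * rebuilding the input/output SGT/Buffer pair at each offset:
 *
 *	for (off = 0; off < size; off += len) {
 *		len = size - off;
 *		ret = caam_dmaobj_sgtbuf_inout_build(&in, &out, &len,
 *						     off, align);
 *		... run the CAAM job on in.sgtbuf / out.sgtbuf ...
 *		caam_dmaobj_copy_to_orig(&out);
 *	}
 */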
1271 
1272 TEE_Result caam_dmaobj_sgtbuf_build(struct caamdmaobj *obj, size_t *length,
1273 				    size_t off, size_t align)
1274 {
1275 	TEE_Result ret = TEE_ERROR_GENERIC;
1276 	enum caam_status retstatus = CAAM_FAILURE;
1277 	struct priv_dmaobj *priv = NULL;
1278 	struct dmaentry *entry = NULL;
1279 	struct dmaentry *start_entry = NULL;
1280 	size_t max_length = 0;
1281 	size_t acc_length = 0;
1282 	size_t offset = off;
1283 	unsigned int idx = 0;
1284 	unsigned int nb_sgt = 0;
1285 
1286 	DMAOBJ_TRACE("obj=%p of %zu bytes (offset=%zu) - align %zu", obj,
1287 		     *length, off, align);
1288 
1289 	if (!obj || !obj->priv || !length || !*length) {
1290 		ret = TEE_ERROR_BAD_PARAMETERS;
1291 		goto out;
1292 	}
1293 
1294 	priv = obj->priv;
1295 
1296 	max_length = *length;
1297 	if (priv->dmabuf.allocated && max_length > priv->dmabuf.allocated &&
1298 	    priv->dmabuf.allocated > align)
1299 		max_length = ROUNDDOWN2(priv->dmabuf.allocated, align);
1300 
1301 	DMAOBJ_TRACE("Prepare SGT/Buffer to do %zu of %zu", max_length,
1302 		     *length);
1303 
1304 	/* Find the first DMA buffer to start with */
1305 	TAILQ_FOREACH(entry, &priv->list, link) {
1306 		if (offset < entry->origbuf.length)
1307 			break;
1308 
1309 		offset -= entry->origbuf.length;
1310 	}
1311 
1312 	if (!entry) {
1313 		DMAOBJ_TRACE("There is no DMA Object available");
1314 		ret = TEE_ERROR_GENERIC;
1315 		goto out;
1316 	}
1317 
1318 	start_entry = entry;
1319 	DMAOBJ_TRACE("Start with %p data %p offset %zu", start_entry,
1320 		     start_entry->origbuf.data, offset);
1321 
1322 	acc_length = entry->origbuf.length - offset;
1323 	nb_sgt = 1;
1324 
1325 	/* Calculate the number of SGT entries */
1326 	for (entry = TAILQ_NEXT(entry, link); entry && acc_length < max_length;
1327 	     entry = TAILQ_NEXT(entry, link)) {
1328 		acc_length += entry->origbuf.length;
1329 		nb_sgt++;
1330 	}
1331 
1332 	DMAOBJ_TRACE("%d of %d SGT/Buffer entries to handle", nb_sgt,
1333 		     priv->nb_sgtbuf);
1334 	if (priv->nb_sgtbuf < nb_sgt) {
1335 		if (priv->nb_sgtbuf) {
1336 			obj->sgtbuf.number = priv->nb_sgtbuf;
1337 			obj->sgtbuf.sgt_type = (priv->nb_sgtbuf > 1);
1338 
1339 			caam_sgtbuf_free(&obj->sgtbuf);
1340 			caam_free(priv->sgtdata);
1341 			priv->nb_sgtbuf = 0;
1342 		}
1343 
1344 		obj->sgtbuf.number = nb_sgt;
1345 		obj->sgtbuf.sgt_type = (nb_sgt > 1) ? true : false;
1346 
1347 		/* Allocate a new SGT/Buffer object */
1348 		retstatus = caam_sgtbuf_alloc(&obj->sgtbuf);
1349 		DMAOBJ_TRACE("Allocate %d SGT entries ret 0x%" PRIx32,
1350 			     obj->sgtbuf.number, retstatus);
1351 		if (retstatus != CAAM_NO_ERROR) {
1352 			ret = caam_status_to_tee_result(retstatus);
1353 			goto out;
1354 		}
1355 
1356 		priv->sgtdata = caam_calloc(nb_sgt * sizeof(*priv->sgtdata));
1357 		if (!priv->sgtdata) {
1358 			ret = TEE_ERROR_OUT_OF_MEMORY;
1359 			goto out;
1360 		}
1361 
1362 		priv->nb_sgtbuf = nb_sgt;
1363 	} else {
1364 		obj->sgtbuf.number = nb_sgt;
1365 		obj->sgtbuf.sgt_type = (nb_sgt > 1) ? true : false;
1366 	}
1367 
1368 	/* Reset the DMA Buffer index if allocated */
1369 	if (priv->dmabuf.allocated) {
1370 		priv->dmabuf.remind = priv->dmabuf.allocated;
1371 		priv->dmabuf.buf.length = 0;
1372 	}
1373 
1374 	obj->sgtbuf.length = 0;
1375 	for (entry = start_entry; entry && idx < nb_sgt;
1376 	     entry = TAILQ_NEXT(entry, link), idx++) {
1377 		DMAOBJ_TRACE("entry %p (%d)", entry, idx);
1378 		if (entry->nodma_access || entry->newbuf) {
1379 			retstatus = entry_sgtbuf_dmabuf(obj, entry, idx,
1380 							offset);
1381 			if (retstatus != CAAM_NO_ERROR) {
1382 				ret = caam_status_to_tee_result(retstatus);
1383 				goto out;
1384 			}
1385 		} else {
1386 			retstatus = entry_sgtbuf(obj, entry, idx, offset);
1387 			if (retstatus != CAAM_NO_ERROR) {
1388 				ret = caam_status_to_tee_result(retstatus);
1389 				goto out;
1390 			}
1391 		}
1392 
1393 		if (obj->sgtbuf.length >= max_length) {
1394 			DMAOBJ_TRACE("Hold-on enough length %zu", max_length);
1395 			obj->sgtbuf.length = max_length;
1396 			break;
1397 		}
1398 		offset = 0;
1399 	}
1400 
1401 	if (obj->sgtbuf.sgt_type) {
1402 		/* Build the SGT table based on the physical area list */
1403 		caam_sgt_fill_table(&obj->sgtbuf);
1404 
1405 		obj->sgtbuf.paddr = virt_to_phys(obj->sgtbuf.sgt);
1406 	} else {
1407 		obj->sgtbuf.paddr = obj->sgtbuf.buf->paddr;
1408 	}
1409 
1410 	*length = obj->sgtbuf.length;
1411 	ret = TEE_SUCCESS;
1412 out:
1413 	DMAOBJ_TRACE("SGTBUF (%zu) returns 0x%" PRIx32, *length, ret);
1414 	return ret;
1415 }
1416