/*
 * Generic bounce buffer implementation
 *
 * Copyright (C) 2012 Marek Vasut <marex@denx.de>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <malloc.h>
#include <errno.h>
#include <bouncebuf.h>

static int addr_aligned(struct bounce_buffer *state)
{
	const ulong align_mask = ARCH_DMA_MINALIGN - 1;

	/* Check if start is aligned */
	if ((ulong)state->user_buffer & align_mask) {
		debug("Unaligned buffer address %p\n", state->user_buffer);
		return 0;
	}

	/* Check if length is aligned */
	if (state->len != state->len_aligned) {
		debug("Unaligned buffer length %zu\n", state->len);
		return 0;
	}

	/* Aligned */
	return 1;
}

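/*
 * Prepare a buffer for DMA. If the user buffer is already suitably
 * aligned it is used directly; otherwise an aligned bounce buffer is
 * allocated. For GEN_BB_READ transfers (the device reads from memory),
 * the user data is copied into the bounce buffer up front.
 */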
int bounce_buffer_start(struct bounce_buffer *state, void *data,
			size_t len, unsigned int flags)
{
	state->user_buffer = data;
	state->bounce_buffer = data;
	state->len = len;
	state->len_aligned = roundup(len, ARCH_DMA_MINALIGN);
	state->flags = flags;

	if (!addr_aligned(state)) {
		state->bounce_buffer = memalign(ARCH_DMA_MINALIGN,
						state->len_aligned);
		if (!state->bounce_buffer)
			return -ENOMEM;

		if (state->flags & GEN_BB_READ)
			memcpy(state->bounce_buffer, state->user_buffer,
			       state->len);
	}

	/*
	 * Flush data to RAM so DMA reads can pick it up,
	 * and any CPU writebacks don't race with DMA writes
	 */
	flush_dcache_range((unsigned long)state->bounce_buffer,
			   (unsigned long)(state->bounce_buffer) +
					state->len_aligned);

	return 0;
}

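/*
 * Finish the transfer. For GEN_BB_WRITE transfers (the device wrote to
 * memory), the cache is invalidated so the CPU sees the DMA'd data, and
 * the data is copied back to the user buffer before the bounce buffer
 * is freed.
 */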
int bounce_buffer_stop(struct bounce_buffer *state)
{
	if (state->flags & GEN_BB_WRITE) {
		/* Invalidate cache so that CPU can see any newly DMA'd data */
		invalidate_dcache_range((unsigned long)state->bounce_buffer,
					(unsigned long)(state->bounce_buffer) +
						state->len_aligned);
	}

	if (state->bounce_buffer == state->user_buffer)
		return 0;

	if (state->flags & GEN_BB_WRITE)
		memcpy(state->user_buffer, state->bounce_buffer, state->len);

	free(state->bounce_buffer);

	return 0;
}
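
/*
 * Example usage (a minimal sketch; dma_transfer() is a hypothetical
 * driver hook standing in for the actual DMA start/wait logic, not part
 * of this file):
 *
 *	struct bounce_buffer bbstate;
 *	int ret;
 *
 *	ret = bounce_buffer_start(&bbstate, buf, len, GEN_BB_WRITE);
 *	if (ret)
 *		return ret;
 *
 *	// Program the controller with the (possibly bounced) buffer
 *	dma_transfer(bbstate.bounce_buffer, bbstate.len_aligned);
 *
 *	bounce_buffer_stop(&bbstate);
 */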