// SPDX-License-Identifier: MIT
/* Copyright (C) 2006-2017 Oracle Corporation */

#include <linux/vbox_err.h>
#include "vbox_drv.h"
#include "vboxvideo_guest.h"
#include "hgsmi_channels.h"

/*
 * There is a hardware ring buffer in the graphics device video RAM,
 * formerly in the VBox VMMDev PCI memory space. All graphics commands
 * go there, serialized by vbva_buffer_begin_update and
 * vbva_buffer_end_update.
 *
 * free_offset is the writing position and data_offset is the reading
 * position; free_offset == data_offset means the buffer is empty.
 * There must always be a gap between data_offset and free_offset when
 * data is in the buffer. The guest only changes free_offset, the host
 * only changes data_offset.
 */
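
/*
 * To illustrate the accounting above with made-up numbers: if
 * data_len == 1024, data_offset == 100 and free_offset == 900, the
 * guest has written up to offset 900, the host has read up to offset
 * 100, and 100 - 900 + 1024 == 224 bytes are still free. When
 * free_offset == data_offset the buffer is empty and all of data_len
 * is free, which is why writes never advance free_offset all the way
 * up to data_offset.
 */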

static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
{
	s32 diff = vbva->data_offset - vbva->free_offset;

	return diff > 0 ? diff : vbva->data_len + diff;
}

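/*
 * Copy @len bytes from @p into the ring buffer at @offset. A chunk that
 * would run past the end of the buffer is split in two, with the
 * remainder wrapping around to the start of the buffer.
 */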
static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
				      const void *p, u32 len, u32 offset)
{
	struct vbva_buffer *vbva = vbva_ctx->vbva;
	u32 bytes_till_boundary = vbva->data_len - offset;
	u8 *dst = &vbva->data[offset];
	s32 diff = len - bytes_till_boundary;

	if (diff <= 0) {
		/* Chunk will not cross buffer boundary. */
		memcpy(dst, p, len);
	} else {
		/* Chunk crosses buffer boundary. */
		memcpy(dst, p, bytes_till_boundary);
		memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
	}
}

static void vbva_buffer_flush(struct gen_pool *ctx)
{
	struct vbva_flush *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
	if (!p)
		return;

	p->reserved = 0;

	hgsmi_buffer_submit(ctx, p);
	hgsmi_buffer_free(ctx, p);
}

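/*
 * Append data to the currently open record. Callers bracket writes
 * with vbva_buffer_begin_update() and vbva_buffer_end_update(); the
 * write is refused if no partial record is open or if an earlier write
 * has already overflowed the buffer. When the data does not fit in the
 * free space, the buffer is flushed to the host and the data is
 * written in chunks.
 */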
bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		const void *p, u32 len)
{
	struct vbva_record *record;
	struct vbva_buffer *vbva;
	u32 available;

	vbva = vbva_ctx->vbva;
	record = vbva_ctx->record;

	if (!vbva || vbva_ctx->buffer_overflow ||
	    !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
		return false;

	available = vbva_buffer_available(vbva);

	while (len > 0) {
		u32 chunk = len;

		if (chunk >= available) {
			vbva_buffer_flush(ctx);
			available = vbva_buffer_available(vbva);
		}

		if (chunk >= available) {
			if (WARN_ON(available <= vbva->partial_write_tresh)) {
				vbva_ctx->buffer_overflow = true;
				return false;
			}
			chunk = available - vbva->partial_write_tresh;
		}

		vbva_buffer_place_data_at(vbva_ctx, p, chunk,
					  vbva->free_offset);

		vbva->free_offset = (vbva->free_offset + chunk) %
				    vbva->data_len;
		record->len_and_flags += chunk;
		available -= chunk;
		len -= chunk;
		p += chunk;
	}

	return true;
}

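/*
 * Tell the host to enable or disable VBVA for a screen. On enable the
 * host reports back through the result field of the submitted buffer;
 * a disable is always treated as successful.
 */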
static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
			     struct gen_pool *ctx, s32 screen, bool enable)
{
	struct vbva_enable_ex *p;
	bool ret;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
	if (!p)
		return false;

	p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
	p->base.offset = vbva_ctx->buffer_offset;
	p->base.result = VERR_NOT_SUPPORTED;
	if (screen >= 0) {
		p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
		p->screen_id = screen;
	}

	hgsmi_buffer_submit(ctx, p);

	if (enable)
		ret = p->base.result >= 0;
	else
		ret = true;

	hgsmi_buffer_free(ctx, p);

	return ret;
}

bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		 struct vbva_buffer *vbva, s32 screen)
{
	bool ret = false;

	memset(vbva, 0, sizeof(*vbva));
	vbva->partial_write_tresh = 256;
	vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
	vbva_ctx->vbva = vbva;

	ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
	if (!ret)
		vbva_disable(vbva_ctx, ctx, screen);

	return ret;
}

void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		  s32 screen)
{
	vbva_ctx->buffer_overflow = false;
	vbva_ctx->record = NULL;
	vbva_ctx->vbva = NULL;

	vbva_inform_host(vbva_ctx, ctx, screen, false);
}

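/*
 * Open a new record in the records ring. One slot is always kept free
 * to tell a full ring from an empty one, so if the next free index
 * would collide with record_first_index the buffer is flushed and, if
 * the host still has not caught up, the request fails.
 */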
bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
			      struct gen_pool *ctx)
{
	struct vbva_record *record;
	u32 next;

	if (!vbva_ctx->vbva ||
	    !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
		return false;

	WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);

	next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;

	/* Flush if all slots in the records queue are used */
	if (next == vbva_ctx->vbva->record_first_index)
		vbva_buffer_flush(ctx);

	/* If even after flush there is no place then fail the request */
	if (next == vbva_ctx->vbva->record_first_index)
		return false;

	record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
	record->len_and_flags = VBVA_F_RECORD_PARTIAL;
	vbva_ctx->vbva->record_free_index = next;
	/* Remember which record we are using. */
	vbva_ctx->record = record;

	return true;
}

void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
{
	struct vbva_record *record = vbva_ctx->record;

	WARN_ON(!vbva_ctx->vbva || !record ||
		!(record->len_and_flags & VBVA_F_RECORD_PARTIAL));

	/* Mark the record completed. */
	record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;

	vbva_ctx->buffer_overflow = false;
	vbva_ctx->record = NULL;
}

void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
			       u32 buffer_offset, u32 buffer_length)
{
	vbva_ctx->buffer_offset = buffer_offset;
	vbva_ctx->buffer_length = buffer_length;
}