// SPDX-License-Identifier: GPL-2.0-only
/*
 * ff-transaction.c - a part of driver for RME Fireface series
 *
 * Copyright (c) 2015-2017 Takashi Sakamoto
 */

#include "ff.h"

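/*
 * Completion callback for an asynchronous MIDI write to the device. On a
 * permanent error the port is marked broken, on a temporary error the
 * message is rescheduled immediately, and on success the transmitted bytes
 * are acknowledged to the rawmidi layer and the next chunk is scheduled if
 * more data is pending.
 */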
static void finish_transmit_midi_msg(struct snd_ff *ff, unsigned int port,
				     int rcode)
{
	struct snd_rawmidi_substream *substream =
		READ_ONCE(ff->rx_midi_substreams[port]);

	if (rcode_is_permanent_error(rcode)) {
		ff->rx_midi_error[port] = true;
		return;
	}

	if (rcode != RCODE_COMPLETE) {
		/* Transfer the message again, immediately. */
		ff->next_ktime[port] = 0;
		schedule_work(&ff->rx_midi_work[port]);
		return;
	}

	snd_rawmidi_transmit_ack(substream, ff->rx_bytes[port]);
	ff->rx_bytes[port] = 0;

	if (!snd_rawmidi_transmit_empty(substream))
		schedule_work(&ff->rx_midi_work[port]);
}

static void finish_transmit_midi0_msg(struct fw_card *card, int rcode,
				      void *data, size_t length,
				      void *callback_data)
{
	struct snd_ff *ff =
		container_of(callback_data, struct snd_ff, transactions[0]);
	finish_transmit_midi_msg(ff, 0, rcode);
}

static void finish_transmit_midi1_msg(struct fw_card *card, int rcode,
				      void *data, size_t length,
				      void *callback_data)
{
	struct snd_ff *ff =
		container_of(callback_data, struct snd_ff, transactions[1]);
	finish_transmit_midi_msg(ff, 1, rcode);
}

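/*
 * Transmit MIDI bytes queued on the given port as an asynchronous write
 * request. The function bails out when no substream is bound, when a
 * previous transaction is still in flight or failed permanently, and it
 * defers itself while the throttling deadline has not yet passed.
 */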
static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
{
	struct snd_rawmidi_substream *substream =
		READ_ONCE(ff->rx_midi_substreams[port]);
	int quad_count;

	struct fw_device *fw_dev = fw_parent_device(ff->unit);
	unsigned long long addr;
	int generation;
	fw_transaction_callback_t callback;
	int tcode;

	if (substream == NULL || snd_rawmidi_transmit_empty(substream))
		return;

	if (ff->rx_bytes[port] > 0 || ff->rx_midi_error[port])
		return;

	/* Too early for the next transaction; do it at the next chance. */
	if (ktime_after(ff->next_ktime[port], ktime_get())) {
		schedule_work(&ff->rx_midi_work[port]);
		return;
	}

	quad_count = ff->spec->protocol->fill_midi_msg(ff, substream, port);
	if (quad_count <= 0)
		return;

	if (port == 0) {
		addr = ff->spec->midi_rx_addrs[0];
		callback = finish_transmit_midi0_msg;
	} else {
		addr = ff->spec->midi_rx_addrs[1];
		callback = finish_transmit_midi1_msg;
	}

	/*
	 * Set the interval before the next transaction, long enough for the
	 * device to forward the bytes at the MIDI baud rate (31.25 kbps).
	 */
	ff->next_ktime[port] = ktime_add_ns(ktime_get(),
			ff->rx_bytes[port] * 8 * (NSEC_PER_SEC / 31250));

	if (quad_count == 1)
		tcode = TCODE_WRITE_QUADLET_REQUEST;
	else
		tcode = TCODE_WRITE_BLOCK_REQUEST;

	/*
	 * In the Linux FireWire core, the node ID is updated before the
	 * generation, with a write memory barrier in between. Pairing that
	 * barrier with this smp_rmb() ensures that, once the generation is
	 * loaded, the node ID read afterwards is at least as recent. This is
	 * a lightweight way to stay coherent across bus reset events on the
	 * IEEE 1394 bus.
	 */
	generation = fw_dev->generation;
	smp_rmb();
	fw_send_request(fw_dev->card, &ff->transactions[port], tcode,
			fw_dev->node_id, generation, fw_dev->max_speed,
			addr, &ff->msg_buf[port], quad_count * 4,
			callback, &ff->transactions[port]);
}

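/* Workqueue handlers to transmit MIDI messages; one per port. */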
static void transmit_midi0_msg(struct work_struct *work)
{
	struct snd_ff *ff = container_of(work, struct snd_ff, rx_midi_work[0]);

	transmit_midi_msg(ff, 0);
}

static void transmit_midi1_msg(struct work_struct *work)
{
	struct snd_ff *ff = container_of(work, struct snd_ff, rx_midi_work[1]);

	transmit_midi_msg(ff, 1);
}

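/*
 * Handler for asynchronous transactions which the device addresses to the
 * registered region. The request is acknowledged, then the payload and its
 * offset within the region are passed to the protocol-specific MIDI decoder.
 */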
static void handle_midi_msg(struct fw_card *card, struct fw_request *request,
			    int tcode, int destination, int source,
			    int generation, unsigned long long offset,
			    void *data, size_t length, void *callback_data)
{
	struct snd_ff *ff = callback_data;
	__le32 *buf = data;

	fw_send_response(card, request, RCODE_COMPLETE);

	offset -= ff->async_handler.offset;
	ff->spec->protocol->handle_midi_msg(ff, (unsigned int)offset, buf,
					    length);
}

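/*
 * Try to allocate an address region whose lower 4 bytes are zero, starting
 * at 0x000100000000 * i. The device can only be programmed with the upper
 * 4 bytes of the address, so any other placement is rejected with -EAGAIN
 * and the caller retries with the next candidate.
 */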
static int allocate_own_address(struct snd_ff *ff, int i)
{
	struct fw_address_region midi_msg_region;
	int err;

	ff->async_handler.length = ff->spec->midi_addr_range;
	ff->async_handler.address_callback = handle_midi_msg;
	ff->async_handler.callback_data = ff;

	midi_msg_region.start = 0x000100000000ull * i;
	midi_msg_region.end = midi_msg_region.start + ff->async_handler.length;

	err = fw_core_add_address_handler(&ff->async_handler, &midi_msg_region);
	if (err >= 0) {
		/*
		 * Controllers can register only the higher 4 bytes of an
		 * address; the lower 4 bytes of the offset must be zero.
		 */
		if (ff->async_handler.offset & 0x0000ffffffff) {
			fw_core_remove_address_handler(&ff->async_handler);
			err = -EAGAIN;
		}
	}

	return err;
}

// Controllers are allowed to register the higher 4 bytes of a destination
// address to receive asynchronous transactions for MIDI messages, while the
// way to register the lower 4 bytes of the address differs between protocols.
// For details, please refer to comments in the protocol implementations.
//
// This driver expects userspace applications to configure the registers for
// the lower address, because in most cases such registers carry other
// settings as well.
int snd_ff_transaction_reregister(struct snd_ff *ff)
{
	struct fw_card *fw_card = fw_parent_device(ff->unit)->card;
	u32 addr;
	__le32 reg;

	/*
	 * Controllers are allowed to register their node ID and the upper
	 * 2 bytes of the local address to listen to asynchronous
	 * transactions.
	 */
	addr = (fw_card->node_id << 16) | (ff->async_handler.offset >> 32);
	reg = cpu_to_le32(addr);
	return snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
				  ff->spec->midi_high_addr,
				  &reg, sizeof(reg), 0);
}

int snd_ff_transaction_register(struct snd_ff *ff)
{
	int i, err;

	/*
	 * Allocate a range in the Memory Space of IEC 13213, but the lower
	 * 4 bytes should be zero due to the device specification.
	 */
	for (i = 0; i < 0xffff; i++) {
		err = allocate_own_address(ff, i);
		if (err != -EBUSY && err != -EAGAIN)
			break;
	}
	if (err < 0)
		return err;

	err = snd_ff_transaction_reregister(ff);
	if (err < 0)
		return err;

	INIT_WORK(&ff->rx_midi_work[0], transmit_midi0_msg);
	INIT_WORK(&ff->rx_midi_work[1], transmit_midi1_msg);

	return 0;
}

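/*
 * Tear down the MIDI transaction resources: clear the registered high
 * address on the device and release the local address region.
 */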
void snd_ff_transaction_unregister(struct snd_ff *ff)
{
	__le32 reg;

	if (ff->async_handler.callback_data == NULL)
		return;
	ff->async_handler.callback_data = NULL;

	/* Release the higher 4 bytes of the address. */
	reg = cpu_to_le32(0x00000000);
	snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
			   ff->spec->midi_high_addr,
			   &reg, sizeof(reg), 0);

	fw_core_remove_address_handler(&ff->async_handler);
}