// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core IEEE1394 transaction logic
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>

#include "core.h"

#define HEADER_PRI(pri)			((pri) << 0)
#define HEADER_TCODE(tcode)		((tcode) << 4)
#define HEADER_RETRY(retry)		((retry) << 8)
#define HEADER_TLABEL(tlabel)		((tlabel) << 10)
#define HEADER_DESTINATION(destination)	((destination) << 16)
#define HEADER_SOURCE(source)		((source) << 16)
#define HEADER_RCODE(rcode)		((rcode) << 12)
#define HEADER_OFFSET_HIGH(offset_high)	((offset_high) << 0)
#define HEADER_DATA_LENGTH(length)	((length) << 16)
#define HEADER_EXTENDED_TCODE(tcode)	((tcode) << 0)

#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_TLABEL(q)		(((q) >> 10) & 0x3f)
#define HEADER_GET_RCODE(q)		(((q) >> 12) & 0x0f)
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_SOURCE(q)		(((q) >> 16) & 0xffff)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)

#define HEADER_DESTINATION_IS_BROADCAST(q) \
	(((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))

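/*
 * Informative example (not used by the code): header[0] of an asynchronous
 * request packs the destination into bits 31-16, the transaction label into
 * bits 15-10, the retry code into bits 9-8, and the tcode into bits 7-4.
 * A hypothetical quadlet read with tlabel 5 to node 0xffc2 would thus be
 * composed as
 *
 *	HEADER_RETRY(RETRY_X) | HEADER_TLABEL(5) |
 *		HEADER_TCODE(TCODE_READ_QUADLET_REQUEST) |
 *		HEADER_DESTINATION(0xffc2)
 *
 * and decomposed again with the matching HEADER_GET_*() accessors.
 */
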
#define PHY_PACKET_CONFIG	0x0
#define PHY_PACKET_LINK_ON	0x1
#define PHY_PACKET_SELF_ID	0x2

#define PHY_CONFIG_GAP_COUNT(gap_count)	(((gap_count) << 16) | (1 << 22))
#define PHY_CONFIG_ROOT_ID(node_id)	((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id)		((id) << 30)

/* returns 0 if the split timeout handler is already running */
static int try_cancel_split_timeout(struct fw_transaction *t)
{
	if (t->is_split_transaction)
		return del_timer(&t->split_timeout_timer);
	else
		return 1;
}

static int close_transaction(struct fw_transaction *transaction,
			     struct fw_card *card, int rcode)
{
	struct fw_transaction *t = NULL, *iter;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iter, &card->transaction_list, link) {
		if (iter == transaction) {
			if (!try_cancel_split_timeout(iter)) {
				spin_unlock_irqrestore(&card->lock, flags);
				goto timed_out;
			}
			list_del_init(&iter->link);
			card->tlabel_mask &= ~(1ULL << iter->tlabel);
			t = iter;
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (t) {
		t->callback(card, rcode, NULL, 0, t->callback_data);
		return 0;
	}

 timed_out:
	return -ENOENT;
}

/*
 * Only valid for transactions that are potentially pending (i.e., have
 * been sent).
 */
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction)
{
	/*
	 * Cancel the packet transmission if it's still queued.  That
	 * will call the packet transmission callback which cancels
	 * the transaction.
	 */

	if (card->driver->cancel_packet(card, &transaction->packet) == 0)
		return 0;

	/*
	 * If the request packet has already been sent, we need to see
	 * if the transaction is still pending and remove it in that case.
	 */

	return close_transaction(transaction, card, RCODE_CANCELLED);
}
EXPORT_SYMBOL(fw_cancel_transaction);

static void split_transaction_timeout_callback(struct timer_list *timer)
{
	struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
	struct fw_card *card = t->card;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	if (list_empty(&t->link)) {
		spin_unlock_irqrestore(&card->lock, flags);
		return;
	}
	list_del(&t->link);
	card->tlabel_mask &= ~(1ULL << t->tlabel);
	spin_unlock_irqrestore(&card->lock, flags);

	t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
}

static void start_split_transaction_timeout(struct fw_transaction *t,
					    struct fw_card *card)
{
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
		spin_unlock_irqrestore(&card->lock, flags);
		return;
	}

	t->is_split_transaction = true;
	mod_timer(&t->split_timeout_timer,
		  jiffies + card->split_timeout_jiffies);

	spin_unlock_irqrestore(&card->lock, flags);
}

static void transmit_complete_callback(struct fw_packet *packet,
				       struct fw_card *card, int status)
{
	struct fw_transaction *t =
		container_of(packet, struct fw_transaction, packet);

	switch (status) {
	case ACK_COMPLETE:
		close_transaction(t, card, RCODE_COMPLETE);
		break;
	case ACK_PENDING:
		start_split_transaction_timeout(t, card);
		break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		close_transaction(t, card, RCODE_BUSY);
		break;
	case ACK_DATA_ERROR:
		close_transaction(t, card, RCODE_DATA_ERROR);
		break;
	case ACK_TYPE_ERROR:
		close_transaction(t, card, RCODE_TYPE_ERROR);
		break;
	default:
		/*
		 * In this case the ack is really a juju specific
		 * rcode, so just forward that to the callback.
		 */
		close_transaction(t, card, status);
		break;
	}
}

static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
		int destination_id, int source_id, int generation, int speed,
		unsigned long long offset, void *payload, size_t length)
{
	int ext_tcode;

	if (tcode == TCODE_STREAM_DATA) {
		packet->header[0] =
			HEADER_DATA_LENGTH(length) |
			destination_id |
			HEADER_TCODE(TCODE_STREAM_DATA);
		packet->header_length = 4;
		packet->payload = payload;
		packet->payload_length = length;

		goto common;
	}

	if (tcode > 0x10) {
		ext_tcode = tcode & ~0x10;
		tcode = TCODE_LOCK_REQUEST;
	} else
		ext_tcode = 0;

	packet->header[0] =
		HEADER_RETRY(RETRY_X) |
		HEADER_TLABEL(tlabel) |
		HEADER_TCODE(tcode) |
		HEADER_DESTINATION(destination_id);
	packet->header[1] =
		HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
	packet->header[2] =
		offset;

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		packet->header[3] = *(u32 *)payload;
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	case TCODE_LOCK_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		packet->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(ext_tcode);
		packet->header_length = 16;
		packet->payload = payload;
		packet->payload_length = length;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		packet->header_length = 12;
		packet->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		packet->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(ext_tcode);
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}
 common:
	packet->speed = speed;
	packet->generation = generation;
	packet->ack = 0;
	packet->payload_mapped = false;
}

static int allocate_tlabel(struct fw_card *card)
{
	int tlabel;

	tlabel = card->current_tlabel;
	while (card->tlabel_mask & (1ULL << tlabel)) {
		tlabel = (tlabel + 1) & 0x3f;
		if (tlabel == card->current_tlabel)
			return -EBUSY;
	}

	card->current_tlabel = (tlabel + 1) & 0x3f;
	card->tlabel_mask |= 1ULL << tlabel;

	return tlabel;
}

/**
 * fw_send_request() - submit a request packet for transmission
 * @card: interface on which to send the request
 * @t: transaction instance to which the request belongs
 * @tcode: transaction code
 * @destination_id: destination node ID, consisting of bus_ID and phy_ID
 * @generation: bus generation in which request and response are valid
 * @speed: transmission speed
 * @offset: 48bit wide offset into destination's address space
 * @payload: data payload for the request subaction
 * @length: length of the payload, in bytes
 * @callback: function to be called when the transaction is completed
 * @callback_data: data to be passed to the transaction completion callback
 *
 * Submit a request packet into the asynchronous request transmission queue.
 * Can be called from atomic context.  If you prefer a blocking API, use
 * fw_run_transaction() in a context that can sleep.
 *
 * In case of lock requests, specify one of the firewire-core specific %TCODE_
 * constants instead of %TCODE_LOCK_REQUEST in @tcode.
 *
 * Make sure that the value in @destination_id is not older than the one in
 * @generation.  Otherwise the request risks being sent to the wrong node.
 *
 * In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller
 * needs to synthesize @destination_id with fw_stream_packet_destination_id().
 * In that case it contains tag, channel, and sy data instead of a node ID.
 *
 * The payload buffer at @payload is going to be DMA-mapped except in case of
 * @length <= 8 or of local (loopback) requests.  Hence make sure that the
 * buffer complies with the restrictions of the streaming DMA mapping API.
 * @payload must not be freed before the @callback is called.
 *
 * In case of request types without payload, @payload is NULL and @length is 0.
 *
 * After the transaction is completed successfully or unsuccessfully, the
 * @callback will be called.  Among its parameters is the response code which
 * is either one of the rcodes per IEEE 1394 or, in case of internal errors,
 * the firewire-core specific %RCODE_SEND_ERROR.  The other firewire-core
 * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION,
 * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request
 * generation, or missing ACK respectively.
 *
 * Note some timing corner cases: fw_send_request() may complete much earlier
 * than when the request packet actually hits the wire.  On the other hand,
 * transaction completion and hence execution of @callback may happen even
 * before fw_send_request() returns.
 */
void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
		     int destination_id, int generation, int speed,
		     unsigned long long offset, void *payload, size_t length,
		     fw_transaction_callback_t callback, void *callback_data)
{
	unsigned long flags;
	int tlabel;

	/*
	 * Allocate tlabel from the bitmap and put the transaction on
	 * the list while holding the card spinlock.
	 */

	spin_lock_irqsave(&card->lock, flags);

	tlabel = allocate_tlabel(card);
	if (tlabel < 0) {
		spin_unlock_irqrestore(&card->lock, flags);
		callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
		return;
	}

	t->node_id = destination_id;
	t->tlabel = tlabel;
	t->card = card;
	t->is_split_transaction = false;
	timer_setup(&t->split_timeout_timer,
		    split_transaction_timeout_callback, 0);
	t->callback = callback;
	t->callback_data = callback_data;

	fw_fill_request(&t->packet, tcode, t->tlabel,
			destination_id, card->node_id, generation,
			speed, offset, payload, length);
	t->packet.callback = transmit_complete_callback;

	list_add_tail(&t->link, &card->transaction_list);

	spin_unlock_irqrestore(&card->lock, flags);

	card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL(fw_send_request);

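/*
 * Usage sketch (illustrative only, assuming a valid @card and fw_device):
 * an asynchronous quadlet read of the first Configuration ROM quadlet.
 * The completion handler and the static transaction are inventions of this
 * example; the transaction must stay valid until the callback has run.
 *
 *	static void my_complete(struct fw_card *card, int rcode, void *payload,
 *				size_t length, void *data)
 *	{
 *		if (rcode == RCODE_COMPLETE)
 *			pr_info("read %08x\n", be32_to_cpup((__be32 *)payload));
 *	}
 *
 *	static struct fw_transaction my_t;
 *
 *	fw_send_request(card, &my_t, TCODE_READ_QUADLET_REQUEST,
 *			device->node_id, device->generation, device->max_speed,
 *			0xfffff0000400ULL, NULL, 0, my_complete, NULL);
 */
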
struct transaction_callback_data {
	struct completion done;
	void *payload;
	int rcode;
};

static void transaction_callback(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct transaction_callback_data *d = data;

	if (rcode == RCODE_COMPLETE)
		memcpy(d->payload, payload, length);
	d->rcode = rcode;
	complete(&d->done);
}

/**
 * fw_run_transaction() - send request and sleep until transaction is completed
 * @card: card interface for this request
 * @tcode: transaction code
 * @destination_id: destination node ID, consisting of bus_ID and phy_ID
 * @generation: bus generation in which request and response are valid
 * @speed: transmission speed
 * @offset: 48bit wide offset into destination's address space
 * @payload: data payload for the request subaction
 * @length: length of the payload, in bytes
 *
 * Returns the RCODE.  See fw_send_request() for parameter documentation.
 * Unlike fw_send_request(), @payload points to the payload of the request
 * and/or to the payload of the response.  DMA mapping restrictions apply to
 * outbound request payloads of >= 8 bytes but not to inbound response
 * payloads.
 */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length)
{
	struct transaction_callback_data d;
	struct fw_transaction t;

	timer_setup_on_stack(&t.split_timeout_timer, NULL, 0);
	init_completion(&d.done);
	d.payload = payload;
	fw_send_request(card, &t, tcode, destination_id, generation, speed,
			offset, payload, length, transaction_callback, &d);
	wait_for_completion(&d.done);
	destroy_timer_on_stack(&t.split_timeout_timer);

	return d.rcode;
}
EXPORT_SYMBOL(fw_run_transaction);

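/*
 * Usage sketch (illustrative only): the blocking counterpart of the example
 * above.  For a quadlet read, the 4-byte buffer receives the big-endian
 * response payload once the call returns.
 *
 *	__be32 quadlet;
 *	int rcode;
 *
 *	rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
 *				   device->node_id, device->generation,
 *				   device->max_speed, 0xfffff0000400ULL,
 *				   &quadlet, 4);
 *	if (rcode == RCODE_COMPLETE)
 *		pr_info("read %08x\n", be32_to_cpu(quadlet));
 */
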
static DEFINE_MUTEX(phy_config_mutex);
static DECLARE_COMPLETION(phy_config_done);

static void transmit_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	complete(&phy_config_done);
}

static struct fw_packet phy_config_packet = {
	.header_length	= 12,
	.header[0]	= TCODE_LINK_INTERNAL << 4,
	.payload_length	= 0,
	.speed		= SCODE_100,
	.callback	= transmit_phy_packet_callback,
};

void fw_send_phy_config(struct fw_card *card,
			int node_id, int generation, int gap_count)
{
	long timeout = DIV_ROUND_UP(HZ, 10);
	u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG);

	if (node_id != FW_PHY_CONFIG_NO_NODE_ID)
		data |= PHY_CONFIG_ROOT_ID(node_id);

	if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
		gap_count = card->driver->read_phy_reg(card, 1);
		if (gap_count < 0)
			return;

		gap_count &= 63;
		if (gap_count == 63)
			return;
	}
	data |= PHY_CONFIG_GAP_COUNT(gap_count);

	mutex_lock(&phy_config_mutex);

	phy_config_packet.header[1] = data;
	phy_config_packet.header[2] = ~data;
	phy_config_packet.generation = generation;
	reinit_completion(&phy_config_done);

	card->driver->send_request(card, &phy_config_packet);
	wait_for_completion_timeout(&phy_config_done, timeout);

	mutex_unlock(&phy_config_mutex);
}

static struct fw_address_handler *lookup_overlapping_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (handler->offset < offset + length &&
		    offset < handler->offset + handler->length)
			return handler;
	}

	return NULL;
}

static bool is_enclosing_handler(struct fw_address_handler *handler,
				 unsigned long long offset, size_t length)
{
	return handler->offset <= offset &&
		offset + length <= handler->offset + handler->length;
}

static struct fw_address_handler *lookup_enclosing_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (is_enclosing_handler(handler, offset, length))
			return handler;
	}

	return NULL;
}

static DEFINE_SPINLOCK(address_handler_list_lock);
static LIST_HEAD(address_handler_list);

const struct fw_address_region fw_high_memory_region =
	{ .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);

static const struct fw_address_region low_memory_region =
	{ .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, };

#if 0
const struct fw_address_region fw_private_region =
	{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
const struct fw_address_region fw_csr_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, };
const struct fw_address_region fw_unit_space_region =
	{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */

static bool is_in_fcp_region(u64 offset, size_t length)
{
	return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
		offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END);
}

/**
 * fw_core_add_address_handler() - register for incoming requests
 * @handler: callback
 * @region: region in the IEEE 1212 node space address range
 *
 * region->start, ->end, and handler->length have to be quadlet-aligned.
 *
 * When a request is received that falls within the specified address range,
 * the specified callback is invoked.  The parameters passed to the callback
 * give the details of the particular request.
 *
 * To be called in process context.
 * Return value: 0 on success, non-zero otherwise.
 *
 * The start offset of the handler's address region is determined by
 * fw_core_add_address_handler() and is returned in handler->offset.
 *
 * Address allocations are exclusive, except for the FCP registers.
 */
int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region)
{
	struct fw_address_handler *other;
	int ret = -EBUSY;

	if (region->start & 0xffff000000000003ULL ||
	    region->start >= region->end ||
	    region->end   > 0x0001000000000000ULL ||
	    handler->length & 3 ||
	    handler->length == 0)
		return -EINVAL;

	spin_lock(&address_handler_list_lock);

	handler->offset = region->start;
	while (handler->offset + handler->length <= region->end) {
		if (is_in_fcp_region(handler->offset, handler->length))
			other = NULL;
		else
			other = lookup_overlapping_address_handler
					(&address_handler_list,
					 handler->offset, handler->length);
		if (other != NULL) {
			handler->offset += other->length;
		} else {
			list_add_tail_rcu(&handler->link, &address_handler_list);
			ret = 0;
			break;
		}
	}

	spin_unlock(&address_handler_list_lock);

	return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);

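/*
 * Usage sketch (illustrative only): claiming a quadlet-sized region in high
 * memory.  The handler and callback names are inventions of this example;
 * on success the allocated start address is returned in my_handler.offset.
 *
 *	static void my_callback(struct fw_card *card, struct fw_request *request,
 *				int tcode, int destination, int source,
 *				int generation, unsigned long long offset,
 *				void *data, size_t length, void *callback_data)
 *	{
 *		fw_send_response(card, request, RCODE_COMPLETE);
 *	}
 *
 *	static struct fw_address_handler my_handler = {
 *		.length		  = 4,
 *		.address_callback = my_callback,
 *	};
 *
 *	err = fw_core_add_address_handler(&my_handler, &fw_high_memory_region);
 */
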
/**
 * fw_core_remove_address_handler() - unregister an address handler
 * @handler: callback
 *
 * To be called in process context.
 *
 * When fw_core_remove_address_handler() returns, @handler->callback() is
 * guaranteed to not run on any CPU anymore.
 */
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
	spin_lock(&address_handler_list_lock);
	list_del_rcu(&handler->link);
	spin_unlock(&address_handler_list_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(fw_core_remove_address_handler);

struct fw_request {
	struct fw_packet response;
	u32 request_header[4];
	int ack;
	u32 length;
	u32 data[];
};

static void free_response_callback(struct fw_packet *packet,
				   struct fw_card *card, int status)
{
	struct fw_request *request;

	request = container_of(packet, struct fw_request, response);
	kfree(request);
}

int fw_get_response_length(struct fw_request *r)
{
	int tcode, ext_tcode, data_length;

	tcode = HEADER_GET_TCODE(r->request_header[0]);

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		return 0;

	case TCODE_READ_QUADLET_REQUEST:
		return 4;

	case TCODE_READ_BLOCK_REQUEST:
		data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
		return data_length;

	case TCODE_LOCK_REQUEST:
		ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]);
		data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
		switch (ext_tcode) {
		case EXTCODE_FETCH_ADD:
		case EXTCODE_LITTLE_ADD:
			return data_length;
		default:
			return data_length / 2;
		}

	default:
		WARN(1, "wrong tcode %d\n", tcode);
		return 0;
	}
}

void fw_fill_response(struct fw_packet *response, u32 *request_header,
		      int rcode, void *payload, size_t length)
{
	int tcode, tlabel, extended_tcode, source, destination;

	tcode          = HEADER_GET_TCODE(request_header[0]);
	tlabel         = HEADER_GET_TLABEL(request_header[0]);
	source         = HEADER_GET_DESTINATION(request_header[0]);
	destination    = HEADER_GET_SOURCE(request_header[1]);
	extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);

	response->header[0] =
		HEADER_RETRY(RETRY_1) |
		HEADER_TLABEL(tlabel) |
		HEADER_DESTINATION(destination);
	response->header[1] =
		HEADER_SOURCE(source) |
		HEADER_RCODE(rcode);
	response->header[2] = 0;

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
		response->header_length = 12;
		response->payload_length = 0;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		response->header[0] |=
			HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
		if (payload != NULL)
			response->header[3] = *(u32 *)payload;
		else
			response->header[3] = 0;
		response->header_length = 16;
		response->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		response->header[0] |= HEADER_TCODE(tcode + 2);
		response->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(extended_tcode);
		response->header_length = 16;
		response->payload = payload;
		response->payload_length = length;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}

	response->payload_mapped = false;
}
EXPORT_SYMBOL(fw_fill_response);

static u32 compute_split_timeout_timestamp(struct fw_card *card,
					   u32 request_timestamp)
{
	unsigned int cycles;
	u32 timestamp;

	cycles = card->split_timeout_cycles;
	cycles += request_timestamp & 0x1fff;

	timestamp = request_timestamp & ~0x1fff;
	timestamp += (cycles / 8000) << 13;
	timestamp |= cycles % 8000;

	return timestamp;
}

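/*
 * Worked example (informative): the timestamp mirrors the OHCI packet
 * timestamp format, a 3-bit seconds count in bits 15-13 and a cycle count
 * (0..7999) in bits 12-0.  With split_timeout_cycles = 800 (100 ms) and a
 * request timestamp of second 3, cycle 7500, the sum 7500 + 800 = 8300
 * wraps into second 4, cycle 300, i.e. (4 << 13) | 300.
 */
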
static struct fw_request *allocate_request(struct fw_card *card,
					   struct fw_packet *p)
{
	struct fw_request *request;
	u32 *data, length;
	int request_tcode;

	request_tcode = HEADER_GET_TCODE(p->header[0]);
	switch (request_tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		data = &p->header[3];
		length = 4;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		data = p->payload;
		length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	case TCODE_READ_QUADLET_REQUEST:
		data = NULL;
		length = 4;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		data = NULL;
		length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	default:
		fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n",
			  p->header[0], p->header[1], p->header[2]);
		return NULL;
	}

	request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
	if (request == NULL)
		return NULL;

	request->response.speed = p->speed;
	request->response.timestamp =
			compute_split_timeout_timestamp(card, p->timestamp);
	request->response.generation = p->generation;
	request->response.ack = 0;
	request->response.callback = free_response_callback;
	request->ack = p->ack;
	request->length = length;
	if (data)
		memcpy(request->data, data, length);

	memcpy(request->request_header, p->header, sizeof(p->header));

	return request;
}

void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode)
{
	if (WARN_ONCE(!request, "invalid for FCP address handlers"))
		return;

	/* unified transaction or broadcast transaction: don't respond */
	if (request->ack != ACK_PENDING ||
	    HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
		kfree(request);
		return;
	}

	if (rcode == RCODE_COMPLETE)
		fw_fill_response(&request->response, request->request_header,
				 rcode, request->data,
				 fw_get_response_length(request));
	else
		fw_fill_response(&request->response, request->request_header,
				 rcode, NULL, 0);

	card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);

/**
 * fw_get_request_speed() - returns speed at which the @request was received
 * @request: firewire request data
 */
int fw_get_request_speed(struct fw_request *request)
{
	return request->response.speed;
}
EXPORT_SYMBOL(fw_get_request_speed);

static void handle_exclusive_region_request(struct fw_card *card,
					    struct fw_packet *p,
					    struct fw_request *request,
					    unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	destination = HEADER_GET_DESTINATION(p->header[0]);
	source      = HEADER_GET_SOURCE(p->header[1]);
	tcode       = HEADER_GET_TCODE(p->header[0]);
	if (tcode == TCODE_LOCK_REQUEST)
		tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]);

	rcu_read_lock();
	handler = lookup_enclosing_address_handler(&address_handler_list,
						   offset, request->length);
	if (handler)
		handler->address_callback(card, request,
					  tcode, destination, source,
					  p->generation, offset,
					  request->data, request->length,
					  handler->callback_data);
	rcu_read_unlock();

	if (!handler)
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
}

static void handle_fcp_region_request(struct fw_card *card,
				      struct fw_packet *p,
				      struct fw_request *request,
				      unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
	     offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
	    request->length > 0x200) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);

		return;
	}

	tcode       = HEADER_GET_TCODE(p->header[0]);
	destination = HEADER_GET_DESTINATION(p->header[0]);
	source      = HEADER_GET_SOURCE(p->header[1]);

	if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
	    tcode != TCODE_WRITE_BLOCK_REQUEST) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);

		return;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(handler, &address_handler_list, link) {
		if (is_enclosing_handler(handler, offset, request->length))
			handler->address_callback(card, NULL, tcode,
						  destination, source,
						  p->generation, offset,
						  request->data,
						  request->length,
						  handler->callback_data);
	}
	rcu_read_unlock();

	fw_send_response(card, request, RCODE_COMPLETE);
}

void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
	struct fw_request *request;
	unsigned long long offset;

	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
		return;

	if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) {
		fw_cdev_handle_phy_packet(card, p);
		return;
	}

	request = allocate_request(card, p);
	if (request == NULL) {
		/* FIXME: send statically allocated busy packet. */
		return;
	}

	offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) |
		p->header[2];

	if (!is_in_fcp_region(offset, request->length))
		handle_exclusive_region_request(card, p, request, offset);
	else
		handle_fcp_region_request(card, p, request, offset);
}
EXPORT_SYMBOL(fw_core_handle_request);

void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
	struct fw_transaction *t = NULL, *iter;
	unsigned long flags;
	u32 *data;
	size_t data_length;
	int tcode, tlabel, source, rcode;

	tcode  = HEADER_GET_TCODE(p->header[0]);
	tlabel = HEADER_GET_TLABEL(p->header[0]);
	source = HEADER_GET_SOURCE(p->header[1]);
	rcode  = HEADER_GET_RCODE(p->header[1]);

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iter, &card->transaction_list, link) {
		if (iter->node_id == source && iter->tlabel == tlabel) {
			if (!try_cancel_split_timeout(iter)) {
				spin_unlock_irqrestore(&card->lock, flags);
				goto timed_out;
			}
			list_del_init(&iter->link);
			card->tlabel_mask &= ~(1ULL << iter->tlabel);
			t = iter;
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!t) {
 timed_out:
		fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
			  source, tlabel);
		return;
	}

	/*
	 * FIXME: sanity-check the packet: is the length correct, do the
	 * tcode and the addresses match?
	 */

	switch (tcode) {
	case TCODE_READ_QUADLET_RESPONSE:
		data = (u32 *) &p->header[3];
		data_length = 4;
		break;

	case TCODE_WRITE_RESPONSE:
		data = NULL;
		data_length = 0;
		break;

	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_RESPONSE:
		data = p->payload;
		data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	default:
		/* Should never happen, this is just to shut up gcc. */
		data = NULL;
		data_length = 0;
		break;
	}

	/*
	 * The response handler may be executed while the request handler
	 * is still pending.  Cancel the request handler.
	 */
	card->driver->cancel_packet(card, &t->packet);

	t->callback(card, rcode, data, data_length, t->callback_data);
}
EXPORT_SYMBOL(fw_core_handle_response);

/**
 * fw_rcode_string - convert a firewire result code to an error description
 * @rcode: the result code
 */
const char *fw_rcode_string(int rcode)
{
	static const char *const names[] = {
		[RCODE_COMPLETE]       = "no error",
		[RCODE_CONFLICT_ERROR] = "conflict error",
		[RCODE_DATA_ERROR]     = "data error",
		[RCODE_TYPE_ERROR]     = "type error",
		[RCODE_ADDRESS_ERROR]  = "address error",
		[RCODE_SEND_ERROR]     = "send error",
		[RCODE_CANCELLED]      = "timeout",
		[RCODE_BUSY]           = "busy",
		[RCODE_GENERATION]     = "bus reset",
		[RCODE_NO_ACK]         = "no ack",
	};

	if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode])
		return names[rcode];
	else
		return "unknown";
}
EXPORT_SYMBOL(fw_rcode_string);

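/*
 * Usage sketch (illustrative only): turning a transaction result into a
 * readable log line.
 *
 *	if (rcode != RCODE_COMPLETE)
 *		fw_notice(card, "transaction failed: %s\n",
 *			  fw_rcode_string(rcode));
 */
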
static const struct fw_address_region topology_map_region =
	{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
	  .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };

static void handle_topology_map(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	int start;

	if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	if ((offset & 3) > 0 || (length & 3) > 0) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	start = (offset - topology_map_region.start) / 4;
	memcpy(payload, &card->topology_map[start], length);

	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler topology_map = {
	.length			= 0x400,
	.address_callback	= handle_topology_map,
};

static const struct fw_address_region registers_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };

static void update_split_timeout(struct fw_card *card)
{
	unsigned int cycles;

	cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);

	/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
	cycles = clamp(cycles, 800u, 3u * 8000u);

	card->split_timeout_cycles = cycles;
	card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
}

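/*
 * Worked example (informative): SPLIT_TIMEOUT_HI counts whole seconds and
 * the top 13 bits of SPLIT_TIMEOUT_LO count 1/8000ths of a second.  The
 * customary default of hi = 0, lo = 800 << 19 thus yields
 * 0 * 8000 + 800 = 800 cycles, i.e. the 100 ms minimum enforced by the
 * clamp above.
 */
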
static void handle_registers(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	int reg = offset & ~CSR_REGISTER_BASE;
	__be32 *data = payload;
	int rcode = RCODE_COMPLETE;
	unsigned long flags;

	switch (reg) {
	case CSR_PRIORITY_BUDGET:
		if (!card->priority_budget_implemented) {
			rcode = RCODE_ADDRESS_ERROR;
			break;
		}
		fallthrough;

	case CSR_NODE_IDS:
		/*
		 * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8
		 * and 9.6, but interoperable with IEEE 1394.1-2004 bridges
		 */
		fallthrough;

	case CSR_STATE_CLEAR:
	case CSR_STATE_SET:
	case CSR_CYCLE_TIME:
	case CSR_BUS_TIME:
	case CSR_BUSY_TIMEOUT:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->driver->read_csr(card, reg));
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, reg, be32_to_cpu(*data));
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_RESET_START:
		if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, CSR_STATE_CLEAR,
						CSR_STATE_BIT_ABDICATE);
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_SPLIT_TIMEOUT_HI:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_hi);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			spin_lock_irqsave(&card->lock, flags);
			card->split_timeout_hi = be32_to_cpu(*data) & 7;
			update_split_timeout(card);
			spin_unlock_irqrestore(&card->lock, flags);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_SPLIT_TIMEOUT_LO:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_lo);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			spin_lock_irqsave(&card->lock, flags);
			card->split_timeout_lo =
					be32_to_cpu(*data) & 0xfff80000;
			update_split_timeout(card);
			spin_unlock_irqrestore(&card->lock, flags);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_MAINT_UTILITY:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = card->maint_utility_register;
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->maint_utility_register = *data;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BROADCAST_CHANNEL:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->broadcast_channel);
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->broadcast_channel =
			    (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
			    BROADCAST_CHANNEL_INITIAL;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		/*
		 * FIXME: these are handled by the OHCI hardware and
		 * the stack never sees these requests.  If we add
		 * support for a new type of controller that doesn't
		 * handle this in hardware, we need to deal with these
		 * transactions.
		 */
		BUG();
		break;

	default:
		rcode = RCODE_ADDRESS_ERROR;
		break;
	}

	fw_send_response(card, request, rcode);
}

static struct fw_address_handler registers = {
	.length = 0x400,
	.address_callback = handle_registers,
};
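/*
 * A minimal sketch of the peer's side of the exchange served by
 * handle_registers() above: a remote Linux node could read our
 * CYCLE_TIME register through the in-kernel transaction API.  The
 * fw_device "device" is an assumption of the example, not something
 * defined in this file.
 *
 *	__be32 value;
 *	int rcode;
 *
 *	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
 *				   device->node_id, device->generation,
 *				   device->max_speed,
 *				   CSR_REGISTER_BASE + CSR_CYCLE_TIME,
 *				   &value, sizeof(value));
 *	if (rcode == RCODE_COMPLETE)
 *		pr_info("cycle time: %08x\n", be32_to_cpu(value));
 */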

static void handle_low_memory(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	/*
	 * This catches requests not handled by the physical DMA unit,
	 * i.e., wrong transaction types or unauthorized source nodes.
	 */
	fw_send_response(card, request, RCODE_TYPE_ERROR);
}

static struct fw_address_handler low_memory = {
	.length = FW_MAX_PHYSICAL_RANGE,
	.address_callback = handle_low_memory,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
MODULE_LICENSE("GPL");

static const u32 vendor_textual_descriptor[] = {
	/* textual descriptor leaf ("Linux Firewire") */
	0x00060000,
	0x00000000,
	0x00000000,
	0x4c696e75,		/* L i n u */
	0x78204669,		/* x   F i */
	0x72657769,		/* r e w i */
	0x72650000,		/* r e     */
};

static const u32 model_textual_descriptor[] = {
	/* model descriptor leaf ("Juju") */
	0x00030000,
	0x00000000,
	0x00000000,
	0x4a756a75,		/* J u j u */
};

static struct fw_descriptor vendor_id_descriptor = {
	.length = ARRAY_SIZE(vendor_textual_descriptor),
	.immediate = 0x03001f11,
	.key = 0x81000000,
	.data = vendor_textual_descriptor,
};

static struct fw_descriptor model_id_descriptor = {
	.length = ARRAY_SIZE(model_textual_descriptor),
	.immediate = 0x17023901,
	.key = 0x81000000,
	.data = model_textual_descriptor,
};
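/*
 * How these hook into the config ROM (a summary, assuming the usual
 * behavior of fw_core_add_descriptor() in this subsystem): the
 * .immediate quadlet is the vendor/model ID entry (key 0x03 or 0x17
 * in the top byte, per IEEE 1212), .key = 0x81000000 becomes a
 * textual-descriptor leaf pointer whose offset the ROM generator
 * fills in, and .data is the leaf itself.  The zero CRC fields in
 * the leaves are computed when each card's ROM is (re)generated.
 */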

static int __init fw_core_init(void)
{
	int ret;

	fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0);
	if (!fw_workqueue)
		return -ENOMEM;

	ret = bus_register(&fw_bus_type);
	if (ret < 0) {
		destroy_workqueue(fw_workqueue);
		return ret;
	}

	fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
	if (fw_cdev_major < 0) {
		bus_unregister(&fw_bus_type);
		destroy_workqueue(fw_workqueue);
		return fw_cdev_major;
	}

	fw_core_add_address_handler(&topology_map, &topology_map_region);
	fw_core_add_address_handler(&registers, &registers_region);
	fw_core_add_address_handler(&low_memory, &low_memory_region);
	fw_core_add_descriptor(&vendor_id_descriptor);
	fw_core_add_descriptor(&model_id_descriptor);

	return 0;
}

static void __exit fw_core_cleanup(void)
{
	unregister_chrdev(fw_cdev_major, "firewire");
	bus_unregister(&fw_bus_type);
	destroy_workqueue(fw_workqueue);
	idr_destroy(&fw_device_idr);
}

module_init(fw_core_init);
module_exit(fw_core_cleanup);