/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FIREWIRE_CORE_H
#define _FIREWIRE_CORE_H

#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <linux/refcount.h>

struct device;
struct fw_card;
struct fw_device;
struct fw_iso_buffer;
struct fw_iso_context;
struct fw_iso_packet;
struct fw_node;
struct fw_packet;
struct fw_request;


/* -card */

extern __printf(2, 3)
void fw_err(const struct fw_card *card, const char *fmt, ...);
extern __printf(2, 3)
void fw_notice(const struct fw_card *card, const char *fmt, ...);

/* bitfields within the PHY registers */
#define PHY_LINK_ACTIVE		0x80
#define PHY_CONTENDER		0x40
#define PHY_BUS_RESET		0x40
#define PHY_EXTENDED_REGISTERS	0xe0
#define PHY_BUS_SHORT_RESET	0x40
#define PHY_INT_STATUS_BITS	0x3c
#define PHY_ENABLE_ACCEL	0x02
#define PHY_ENABLE_MULTI	0x01
#define PHY_PAGE_SELECT		0xe0

#define BANDWIDTH_AVAILABLE_INITIAL	4915
#define BROADCAST_CHANNEL_INITIAL	(1 << 31 | 31)
#define BROADCAST_CHANNEL_VALID		(1 << 30)

#define CSR_STATE_BIT_CMSTR	(1 << 8)
#define CSR_STATE_BIT_ABDICATE	(1 << 10)

struct fw_card_driver {
	/*
	 * Enable the given card with the given initial config rom.
	 * This function is expected to activate the card, and either
	 * enable the PHY or set the link_on bit and initiate a bus
	 * reset.
	 */
	int (*enable)(struct fw_card *card,
		      const __be32 *config_rom, size_t length);

	int (*read_phy_reg)(struct fw_card *card, int address);
	int (*update_phy_reg)(struct fw_card *card, int address,
			      int clear_bits, int set_bits);

	/*
	 * Update the config rom for an enabled card. This function
	 * should change the config rom that is presented on the bus
	 * and initiate a bus reset.
	 */
	int (*set_config_rom)(struct fw_card *card,
			      const __be32 *config_rom, size_t length);

	void (*send_request)(struct fw_card *card, struct fw_packet *packet);
	void (*send_response)(struct fw_card *card, struct fw_packet *packet);
	/* Calling cancel is valid once a packet has been submitted. */
	int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);

	/*
	 * Allow the specified node ID to do direct DMA out of and into
	 * host memory.  The card will disable this for all nodes when a
	 * bus reset happens, so the driver needs to re-enable it after a
	 * bus reset.  Returns 0 on success, -ENODEV if the card doesn't
	 * support this, -ESTALE if the generation doesn't match.
	 */
	int (*enable_phys_dma)(struct fw_card *card,
			       int node_id, int generation);

	u32 (*read_csr)(struct fw_card *card, int csr_offset);
	void (*write_csr)(struct fw_card *card, int csr_offset, u32 value);

	struct fw_iso_context *
	(*allocate_iso_context)(struct fw_card *card,
				int type, int channel, size_t header_size);
	void (*free_iso_context)(struct fw_iso_context *ctx);

	int (*start_iso)(struct fw_iso_context *ctx,
			 s32 cycle, u32 sync, u32 tags);

	int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels);

	int (*queue_iso)(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload);

	void (*flush_queue_iso)(struct fw_iso_context *ctx);

	int (*flush_iso_completions)(struct fw_iso_context *ctx);

	int (*stop_iso)(struct fw_iso_context *ctx);
};
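
/*
 * Usage sketch (illustrative, not part of the original header): a link-layer
 * controller driver is expected to provide one fw_card_driver instance whose
 * hooks implement the operations above.  All "example_*" names below are
 * hypothetical.
 *
 *	static const struct fw_card_driver example_card_driver = {
 *		.enable			= example_enable,
 *		.read_phy_reg		= example_read_phy_reg,
 *		.update_phy_reg		= example_update_phy_reg,
 *		.set_config_rom		= example_set_config_rom,
 *		.send_request		= example_send_request,
 *		.send_response		= example_send_response,
 *		.cancel_packet		= example_cancel_packet,
 *		.enable_phys_dma	= example_enable_phys_dma,
 *		.read_csr		= example_read_csr,
 *		.write_csr		= example_write_csr,
 *		.allocate_iso_context	= example_allocate_iso_context,
 *		.free_iso_context	= example_free_iso_context,
 *		.set_iso_channels	= example_set_iso_channels,
 *		.queue_iso		= example_queue_iso,
 *		.flush_queue_iso	= example_flush_queue_iso,
 *		.flush_iso_completions	= example_flush_iso_completions,
 *		.start_iso		= example_start_iso,
 *		.stop_iso		= example_stop_iso,
 *	};
 */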

void fw_card_initialize(struct fw_card *card,
			const struct fw_card_driver *driver, struct device *device);
int fw_card_add(struct fw_card *card,
		u32 max_receive, u32 link_speed, u64 guid);
void fw_core_remove_card(struct fw_card *card);
int fw_compute_block_crc(__be32 *block);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
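
/*
 * Illustrative lifecycle sketch (an assumption about typical use, not taken
 * from this header): a controller driver initializes the card with its
 * fw_card_driver, publishes it with fw_card_add(), and tears it down with
 * fw_core_remove_card().  "example_card_driver", "parent_dev", "max_receive",
 * "link_speed" and "guid" are placeholder names.
 *
 *	fw_card_initialize(card, &example_card_driver, parent_dev);
 *	err = fw_card_add(card, max_receive, link_speed, guid);
 *	if (err < 0)
 *		return err;
 *	...
 *	fw_core_remove_card(card);
 */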

/* -cdev */

extern const struct file_operations fw_device_ops;

void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);


/* -device */

extern struct rw_semaphore fw_device_rwsem;
extern struct idr fw_device_idr;
extern int fw_cdev_major;

static inline struct fw_device *fw_device_get(struct fw_device *device)
{
	get_device(&device->device);

	return device;
}

static inline void fw_device_put(struct fw_device *device)
{
	put_device(&device->device);
}
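
/*
 * Minimal usage note (illustrative): fw_device_get()/fw_device_put() wrap
 * get_device()/put_device() on the embedded struct device, so a caller that
 * keeps a struct fw_device pointer beyond the scope in which it was looked up
 * would typically do:
 *
 *	fw_device_get(device);
 *	...
 *	fw_device_put(device);
 */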

struct fw_device *fw_device_get_by_devt(dev_t devt);
int fw_device_set_broadcast_channel(struct device *dev, void *gen);
void fw_node_event(struct fw_card *card, struct fw_node *node, int event);


/* -iso */

int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
			  enum dma_data_direction direction);
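
/*
 * A minimal sketch of how these two helpers combine (an assumption about
 * typical use; "buffer", "card", "page_count" and the labels are
 * placeholders): allocate the page backing first, then map it for DMA on a
 * specific card and direction.
 *
 *	ret = fw_iso_buffer_alloc(&buffer, page_count);
 *	if (ret < 0)
 *		goto out;
 *	ret = fw_iso_buffer_map_dma(&buffer, card, DMA_FROM_DEVICE);
 *	if (ret < 0)
 *		goto out_free;
 */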


/* -topology */

enum {
	FW_NODE_CREATED,
	FW_NODE_UPDATED,
	FW_NODE_DESTROYED,
	FW_NODE_LINK_ON,
	FW_NODE_LINK_OFF,
	FW_NODE_INITIATED_RESET,
};

struct fw_node {
	u16 node_id;
	u8 color;
	u8 port_count;
	u8 link_on:1;
	u8 initiated_reset:1;
	u8 b_path:1;
	u8 phy_speed:2;	/* As in the self ID packet. */
	u8 max_speed:2;	/* Minimum of all phy-speeds on the path from the
			 * local node to this node. */
	u8 max_depth:4;	/* Maximum depth to any leaf node */
	u8 max_hops:4;	/* Max hops in this sub tree */
	refcount_t ref_count;

	/* For serializing node topology into a list. */
	struct list_head link;

	/* Upper layer specific data. */
	void *data;

	struct fw_node *ports[];
};

static inline struct fw_node *fw_node_get(struct fw_node *node)
{
	refcount_inc(&node->ref_count);

	return node;
}

static inline void fw_node_put(struct fw_node *node)
{
	if (refcount_dec_and_test(&node->ref_count))
		kfree(node);
}

void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
			      int generation, int self_id_count,
			      u32 *self_ids, bool bm_abdicate);
void fw_destroy_nodes(struct fw_card *card);

/*
 * Check whether new_generation is the immediate successor of old_generation.
 * Take counter roll-over at 255 (as per OHCI) into account.
 */
static inline bool is_next_generation(int new_generation, int old_generation)
{
	return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
}
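
/*
 * For example: is_next_generation(1, 0) and is_next_generation(0, 255) are
 * both true because the generation counter wraps modulo 256, whereas
 * is_next_generation(2, 0) is false.
 */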


/* -transaction */

#define TCODE_LINK_INTERNAL		0xe

#define TCODE_IS_READ_REQUEST(tcode)	(((tcode) & ~1) == 4)
#define TCODE_IS_BLOCK_PACKET(tcode)	(((tcode) &  1) != 0)
#define TCODE_IS_LINK_INTERNAL(tcode)	((tcode) == TCODE_LINK_INTERNAL)
#define TCODE_IS_REQUEST(tcode)		(((tcode) &  2) == 0)
#define TCODE_IS_RESPONSE(tcode)	(((tcode) &  2) != 0)
#define TCODE_HAS_REQUEST_DATA(tcode)	(((tcode) & 12) != 4)
#define TCODE_HAS_RESPONSE_DATA(tcode)	(((tcode) & 12) != 0)
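
/*
 * Illustrative examples, using tcode constants from
 * <linux/firewire-constants.h>: for TCODE_READ_QUADLET_REQUEST (0x4),
 * TCODE_IS_READ_REQUEST() and TCODE_IS_REQUEST() are true while
 * TCODE_IS_BLOCK_PACKET() and TCODE_HAS_REQUEST_DATA() are false; for
 * TCODE_WRITE_BLOCK_REQUEST (0x1), TCODE_IS_BLOCK_PACKET() and
 * TCODE_HAS_REQUEST_DATA() are true and TCODE_IS_RESPONSE() is false.
 */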

#define LOCAL_BUS 0xffc0

/* OHCI-1394's default upper bound for physical DMA: 4 GB */
#define FW_MAX_PHYSICAL_RANGE		(1ULL << 32)

void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
int fw_get_response_length(struct fw_request *request);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
		      int rcode, void *payload, size_t length);

#define FW_PHY_CONFIG_NO_NODE_ID	-1
#define FW_PHY_CONFIG_CURRENT_GAP_COUNT	-1
void fw_send_phy_config(struct fw_card *card,
			int node_id, int generation, int gap_count);
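
/*
 * Illustrative call (an assumption about typical use): to broadcast a PHY
 * configuration packet that only adjusts the gap count and leaves the root
 * node selection unchanged, a caller can pass FW_PHY_CONFIG_NO_NODE_ID:
 *
 *	fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, generation, gap_count);
 */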

static inline bool is_ping_packet(u32 *data)
{
	return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1];
}
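
/*
 * Example values (illustrative): for a PHY ping packet addressed to phy_id 1,
 * data[0] == 0x01000000 and data[1] == 0xfeffffff (the bitwise complement of
 * data[0]), so is_ping_packet() returns true.
 */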

#endif /* _FIREWIRE_CORE_H */