xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2*4882a593Smuzhiyun /* Copyright 2017-2019 NXP */
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun #include "enetc.h"
5*4882a593Smuzhiyun 
enetc_clean_cbdr(struct enetc_si * si)6*4882a593Smuzhiyun static void enetc_clean_cbdr(struct enetc_si *si)
7*4882a593Smuzhiyun {
8*4882a593Smuzhiyun 	struct enetc_cbdr *ring = &si->cbd_ring;
9*4882a593Smuzhiyun 	struct enetc_cbd *dest_cbd;
10*4882a593Smuzhiyun 	int i, status;
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun 	i = ring->next_to_clean;
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun 	while (enetc_rd_reg(ring->cir) != i) {
15*4882a593Smuzhiyun 		dest_cbd = ENETC_CBD(*ring, i);
16*4882a593Smuzhiyun 		status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
17*4882a593Smuzhiyun 		if (status)
18*4882a593Smuzhiyun 			dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n",
19*4882a593Smuzhiyun 				 status, dest_cbd->cmd);
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun 		memset(dest_cbd, 0, sizeof(*dest_cbd));
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun 		i = (i + 1) % ring->bd_count;
24*4882a593Smuzhiyun 	}
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun 	ring->next_to_clean = i;
27*4882a593Smuzhiyun }
28*4882a593Smuzhiyun 
enetc_cbd_unused(struct enetc_cbdr * r)29*4882a593Smuzhiyun static int enetc_cbd_unused(struct enetc_cbdr *r)
30*4882a593Smuzhiyun {
31*4882a593Smuzhiyun 	return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
32*4882a593Smuzhiyun 		r->bd_count;
33*4882a593Smuzhiyun }
34*4882a593Smuzhiyun 
enetc_send_cmd(struct enetc_si * si,struct enetc_cbd * cbd)35*4882a593Smuzhiyun int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
36*4882a593Smuzhiyun {
37*4882a593Smuzhiyun 	struct enetc_cbdr *ring = &si->cbd_ring;
38*4882a593Smuzhiyun 	int timeout = ENETC_CBDR_TIMEOUT;
39*4882a593Smuzhiyun 	struct enetc_cbd *dest_cbd;
40*4882a593Smuzhiyun 	int i;
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	if (unlikely(!ring->bd_base))
43*4882a593Smuzhiyun 		return -EIO;
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 	if (unlikely(!enetc_cbd_unused(ring)))
46*4882a593Smuzhiyun 		enetc_clean_cbdr(si);
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun 	i = ring->next_to_use;
49*4882a593Smuzhiyun 	dest_cbd = ENETC_CBD(*ring, i);
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	/* copy command to the ring */
52*4882a593Smuzhiyun 	*dest_cbd = *cbd;
53*4882a593Smuzhiyun 	i = (i + 1) % ring->bd_count;
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun 	ring->next_to_use = i;
56*4882a593Smuzhiyun 	/* let H/W know BD ring has been updated */
57*4882a593Smuzhiyun 	enetc_wr_reg(ring->pir, i);
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 	do {
60*4882a593Smuzhiyun 		if (enetc_rd_reg(ring->cir) == i)
61*4882a593Smuzhiyun 			break;
62*4882a593Smuzhiyun 		udelay(10); /* cannot sleep, rtnl_lock() */
63*4882a593Smuzhiyun 		timeout -= 10;
64*4882a593Smuzhiyun 	} while (timeout);
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun 	if (!timeout)
67*4882a593Smuzhiyun 		return -EBUSY;
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun 	/* CBD may writeback data, feedback up level */
70*4882a593Smuzhiyun 	*cbd = *dest_cbd;
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun 	enetc_clean_cbdr(si);
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 	return 0;
75*4882a593Smuzhiyun }
76*4882a593Smuzhiyun 
enetc_clear_mac_flt_entry(struct enetc_si * si,int index)77*4882a593Smuzhiyun int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
78*4882a593Smuzhiyun {
79*4882a593Smuzhiyun 	struct enetc_cbd cbd;
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	memset(&cbd, 0, sizeof(cbd));
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	cbd.cls = 1;
84*4882a593Smuzhiyun 	cbd.status_flags = ENETC_CBD_FLAGS_SF;
85*4882a593Smuzhiyun 	cbd.index = cpu_to_le16(index);
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	return enetc_send_cmd(si, &cbd);
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun 
enetc_set_mac_flt_entry(struct enetc_si * si,int index,char * mac_addr,int si_map)90*4882a593Smuzhiyun int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
91*4882a593Smuzhiyun 			    char *mac_addr, int si_map)
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun 	struct enetc_cbd cbd;
94*4882a593Smuzhiyun 	u32 upper;
95*4882a593Smuzhiyun 	u16 lower;
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun 	memset(&cbd, 0, sizeof(cbd));
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun 	/* fill up the "set" descriptor */
100*4882a593Smuzhiyun 	cbd.cls = 1;
101*4882a593Smuzhiyun 	cbd.status_flags = ENETC_CBD_FLAGS_SF;
102*4882a593Smuzhiyun 	cbd.index = cpu_to_le16(index);
103*4882a593Smuzhiyun 	cbd.opt[3] = cpu_to_le32(si_map);
104*4882a593Smuzhiyun 	/* enable entry */
105*4882a593Smuzhiyun 	cbd.opt[0] = cpu_to_le32(BIT(31));
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun 	upper = *(const u32 *)mac_addr;
108*4882a593Smuzhiyun 	lower = *(const u16 *)(mac_addr + 4);
109*4882a593Smuzhiyun 	cbd.addr[0] = cpu_to_le32(upper);
110*4882a593Smuzhiyun 	cbd.addr[1] = cpu_to_le32(lower);
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	return enetc_send_cmd(si, &cbd);
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun #define RFSE_ALIGN	64
116*4882a593Smuzhiyun /* Set entry in RFS table */
enetc_set_fs_entry(struct enetc_si * si,struct enetc_cmd_rfse * rfse,int index)117*4882a593Smuzhiyun int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
118*4882a593Smuzhiyun 		       int index)
119*4882a593Smuzhiyun {
120*4882a593Smuzhiyun 	struct enetc_cbd cbd = {.cmd = 0};
121*4882a593Smuzhiyun 	dma_addr_t dma, dma_align;
122*4882a593Smuzhiyun 	void *tmp, *tmp_align;
123*4882a593Smuzhiyun 	int err;
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	/* fill up the "set" descriptor */
126*4882a593Smuzhiyun 	cbd.cmd = 0;
127*4882a593Smuzhiyun 	cbd.cls = 4;
128*4882a593Smuzhiyun 	cbd.index = cpu_to_le16(index);
129*4882a593Smuzhiyun 	cbd.length = cpu_to_le16(sizeof(*rfse));
130*4882a593Smuzhiyun 	cbd.opt[3] = cpu_to_le32(0); /* SI */
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun 	tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
133*4882a593Smuzhiyun 				 &dma, GFP_KERNEL);
134*4882a593Smuzhiyun 	if (!tmp) {
135*4882a593Smuzhiyun 		dev_err(&si->pdev->dev, "DMA mapping of RFS entry failed!\n");
136*4882a593Smuzhiyun 		return -ENOMEM;
137*4882a593Smuzhiyun 	}
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun 	dma_align = ALIGN(dma, RFSE_ALIGN);
140*4882a593Smuzhiyun 	tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);
141*4882a593Smuzhiyun 	memcpy(tmp_align, rfse, sizeof(*rfse));
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun 	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
144*4882a593Smuzhiyun 	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
145*4882a593Smuzhiyun 
146*4882a593Smuzhiyun 	err = enetc_send_cmd(si, &cbd);
147*4882a593Smuzhiyun 	if (err)
148*4882a593Smuzhiyun 		dev_err(&si->pdev->dev, "FS entry add failed (%d)!", err);
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 	dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
151*4882a593Smuzhiyun 			  tmp, dma);
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	return err;
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun #define RSSE_ALIGN	64
enetc_cmd_rss_table(struct enetc_si * si,u32 * table,int count,bool read)157*4882a593Smuzhiyun static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
158*4882a593Smuzhiyun 			       bool read)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun 	struct enetc_cbd cbd = {.cmd = 0};
161*4882a593Smuzhiyun 	dma_addr_t dma, dma_align;
162*4882a593Smuzhiyun 	u8 *tmp, *tmp_align;
163*4882a593Smuzhiyun 	int err, i;
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	if (count < RSSE_ALIGN)
166*4882a593Smuzhiyun 		/* HW only takes in a full 64 entry table */
167*4882a593Smuzhiyun 		return -EINVAL;
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 	tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN,
170*4882a593Smuzhiyun 				 &dma, GFP_KERNEL);
171*4882a593Smuzhiyun 	if (!tmp) {
172*4882a593Smuzhiyun 		dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n");
173*4882a593Smuzhiyun 		return -ENOMEM;
174*4882a593Smuzhiyun 	}
175*4882a593Smuzhiyun 	dma_align = ALIGN(dma, RSSE_ALIGN);
176*4882a593Smuzhiyun 	tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN);
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	if (!read)
179*4882a593Smuzhiyun 		for (i = 0; i < count; i++)
180*4882a593Smuzhiyun 			tmp_align[i] = (u8)(table[i]);
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 	/* fill up the descriptor */
183*4882a593Smuzhiyun 	cbd.cmd = read ? 2 : 1;
184*4882a593Smuzhiyun 	cbd.cls = 3;
185*4882a593Smuzhiyun 	cbd.length = cpu_to_le16(count);
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun 	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
188*4882a593Smuzhiyun 	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun 	err = enetc_send_cmd(si, &cbd);
191*4882a593Smuzhiyun 	if (err)
192*4882a593Smuzhiyun 		dev_err(&si->pdev->dev, "RSS cmd failed (%d)!", err);
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	if (read)
195*4882a593Smuzhiyun 		for (i = 0; i < count; i++)
196*4882a593Smuzhiyun 			table[i] = tmp_align[i];
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 	dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma);
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	return err;
201*4882a593Smuzhiyun }
202*4882a593Smuzhiyun 
/* Get RSS table */
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
	/* read direction: HW fills @table via the control BD ring */
	return enetc_cmd_rss_table(si, table, count, true);
}
208*4882a593Smuzhiyun 
/* Set RSS table */
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
	/* cast away const: in the write direction (read == false)
	 * enetc_cmd_rss_table() only reads from @table
	 */
	return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
214