// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Intel Corporation */

#include "igc.h"
#include "igc_tsn.h"

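/* Check whether launchtime is enabled on any of the adapter's TX queues. */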
static bool is_any_launchtime(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		if (ring->launchtime_enable)
			return true;
	}

	return false;
}

/* Returns the TSN specific registers to their default values after
 * TSN offloading is disabled.
 */
static int igc_tsn_disable_offload(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tqavctrl;
	int i;

	if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED))
		return 0;

	adapter->cycle_time = 0;

	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);

	tqavctrl = rd32(IGC_TQAVCTRL);
	tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
		      IGC_TQAVCTRL_ENHANCED_QAV);
	wr32(IGC_TQAVCTRL, tqavctrl);

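	/* Restore each queue's Qbv gate to an always-open window (start at
	 * 0, end at one full second) and turn launchtime back off.
	 */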
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		ring->start_time = 0;
		ring->end_time = 0;
		ring->launchtime_enable = false;

		wr32(IGC_TXQCTL(i), 0);
		wr32(IGC_STQT(i), 0);
		wr32(IGC_ENDQT(i), NSEC_PER_SEC);
	}

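	/* Restore the default Qbv cycle time of one second. */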
	wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC);
	wr32(IGC_QBVCYCLET, NSEC_PER_SEC);

	adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;

	return 0;
}

static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tqavctrl, baset_l, baset_h;
	u32 sec, nsec, cycle;
	ktime_t base_time, systim;
	int i;

	if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)
		return 0;

	cycle = adapter->cycle_time;
	base_time = adapter->base_time;

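	/* Clear TSAUXC and switch the transmit packet buffer and max
	 * packet size to their TSN values.
	 */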
	wr32(IGC_TSAUXC, 0);
	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
	wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);

	tqavctrl = rd32(IGC_TQAVCTRL);
	tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
	wr32(IGC_TQAVCTRL, tqavctrl);

	wr32(IGC_QBVCYCLET_S, cycle);
	wr32(IGC_QBVCYCLET, cycle);

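	/* Program each queue's gate open (STQT) and close (ENDQT) times
	 * and its transmit queue control.
	 */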
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];
		u32 txqctl = 0;

		wr32(IGC_STQT(i), ring->start_time);
		wr32(IGC_ENDQT(i), ring->end_time);

		if (adapter->base_time) {
			/* If we have a base_time we are in "taprio"
			 * mode and we need to be strict about the
			 * cycles: only transmit a packet if it can be
			 * completed during that cycle.
			 */
			txqctl |= IGC_TXQCTL_STRICT_CYCLE |
				  IGC_TXQCTL_STRICT_END;
		}

		if (ring->launchtime_enable)
			txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;

		wr32(IGC_TXQCTL(i), txqctl);
	}

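	/* Read the current hardware time; if the requested base time is
	 * already in the past, push it forward by a whole number of cycles
	 * so the schedule starts at the next cycle boundary.
	 */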
	nsec = rd32(IGC_SYSTIML);
	sec = rd32(IGC_SYSTIMH);

	systim = ktime_set(sec, nsec);

	if (ktime_compare(systim, base_time) > 0) {
		s64 n;

		n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
		base_time = ktime_add_ns(base_time, (n + 1) * cycle);
	}

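	/* Split the base time into seconds (BASET_H) and nanoseconds
	 * (BASET_L) for the hardware registers.
	 */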
	baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);

	wr32(IGC_BASET_H, baset_h);
	wr32(IGC_BASET_L, baset_l);

	adapter->flags |= IGC_FLAG_TSN_QBV_ENABLED;

	return 0;
}

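/* Apply the current Qbv/launchtime configuration: disable TSN offload when
 * nothing needs it any longer, otherwise program the hardware.
 */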
int igc_tsn_offload_apply(struct igc_adapter *adapter)
{
	bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter);

	if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled)
		return 0;

	if (!is_any_enabled) {
		int err = igc_tsn_disable_offload(adapter);

		if (err < 0)
			return err;

		/* The BASET registers aren't cleared when writing
		 * into them; force a reset if the interface is
		 * running.
		 */
		if (netif_running(adapter->netdev))
			schedule_work(&adapter->reset_task);

		return 0;
	}

	return igc_tsn_enable_offload(adapter);
}