/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _SCI_HOST_H_
#define _SCI_HOST_H_

#include <scsi/sas_ata.h>
#include "remote_device.h"
#include "phy.h"
#include "isci.h"
#include "remote_node_table.h"
#include "registers.h"
#include "unsolicited_frame_control.h"
#include "probe_roms.h"

struct isci_request;
struct scu_task_context;

/**
 * struct sci_power_control -
 *
 * This structure defines the fields for managing power control for direct
 * attached disk devices.
 */
struct sci_power_control {
	/**
	 * This field is set when the power control timer is running and cleared
	 * when it is not.
	 */
	bool timer_started;

	/**
	 * Timer to control when the direct attached disks can consume power.
	 */
	struct sci_timer timer;

	/**
	 * This field is used to keep track of how many phys are put into the
	 * requesters field.
	 */
	u8 phys_waiting;

	/**
	 * This field is used to keep track of how many phys have been granted
	 * permission to consume power.
	 */
	u8 phys_granted_power;

	/**
	 * This field is an array of phys that we are waiting on. The phys are
	 * direct mapped into requesters via struct sci_phy.phy_index
	 */
	struct isci_phy *requesters[SCI_MAX_PHYS];

};
struct sci_port_configuration_agent;
typedef void (*port_config_fn)(struct isci_host *,
			       struct sci_port_configuration_agent *,
			       struct isci_port *, struct isci_phy *);
bool is_port_config_apc(struct isci_host *ihost);
bool is_controller_start_complete(struct isci_host *ihost);

struct sci_port_configuration_agent {
	u16 phy_configured_mask;
	u16 phy_ready_mask;
	struct {
		u8 min_index;
		u8 max_index;
	} phy_valid_port_range[SCI_MAX_PHYS];
	bool timer_pending;
	port_config_fn link_up_handler;
	port_config_fn link_down_handler;
	struct sci_timer timer;
};

/**
 * isci_host - primary host/controller object
 * @timer: timeout start/stop operations
 * @device_table: rni (hw remote node index) to remote device lookup table
 * @available_remote_nodes: rni allocator
 * @power_control: manage device spin up
 * @io_request_sequence: generation number for tci's (task contexts)
 * @task_context_table: hw task context table
 * @remote_node_context_table: hw remote node context table
 * @completion_queue: hw-producer driver-consumer communication ring
 * @completion_queue_get: tracks the driver 'head' of the ring to notify hw
 * @logical_port_entries: min({driver|silicon}-supported-port-count)
 * @remote_node_entries: min({driver|silicon}-supported-node-count)
 * @task_context_entries: min({driver|silicon}-supported-task-count)
 * @phy_timer: phy startup timer
 * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for
 *		      the phy index is set so further notifications are not
 *		      made.  Once the phy reports link up and is made part of a
 *		      port then this bit is cleared.
 *
 */
struct isci_host {
	struct sci_base_state_machine sm;
	/* XXX can we time this externally */
	struct sci_timer timer;
	/* XXX drop reference module params directly */
	struct sci_user_parameters user_parameters;
	/* XXX no need to be a union */
	struct sci_oem_params oem_parameters;
	struct sci_port_configuration_agent port_agent;
	struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
	struct sci_remote_node_table available_remote_nodes;
	struct sci_power_control power_control;
	u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
	struct scu_task_context *task_context_table;
	dma_addr_t tc_dma;
	union scu_remote_node_context *remote_node_context_table;
	dma_addr_t rnc_dma;
	u32 *completion_queue;
	dma_addr_t cq_dma;
	u32 completion_queue_get;
	u32 logical_port_entries;
	u32 remote_node_entries;
	u32 task_context_entries;
	void *ufi_buf;
	dma_addr_t ufi_dma;
	struct sci_unsolicited_frame_control uf_control;

	/* phy startup */
	struct sci_timer phy_timer;
	/* XXX kill */
	bool phy_startup_timer_pending;
	u32 next_phy_to_start;
	/* XXX convert to unsigned long and use bitops */
	u8 invalid_phy_mask;

	/* TODO attempt dynamic interrupt coalescing scheme */
	u16 interrupt_coalesce_number;
	u32 interrupt_coalesce_timeout;
	struct smu_registers __iomem *smu_registers;
	struct scu_registers __iomem *scu_registers;

	u16 tci_head;
	u16 tci_tail;
	u16 tci_pool[SCI_MAX_IO_REQUESTS];

	int id; /* unique within a given pci device */
	struct isci_phy phys[SCI_MAX_PHYS];
	struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
	struct asd_sas_port sas_ports[SCI_MAX_PORTS];
	struct sas_ha_struct sas_ha;

	struct pci_dev *pdev;
	#define IHOST_START_PENDING 0
	#define IHOST_STOP_PENDING 1
	#define IHOST_IRQ_ENABLED 2
	unsigned long flags;
	wait_queue_head_t eventq;
	struct tasklet_struct completion_tasklet;
	spinlock_t scic_lock;
	struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
	struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
};

/**
 * enum sci_controller_states - This enumeration depicts all the states
 * for the common controller state machine.
 */
enum sci_controller_states {
	/**
	 * Simply the initial state for the base controller state machine.
	 */
	SCIC_INITIAL = 0,

	/**
	 * This state indicates that the controller is reset. The memory for
	 * the controller is in its initial state, but the controller requires
	 * initialization.
	 * This state is entered from the INITIAL state.
	 * This state is entered from the RESETTING state.
	 */
	SCIC_RESET,

	/**
	 * This state is typically an action state that indicates the controller
	 * is in the process of initialization. In this state no new IO operations
	 * are permitted.
	 * This state is entered from the RESET state.
	 */
	SCIC_INITIALIZING,

	/**
	 * This state indicates that the controller has been successfully
	 * initialized. In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZING state.
	 */
	SCIC_INITIALIZED,

	/**
	 * This state indicates that the controller is in the process of becoming
	 * ready (i.e. starting). In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZED state.
	 */
	SCIC_STARTING,

	/**
	 * This state indicates the controller is now ready. Thus, the user
	 * is able to perform IO operations on the controller.
	 * This state is entered from the STARTING state.
	 */
	SCIC_READY,

	/**
	 * This state is typically an action state that indicates the controller
	 * is in the process of resetting. Thus, the user is unable to perform
	 * IO operations on the controller. A reset is considered destructive in
	 * most cases.
	 * This state is entered from the READY state.
	 * This state is entered from the FAILED state.
	 * This state is entered from the STOPPED state.
	 */
	SCIC_RESETTING,

	/**
	 * This state indicates that the controller is in the process of stopping.
	 * In this state no new IO operations are permitted, but existing IO
	 * operations are allowed to complete.
	 * This state is entered from the READY state.
	 */
	SCIC_STOPPING,

	/**
	 * This state indicates that the controller could not successfully be
	 * initialized. In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZING state.
	 * This state is entered from the STARTING state.
	 * This state is entered from the STOPPING state.
	 * This state is entered from the RESETTING state.
	 */
	SCIC_FAILED,
};

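/*
 * Summary of the transitions documented in the per-state comments above
 * (derived only from those notes; see each state for details):
 *
 *   INITIAL -> RESET -> INITIALIZING -> INITIALIZED -> STARTING -> READY
 *   READY -> STOPPING or RESETTING
 *   RESETTING -> RESET (RESETTING is also entered from FAILED and STOPPED)
 *   INITIALIZING, STARTING, STOPPING and RESETTING may all fall into FAILED
 */
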
/**
 * struct isci_pci_info - This class represents the pci function containing the
 *    controllers. Depending on PCI SKU, there could be up to 2 controllers in
 *    the PCI function.
 */
#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT * SCI_MAX_CONTROLLERS)

struct isci_pci_info {
	struct isci_host *hosts[SCI_MAX_CONTROLLERS];
	struct isci_orom *orom;
};

static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
{
	return ihost->sas_ha.core.shost;
}

#define for_each_isci_host(id, ihost, pdev) \
	for (id = 0; id < SCI_MAX_CONTROLLERS && \
	     (ihost = to_pci_info(pdev)->hosts[id]); id++)

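/*
 * Illustrative usage sketch for for_each_isci_host(): iterate every
 * controller registered for a given PCI function, e.g. to quiesce them.
 * The local variables below are assumptions for illustration only and are
 * not part of this header:
 *
 *	struct isci_host *ihost;
 *	int i;
 *
 *	for_each_isci_host(i, ihost, pdev)
 *		sci_controller_disable_interrupts(ihost);
 */
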
static inline void wait_for_start(struct isci_host *ihost)
{
	wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
}

static inline void wait_for_stop(struct isci_host *ihost)
{
	wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
}

static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
{
	wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
}

static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
{
	wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
}

static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
{
	return dev->port->ha->lldd_ha;
}

static inline struct isci_host *idev_to_ihost(struct isci_remote_device *idev)
{
	return dev_to_ihost(idev->domain_dev);
}

/* we always use protocol engine group zero */
#define ISCI_PEG 0

/* see sci_controller_io_tag_allocate|free for how seq and tci are built */
#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)

/* these are returned by the hardware, so sanitize them */
#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
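
/*
 * Example of how a tag round-trips through the macros above, assuming the
 * driver's usual limits (SCI_MAX_SEQ == 16, SCI_MAX_IO_REQUESTS == 256);
 * the concrete values here are illustrative assumptions, not fixed by this
 * header.  The sequence number occupies the bits above bit 11 and the task
 * context index (tci) occupies the low bits:
 *
 *	u16 tag = ISCI_TAG(1, 5);	tag == 0x1005
 *	u8  seq = ISCI_TAG_SEQ(tag);	seq == 1
 *	u16 tci = ISCI_TAG_TCI(tag);	tci == 5
 */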

/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */
#define ISCI_COALESCE_BASE 9

/* expander attached sata devices require 3 rnc slots */
static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
{
	struct domain_device *dev = idev->domain_dev;

	if (dev_is_sata(dev) && dev->parent)
		return SCU_STP_REMOTE_NODE_COUNT;
	return SCU_SSP_REMOTE_NODE_COUNT;
}

/**
 * sci_controller_clear_invalid_phy() -
 *
 * This macro will clear the bit in the invalid phy mask for this controller
 * object. This is used to control messages reported for invalid link up
 * notifications.
 */
#define sci_controller_clear_invalid_phy(controller, phy) \
	((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))

static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
{
	if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
		return NULL;

	return &idev->isci_port->isci_host->pdev->dev;
}

static inline bool is_a2(struct pci_dev *pdev)
{
	return pdev->revision < 4;
}

static inline bool is_b0(struct pci_dev *pdev)
{
	return pdev->revision == 4;
}

static inline bool is_c0(struct pci_dev *pdev)
{
	return pdev->revision == 5;
}

static inline bool is_c1(struct pci_dev *pdev)
{
	return pdev->revision >= 6;
}

enum cable_selections {
	short_cable     = 0,
	long_cable      = 1,
	medium_cable    = 2,
	undefined_cable = 3
};

#define CABLE_OVERRIDE_DISABLED (0x10000)

static inline int is_cable_select_overridden(void)
{
	return cable_selection_override < CABLE_OVERRIDE_DISABLED;
}

enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy);
void validate_cable_selections(struct isci_host *ihost);
char *lookup_cable_names(enum cable_selections);

/* set hw control for 'activity', even though active enclosures seem to drive
 * the activity led on their own.  Skip setting FSENG control on 'status' due
 * to unexpected operation and 'error' due to not being a supported automatic
 * FSENG output
 */
#define SGPIO_HW_CONTROL 0x00000443

static inline int isci_gpio_count(struct isci_host *ihost)
{
	return ARRAY_SIZE(ihost->scu_registers->peg0.sgpio.output_data_select);
}

void sci_controller_post_request(struct isci_host *ihost,
				 u32 request);
void sci_controller_release_frame(struct isci_host *ihost,
				  u32 frame_index);
void sci_controller_copy_sata_response(void *response_buffer,
				       void *frame_header,
				       void *frame_buffer);
enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
							     struct isci_remote_device *idev,
							     u16 *node_id);
void sci_controller_free_remote_node_context(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	u16 node_id);

struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag);
void sci_controller_power_control_queue_insert(struct isci_host *ihost,
					       struct isci_phy *iphy);
void sci_controller_power_control_queue_remove(struct isci_host *ihost,
					       struct isci_phy *iphy);
void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
			    struct isci_phy *iphy);
void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
			      struct isci_phy *iphy);
void sci_controller_remote_device_stopped(struct isci_host *ihost,
					  struct isci_remote_device *idev);

enum sci_status sci_controller_continue_io(struct isci_request *ireq);
int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
void isci_host_start(struct Scsi_Host *);
u16 isci_alloc_tag(struct isci_host *ihost);
enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
void isci_tci_free(struct isci_host *ihost, u16 tci);
void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task);

int isci_host_init(struct isci_host *);
void isci_host_completion_routine(unsigned long data);
void isci_host_deinit(struct isci_host *);
void sci_controller_disable_interrupts(struct isci_host *ihost);
bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost);
void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status);

enum sci_status sci_controller_start_io(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq);

enum sci_status sci_controller_start_task(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq);

enum sci_status sci_controller_terminate_request(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq);

enum sci_status sci_controller_complete_io(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq);

void sci_port_configuration_agent_construct(
	struct sci_port_configuration_agent *port_agent);

enum sci_status sci_port_configuration_agent_initialize(
	struct isci_host *ihost,
	struct sci_port_configuration_agent *port_agent);

int isci_gpio_write(struct sas_ha_struct *, u8 reg_type, u8 reg_index,
		    u8 reg_count, u8 *write_data);
#endif