xref: /OK3568_Linux_fs/kernel/drivers/net/wimax/i2400m/control.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Intel Wireless WiMAX Connection 2400m
3*4882a593Smuzhiyun  * Miscellaneous control functions for managing the device
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Redistribution and use in source and binary forms, with or without
9*4882a593Smuzhiyun  * modification, are permitted provided that the following conditions
10*4882a593Smuzhiyun  * are met:
11*4882a593Smuzhiyun  *
12*4882a593Smuzhiyun  *   * Redistributions of source code must retain the above copyright
13*4882a593Smuzhiyun  *     notice, this list of conditions and the following disclaimer.
14*4882a593Smuzhiyun  *   * Redistributions in binary form must reproduce the above copyright
15*4882a593Smuzhiyun  *     notice, this list of conditions and the following disclaimer in
16*4882a593Smuzhiyun  *     the documentation and/or other materials provided with the
17*4882a593Smuzhiyun  *     distribution.
18*4882a593Smuzhiyun  *   * Neither the name of Intel Corporation nor the names of its
19*4882a593Smuzhiyun  *     contributors may be used to endorse or promote products derived
20*4882a593Smuzhiyun  *     from this software without specific prior written permission.
21*4882a593Smuzhiyun  *
22*4882a593Smuzhiyun  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23*4882a593Smuzhiyun  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24*4882a593Smuzhiyun  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25*4882a593Smuzhiyun  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26*4882a593Smuzhiyun  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27*4882a593Smuzhiyun  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28*4882a593Smuzhiyun  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29*4882a593Smuzhiyun  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30*4882a593Smuzhiyun  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31*4882a593Smuzhiyun  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32*4882a593Smuzhiyun  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33*4882a593Smuzhiyun  *
34*4882a593Smuzhiyun  *
35*4882a593Smuzhiyun  * Intel Corporation <linux-wimax@intel.com>
36*4882a593Smuzhiyun  * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
37*4882a593Smuzhiyun  *  - Initial implementation
38*4882a593Smuzhiyun  *
39*4882a593Smuzhiyun  * This is a collection of functions used to control the device (plus
40*4882a593Smuzhiyun  * a few helpers).
41*4882a593Smuzhiyun  *
42*4882a593Smuzhiyun  * There are utilities for handling TLV buffers, hooks on the device's
43*4882a593Smuzhiyun  * reports to act on device changes of state [i2400m_report_hook()],
44*4882a593Smuzhiyun  * on acks to commands [i2400m_msg_ack_hook()], a helper for sending
45*4882a593Smuzhiyun  * commands to the device and blocking until a reply arrives
46*4882a593Smuzhiyun  * [i2400m_msg_to_dev()], a few high level commands for manipulating
47*4882a593Smuzhiyun  * the device state, powersving mode and configuration plus the
48*4882a593Smuzhiyun  * routines to setup the device once communication is stablished with
49*4882a593Smuzhiyun  * it [i2400m_dev_initialize()].
50*4882a593Smuzhiyun  *
51*4882a593Smuzhiyun  * ROADMAP
52*4882a593Smuzhiyun  *
53*4882a593Smuzhiyun  * i2400m_dev_initialize()       Called by i2400m_dev_start()
54*4882a593Smuzhiyun  *   i2400m_set_init_config()
55*4882a593Smuzhiyun  *   i2400m_cmd_get_state()
56*4882a593Smuzhiyun  * i2400m_dev_shutdown()        Called by i2400m_dev_stop()
57*4882a593Smuzhiyun  *   i2400m_reset()
58*4882a593Smuzhiyun  *
59*4882a593Smuzhiyun  * i2400m_{cmd,get,set}_*()
60*4882a593Smuzhiyun  *   i2400m_msg_to_dev()
61*4882a593Smuzhiyun  *   i2400m_msg_check_status()
62*4882a593Smuzhiyun  *
63*4882a593Smuzhiyun  * i2400m_report_hook()         Called on reception of an event
64*4882a593Smuzhiyun  *   i2400m_report_state_hook()
65*4882a593Smuzhiyun  *     i2400m_tlv_buffer_walk()
66*4882a593Smuzhiyun  *     i2400m_tlv_match()
67*4882a593Smuzhiyun  *     i2400m_report_tlv_system_state()
68*4882a593Smuzhiyun  *     i2400m_report_tlv_rf_switches_status()
69*4882a593Smuzhiyun  *     i2400m_report_tlv_media_status()
70*4882a593Smuzhiyun  *   i2400m_cmd_enter_powersave()
71*4882a593Smuzhiyun  *
72*4882a593Smuzhiyun  * i2400m_msg_ack_hook()        Called on reception of a reply to a
73*4882a593Smuzhiyun  *                              command, get or set
74*4882a593Smuzhiyun  */
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun #include <stdarg.h>
77*4882a593Smuzhiyun #include "i2400m.h"
78*4882a593Smuzhiyun #include <linux/kernel.h>
79*4882a593Smuzhiyun #include <linux/slab.h>
80*4882a593Smuzhiyun #include <linux/wimax/i2400m.h>
81*4882a593Smuzhiyun #include <linux/export.h>
82*4882a593Smuzhiyun #include <linux/moduleparam.h>
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun #define D_SUBMODULE control
86*4882a593Smuzhiyun #include "debug-levels.h"
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun static int i2400m_idle_mode_disabled;/* 0 (idle mode enabled) by default */
89*4882a593Smuzhiyun module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
90*4882a593Smuzhiyun MODULE_PARM_DESC(idle_mode_disabled,
91*4882a593Smuzhiyun 		 "If true, the device will not enable idle mode negotiation "
92*4882a593Smuzhiyun 		 "with the base station (when connected) to save power.");
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun /* 0 (power saving enabled) by default */
95*4882a593Smuzhiyun static int i2400m_power_save_disabled;
96*4882a593Smuzhiyun module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
97*4882a593Smuzhiyun MODULE_PARM_DESC(power_save_disabled,
98*4882a593Smuzhiyun 		 "If true, the driver will not tell the device to enter "
99*4882a593Smuzhiyun 		 "power saving mode when it reports it is ready for it. "
100*4882a593Smuzhiyun 		 "False by default (so the device is told to do power "
101*4882a593Smuzhiyun 		 "saving).");
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun static int i2400m_passive_mode;	/* 0 (passive mode disabled) by default */
104*4882a593Smuzhiyun module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
105*4882a593Smuzhiyun MODULE_PARM_DESC(passive_mode,
106*4882a593Smuzhiyun 		 "If true, the driver will not do any device setup "
107*4882a593Smuzhiyun 		 "and leave it up to user space, who must be properly "
108*4882a593Smuzhiyun 		 "setup.");
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun /*
112*4882a593Smuzhiyun  * Return if a TLV is of a give type and size
113*4882a593Smuzhiyun  *
114*4882a593Smuzhiyun  * @tlv_hdr: pointer to the TLV
115*4882a593Smuzhiyun  * @tlv_type: type of the TLV we are looking for
116*4882a593Smuzhiyun  * @tlv_size: expected size of the TLV we are looking for (if -1,
117*4882a593Smuzhiyun  *            don't check the size). This includes the header
118*4882a593Smuzhiyun  * Returns: 0 if the TLV matches
119*4882a593Smuzhiyun  *          < 0 if it doesn't match at all
120*4882a593Smuzhiyun  *          > 0 total TLV + payload size, if the type matches, but not
121*4882a593Smuzhiyun  *              the size
122*4882a593Smuzhiyun  */
123*4882a593Smuzhiyun static
i2400m_tlv_match(const struct i2400m_tlv_hdr * tlv,enum i2400m_tlv tlv_type,ssize_t tlv_size)124*4882a593Smuzhiyun ssize_t i2400m_tlv_match(const struct i2400m_tlv_hdr *tlv,
125*4882a593Smuzhiyun 		     enum i2400m_tlv tlv_type, ssize_t tlv_size)
126*4882a593Smuzhiyun {
127*4882a593Smuzhiyun 	if (le16_to_cpu(tlv->type) != tlv_type)	/* Not our type? skip */
128*4882a593Smuzhiyun 		return -1;
129*4882a593Smuzhiyun 	if (tlv_size != -1
130*4882a593Smuzhiyun 	    && le16_to_cpu(tlv->length) + sizeof(*tlv) != tlv_size) {
131*4882a593Smuzhiyun 		size_t size = le16_to_cpu(tlv->length) + sizeof(*tlv);
132*4882a593Smuzhiyun 		printk(KERN_WARNING "W: tlv type 0x%x mismatched because of "
133*4882a593Smuzhiyun 		       "size (got %zu vs %zd expected)\n",
134*4882a593Smuzhiyun 		       tlv_type, size, tlv_size);
135*4882a593Smuzhiyun 		return size;
136*4882a593Smuzhiyun 	}
137*4882a593Smuzhiyun 	return 0;
138*4882a593Smuzhiyun }
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun /*
142*4882a593Smuzhiyun  * Given a buffer of TLVs, iterate over them
143*4882a593Smuzhiyun  *
144*4882a593Smuzhiyun  * @i2400m: device instance
145*4882a593Smuzhiyun  * @tlv_buf: pointer to the beginning of the TLV buffer
146*4882a593Smuzhiyun  * @buf_size: buffer size in bytes
147*4882a593Smuzhiyun  * @tlv_pos: seek position; this is assumed to be a pointer returned
148*4882a593Smuzhiyun  *           by i2400m_tlv_buffer_walk() [and thus, validated]. The
149*4882a593Smuzhiyun  *           TLV returned will be the one following this one.
150*4882a593Smuzhiyun  *
151*4882a593Smuzhiyun  * Usage:
152*4882a593Smuzhiyun  *
153*4882a593Smuzhiyun  * tlv_itr = NULL;
154*4882a593Smuzhiyun  * while (tlv_itr = i2400m_tlv_buffer_walk(i2400m, buf, size, tlv_itr))  {
155*4882a593Smuzhiyun  *         ...
156*4882a593Smuzhiyun  *         // Do stuff with tlv_itr, DON'T MODIFY IT
157*4882a593Smuzhiyun  *         ...
158*4882a593Smuzhiyun  * }
159*4882a593Smuzhiyun  */
160*4882a593Smuzhiyun static
i2400m_tlv_buffer_walk(struct i2400m * i2400m,const void * tlv_buf,size_t buf_size,const struct i2400m_tlv_hdr * tlv_pos)161*4882a593Smuzhiyun const struct i2400m_tlv_hdr *i2400m_tlv_buffer_walk(
162*4882a593Smuzhiyun 	struct i2400m *i2400m,
163*4882a593Smuzhiyun 	const void *tlv_buf, size_t buf_size,
164*4882a593Smuzhiyun 	const struct i2400m_tlv_hdr *tlv_pos)
165*4882a593Smuzhiyun {
166*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
167*4882a593Smuzhiyun 	const struct i2400m_tlv_hdr *tlv_top = tlv_buf + buf_size;
168*4882a593Smuzhiyun 	size_t offset, length, avail_size;
169*4882a593Smuzhiyun 	unsigned type;
170*4882a593Smuzhiyun 
171*4882a593Smuzhiyun 	if (tlv_pos == NULL)	/* Take the first one? */
172*4882a593Smuzhiyun 		tlv_pos = tlv_buf;
173*4882a593Smuzhiyun 	else			/* Nope, the next one */
174*4882a593Smuzhiyun 		tlv_pos = (void *) tlv_pos
175*4882a593Smuzhiyun 			+ le16_to_cpu(tlv_pos->length) + sizeof(*tlv_pos);
176*4882a593Smuzhiyun 	if (tlv_pos == tlv_top) {	/* buffer done */
177*4882a593Smuzhiyun 		tlv_pos = NULL;
178*4882a593Smuzhiyun 		goto error_beyond_end;
179*4882a593Smuzhiyun 	}
180*4882a593Smuzhiyun 	if (tlv_pos > tlv_top) {
181*4882a593Smuzhiyun 		tlv_pos = NULL;
182*4882a593Smuzhiyun 		WARN_ON(1);
183*4882a593Smuzhiyun 		goto error_beyond_end;
184*4882a593Smuzhiyun 	}
185*4882a593Smuzhiyun 	offset = (void *) tlv_pos - (void *) tlv_buf;
186*4882a593Smuzhiyun 	avail_size = buf_size - offset;
187*4882a593Smuzhiyun 	if (avail_size < sizeof(*tlv_pos)) {
188*4882a593Smuzhiyun 		dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], tlv @%zu: "
189*4882a593Smuzhiyun 			"short header\n", tlv_buf, buf_size, offset);
190*4882a593Smuzhiyun 		goto error_short_header;
191*4882a593Smuzhiyun 	}
192*4882a593Smuzhiyun 	type = le16_to_cpu(tlv_pos->type);
193*4882a593Smuzhiyun 	length = le16_to_cpu(tlv_pos->length);
194*4882a593Smuzhiyun 	if (avail_size < sizeof(*tlv_pos) + length) {
195*4882a593Smuzhiyun 		dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], "
196*4882a593Smuzhiyun 			"tlv type 0x%04x @%zu: "
197*4882a593Smuzhiyun 			"short data (%zu bytes vs %zu needed)\n",
198*4882a593Smuzhiyun 			tlv_buf, buf_size, type, offset, avail_size,
199*4882a593Smuzhiyun 			sizeof(*tlv_pos) + length);
200*4882a593Smuzhiyun 		goto error_short_header;
201*4882a593Smuzhiyun 	}
202*4882a593Smuzhiyun error_short_header:
203*4882a593Smuzhiyun error_beyond_end:
204*4882a593Smuzhiyun 	return tlv_pos;
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun /*
209*4882a593Smuzhiyun  * Find a TLV in a buffer of sequential TLVs
210*4882a593Smuzhiyun  *
211*4882a593Smuzhiyun  * @i2400m: device descriptor
212*4882a593Smuzhiyun  * @tlv_hdr: pointer to the first TLV in the sequence
213*4882a593Smuzhiyun  * @size: size of the buffer in bytes; all TLVs are assumed to fit
214*4882a593Smuzhiyun  *        fully in the buffer (otherwise we'll complain).
215*4882a593Smuzhiyun  * @tlv_type: type of the TLV we are looking for
216*4882a593Smuzhiyun  * @tlv_size: expected size of the TLV we are looking for (if -1,
217*4882a593Smuzhiyun  *            don't check the size). This includes the header
218*4882a593Smuzhiyun  *
219*4882a593Smuzhiyun  * Returns: NULL if the TLV is not found, otherwise a pointer to
220*4882a593Smuzhiyun  *          it. If the sizes don't match, an error is printed and NULL
221*4882a593Smuzhiyun  *          returned.
222*4882a593Smuzhiyun  */
223*4882a593Smuzhiyun static
i2400m_tlv_find(struct i2400m * i2400m,const struct i2400m_tlv_hdr * tlv_hdr,size_t size,enum i2400m_tlv tlv_type,ssize_t tlv_size)224*4882a593Smuzhiyun const struct i2400m_tlv_hdr *i2400m_tlv_find(
225*4882a593Smuzhiyun 	struct i2400m *i2400m,
226*4882a593Smuzhiyun 	const struct i2400m_tlv_hdr *tlv_hdr, size_t size,
227*4882a593Smuzhiyun 	enum i2400m_tlv tlv_type, ssize_t tlv_size)
228*4882a593Smuzhiyun {
229*4882a593Smuzhiyun 	ssize_t match;
230*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
231*4882a593Smuzhiyun 	const struct i2400m_tlv_hdr *tlv = NULL;
232*4882a593Smuzhiyun 	while ((tlv = i2400m_tlv_buffer_walk(i2400m, tlv_hdr, size, tlv))) {
233*4882a593Smuzhiyun 		match = i2400m_tlv_match(tlv, tlv_type, tlv_size);
234*4882a593Smuzhiyun 		if (match == 0)		/* found it :) */
235*4882a593Smuzhiyun 			break;
236*4882a593Smuzhiyun 		if (match > 0)
237*4882a593Smuzhiyun 			dev_warn(dev, "TLV type 0x%04x found with size "
238*4882a593Smuzhiyun 				 "mismatch (%zu vs %zd needed)\n",
239*4882a593Smuzhiyun 				 tlv_type, match, tlv_size);
240*4882a593Smuzhiyun 	}
241*4882a593Smuzhiyun 	return tlv;
242*4882a593Smuzhiyun }
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun static const struct
246*4882a593Smuzhiyun {
247*4882a593Smuzhiyun 	char *msg;
248*4882a593Smuzhiyun 	int errno;
249*4882a593Smuzhiyun } ms_to_errno[I2400M_MS_MAX] = {
250*4882a593Smuzhiyun 	[I2400M_MS_DONE_OK] = { "", 0 },
251*4882a593Smuzhiyun 	[I2400M_MS_DONE_IN_PROGRESS] = { "", 0 },
252*4882a593Smuzhiyun 	[I2400M_MS_INVALID_OP] = { "invalid opcode", -ENOSYS },
253*4882a593Smuzhiyun 	[I2400M_MS_BAD_STATE] = { "invalid state", -EILSEQ },
254*4882a593Smuzhiyun 	[I2400M_MS_ILLEGAL_VALUE] = { "illegal value", -EINVAL },
255*4882a593Smuzhiyun 	[I2400M_MS_MISSING_PARAMS] = { "missing parameters", -ENOMSG },
256*4882a593Smuzhiyun 	[I2400M_MS_VERSION_ERROR] = { "bad version", -EIO },
257*4882a593Smuzhiyun 	[I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO },
258*4882a593Smuzhiyun 	[I2400M_MS_BUSY] = { "busy", -EBUSY },
259*4882a593Smuzhiyun 	[I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
260*4882a593Smuzhiyun 	[I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ },
261*4882a593Smuzhiyun 	[I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
262*4882a593Smuzhiyun 	[I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
263*4882a593Smuzhiyun 	[I2400M_MS_NO_RF] = { "no RF", -EIO },
264*4882a593Smuzhiyun 	[I2400M_MS_NOT_READY_FOR_POWERSAVE] =
265*4882a593Smuzhiyun 		{ "not ready for powersave", -EACCES },
266*4882a593Smuzhiyun 	[I2400M_MS_THERMAL_CRITICAL] = { "thermal critical", -EL3HLT },
267*4882a593Smuzhiyun };
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun /*
271*4882a593Smuzhiyun  * i2400m_msg_check_status - translate a message's status code
272*4882a593Smuzhiyun  *
273*4882a593Smuzhiyun  * @i2400m: device descriptor
274*4882a593Smuzhiyun  * @l3l4_hdr: message header
275*4882a593Smuzhiyun  * @strbuf: buffer to place a formatted error message (unless NULL).
276*4882a593Smuzhiyun  * @strbuf_size: max amount of available space; larger messages will
277*4882a593Smuzhiyun  * be truncated.
278*4882a593Smuzhiyun  *
279*4882a593Smuzhiyun  * Returns: errno code corresponding to the status code in @l3l4_hdr
280*4882a593Smuzhiyun  *          and a message in @strbuf describing the error.
281*4882a593Smuzhiyun  */
i2400m_msg_check_status(const struct i2400m_l3l4_hdr * l3l4_hdr,char * strbuf,size_t strbuf_size)282*4882a593Smuzhiyun int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *l3l4_hdr,
283*4882a593Smuzhiyun 			    char *strbuf, size_t strbuf_size)
284*4882a593Smuzhiyun {
285*4882a593Smuzhiyun 	int result;
286*4882a593Smuzhiyun 	enum i2400m_ms status = le16_to_cpu(l3l4_hdr->status);
287*4882a593Smuzhiyun 	const char *str;
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	if (status == 0)
290*4882a593Smuzhiyun 		return 0;
291*4882a593Smuzhiyun 	if (status >= ARRAY_SIZE(ms_to_errno)) {
292*4882a593Smuzhiyun 		str = "unknown status code";
293*4882a593Smuzhiyun 		result = -EBADR;
294*4882a593Smuzhiyun 	} else {
295*4882a593Smuzhiyun 		str = ms_to_errno[status].msg;
296*4882a593Smuzhiyun 		result = ms_to_errno[status].errno;
297*4882a593Smuzhiyun 	}
298*4882a593Smuzhiyun 	if (strbuf)
299*4882a593Smuzhiyun 		snprintf(strbuf, strbuf_size, "%s (%d)", str, status);
300*4882a593Smuzhiyun 	return result;
301*4882a593Smuzhiyun }
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun /*
305*4882a593Smuzhiyun  * Act on a TLV System State reported by the device
306*4882a593Smuzhiyun  *
307*4882a593Smuzhiyun  * @i2400m: device descriptor
308*4882a593Smuzhiyun  * @ss: validated System State TLV
309*4882a593Smuzhiyun  */
310*4882a593Smuzhiyun static
i2400m_report_tlv_system_state(struct i2400m * i2400m,const struct i2400m_tlv_system_state * ss)311*4882a593Smuzhiyun void i2400m_report_tlv_system_state(struct i2400m *i2400m,
312*4882a593Smuzhiyun 				    const struct i2400m_tlv_system_state *ss)
313*4882a593Smuzhiyun {
314*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
315*4882a593Smuzhiyun 	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
316*4882a593Smuzhiyun 	enum i2400m_system_state i2400m_state = le32_to_cpu(ss->state);
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun 	d_fnstart(3, dev, "(i2400m %p ss %p [%u])\n", i2400m, ss, i2400m_state);
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 	if (i2400m->state != i2400m_state) {
321*4882a593Smuzhiyun 		i2400m->state = i2400m_state;
322*4882a593Smuzhiyun 		wake_up_all(&i2400m->state_wq);
323*4882a593Smuzhiyun 	}
324*4882a593Smuzhiyun 	switch (i2400m_state) {
325*4882a593Smuzhiyun 	case I2400M_SS_UNINITIALIZED:
326*4882a593Smuzhiyun 	case I2400M_SS_INIT:
327*4882a593Smuzhiyun 	case I2400M_SS_CONFIG:
328*4882a593Smuzhiyun 	case I2400M_SS_PRODUCTION:
329*4882a593Smuzhiyun 		wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
330*4882a593Smuzhiyun 		break;
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	case I2400M_SS_RF_OFF:
333*4882a593Smuzhiyun 	case I2400M_SS_RF_SHUTDOWN:
334*4882a593Smuzhiyun 		wimax_state_change(wimax_dev, WIMAX_ST_RADIO_OFF);
335*4882a593Smuzhiyun 		break;
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun 	case I2400M_SS_READY:
338*4882a593Smuzhiyun 	case I2400M_SS_STANDBY:
339*4882a593Smuzhiyun 	case I2400M_SS_SLEEPACTIVE:
340*4882a593Smuzhiyun 		wimax_state_change(wimax_dev, WIMAX_ST_READY);
341*4882a593Smuzhiyun 		break;
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 	case I2400M_SS_CONNECTING:
344*4882a593Smuzhiyun 	case I2400M_SS_WIMAX_CONNECTED:
345*4882a593Smuzhiyun 		wimax_state_change(wimax_dev, WIMAX_ST_READY);
346*4882a593Smuzhiyun 		break;
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun 	case I2400M_SS_SCAN:
349*4882a593Smuzhiyun 	case I2400M_SS_OUT_OF_ZONE:
350*4882a593Smuzhiyun 		wimax_state_change(wimax_dev, WIMAX_ST_SCANNING);
351*4882a593Smuzhiyun 		break;
352*4882a593Smuzhiyun 
353*4882a593Smuzhiyun 	case I2400M_SS_IDLE:
354*4882a593Smuzhiyun 		d_printf(1, dev, "entering BS-negotiated idle mode\n");
355*4882a593Smuzhiyun 		fallthrough;
356*4882a593Smuzhiyun 	case I2400M_SS_DISCONNECTING:
357*4882a593Smuzhiyun 	case I2400M_SS_DATA_PATH_CONNECTED:
358*4882a593Smuzhiyun 		wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED);
359*4882a593Smuzhiyun 		break;
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 	default:
362*4882a593Smuzhiyun 		/* Huh? just in case, shut it down */
363*4882a593Smuzhiyun 		dev_err(dev, "HW BUG? unknown state %u: shutting down\n",
364*4882a593Smuzhiyun 			i2400m_state);
365*4882a593Smuzhiyun 		i2400m_reset(i2400m, I2400M_RT_WARM);
366*4882a593Smuzhiyun 		break;
367*4882a593Smuzhiyun 	}
368*4882a593Smuzhiyun 	d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
369*4882a593Smuzhiyun 		i2400m, ss, i2400m_state);
370*4882a593Smuzhiyun }
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun 
373*4882a593Smuzhiyun /*
374*4882a593Smuzhiyun  * Parse and act on a TLV Media Status sent by the device
375*4882a593Smuzhiyun  *
376*4882a593Smuzhiyun  * @i2400m: device descriptor
377*4882a593Smuzhiyun  * @ms: validated Media Status TLV
378*4882a593Smuzhiyun  *
379*4882a593Smuzhiyun  * This will set the carrier up on down based on the device's link
380*4882a593Smuzhiyun  * report. This is done asides of what the WiMAX stack does based on
381*4882a593Smuzhiyun  * the device's state as sometimes we need to do a link-renew (the BS
382*4882a593Smuzhiyun  * wants us to renew a DHCP lease, for example).
383*4882a593Smuzhiyun  *
384*4882a593Smuzhiyun  * In fact, doc says that every time we get a link-up, we should do a
385*4882a593Smuzhiyun  * DHCP negotiation...
386*4882a593Smuzhiyun  */
387*4882a593Smuzhiyun static
i2400m_report_tlv_media_status(struct i2400m * i2400m,const struct i2400m_tlv_media_status * ms)388*4882a593Smuzhiyun void i2400m_report_tlv_media_status(struct i2400m *i2400m,
389*4882a593Smuzhiyun 				    const struct i2400m_tlv_media_status *ms)
390*4882a593Smuzhiyun {
391*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
392*4882a593Smuzhiyun 	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
393*4882a593Smuzhiyun 	struct net_device *net_dev = wimax_dev->net_dev;
394*4882a593Smuzhiyun 	enum i2400m_media_status status = le32_to_cpu(ms->media_status);
395*4882a593Smuzhiyun 
396*4882a593Smuzhiyun 	d_fnstart(3, dev, "(i2400m %p ms %p [%u])\n", i2400m, ms, status);
397*4882a593Smuzhiyun 
398*4882a593Smuzhiyun 	switch (status) {
399*4882a593Smuzhiyun 	case I2400M_MEDIA_STATUS_LINK_UP:
400*4882a593Smuzhiyun 		netif_carrier_on(net_dev);
401*4882a593Smuzhiyun 		break;
402*4882a593Smuzhiyun 	case I2400M_MEDIA_STATUS_LINK_DOWN:
403*4882a593Smuzhiyun 		netif_carrier_off(net_dev);
404*4882a593Smuzhiyun 		break;
405*4882a593Smuzhiyun 	/*
406*4882a593Smuzhiyun 	 * This is the network telling us we need to retrain the DHCP
407*4882a593Smuzhiyun 	 * lease -- so far, we are trusting the WiMAX Network Service
408*4882a593Smuzhiyun 	 * in user space to pick this up and poke the DHCP client.
409*4882a593Smuzhiyun 	 */
410*4882a593Smuzhiyun 	case I2400M_MEDIA_STATUS_LINK_RENEW:
411*4882a593Smuzhiyun 		netif_carrier_on(net_dev);
412*4882a593Smuzhiyun 		break;
413*4882a593Smuzhiyun 	default:
414*4882a593Smuzhiyun 		dev_err(dev, "HW BUG? unknown media status %u\n",
415*4882a593Smuzhiyun 			status);
416*4882a593Smuzhiyun 	}
417*4882a593Smuzhiyun 	d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n",
418*4882a593Smuzhiyun 		i2400m, ms, status);
419*4882a593Smuzhiyun }
420*4882a593Smuzhiyun 
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun /*
423*4882a593Smuzhiyun  * Process a TLV from a 'state report'
424*4882a593Smuzhiyun  *
425*4882a593Smuzhiyun  * @i2400m: device descriptor
426*4882a593Smuzhiyun  * @tlv: pointer to the TLV header; it has been already validated for
427*4882a593Smuzhiyun  *     consistent size.
428*4882a593Smuzhiyun  * @tag: for error messages
429*4882a593Smuzhiyun  *
430*4882a593Smuzhiyun  * Act on the TLVs from a 'state report'.
431*4882a593Smuzhiyun  */
432*4882a593Smuzhiyun static
i2400m_report_state_parse_tlv(struct i2400m * i2400m,const struct i2400m_tlv_hdr * tlv,const char * tag)433*4882a593Smuzhiyun void i2400m_report_state_parse_tlv(struct i2400m *i2400m,
434*4882a593Smuzhiyun 				   const struct i2400m_tlv_hdr *tlv,
435*4882a593Smuzhiyun 				   const char *tag)
436*4882a593Smuzhiyun {
437*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
438*4882a593Smuzhiyun 	const struct i2400m_tlv_media_status *ms;
439*4882a593Smuzhiyun 	const struct i2400m_tlv_system_state *ss;
440*4882a593Smuzhiyun 	const struct i2400m_tlv_rf_switches_status *rfss;
441*4882a593Smuzhiyun 
442*4882a593Smuzhiyun 	if (0 == i2400m_tlv_match(tlv, I2400M_TLV_SYSTEM_STATE, sizeof(*ss))) {
443*4882a593Smuzhiyun 		ss = container_of(tlv, typeof(*ss), hdr);
444*4882a593Smuzhiyun 		d_printf(2, dev, "%s: system state TLV "
445*4882a593Smuzhiyun 			 "found (0x%04x), state 0x%08x\n",
446*4882a593Smuzhiyun 			 tag, I2400M_TLV_SYSTEM_STATE,
447*4882a593Smuzhiyun 			 le32_to_cpu(ss->state));
448*4882a593Smuzhiyun 		i2400m_report_tlv_system_state(i2400m, ss);
449*4882a593Smuzhiyun 	}
450*4882a593Smuzhiyun 	if (0 == i2400m_tlv_match(tlv, I2400M_TLV_RF_STATUS, sizeof(*rfss))) {
451*4882a593Smuzhiyun 		rfss = container_of(tlv, typeof(*rfss), hdr);
452*4882a593Smuzhiyun 		d_printf(2, dev, "%s: RF status TLV "
453*4882a593Smuzhiyun 			 "found (0x%04x), sw 0x%02x hw 0x%02x\n",
454*4882a593Smuzhiyun 			 tag, I2400M_TLV_RF_STATUS,
455*4882a593Smuzhiyun 			 le32_to_cpu(rfss->sw_rf_switch),
456*4882a593Smuzhiyun 			 le32_to_cpu(rfss->hw_rf_switch));
457*4882a593Smuzhiyun 		i2400m_report_tlv_rf_switches_status(i2400m, rfss);
458*4882a593Smuzhiyun 	}
459*4882a593Smuzhiyun 	if (0 == i2400m_tlv_match(tlv, I2400M_TLV_MEDIA_STATUS, sizeof(*ms))) {
460*4882a593Smuzhiyun 		ms = container_of(tlv, typeof(*ms), hdr);
461*4882a593Smuzhiyun 		d_printf(2, dev, "%s: Media Status TLV: %u\n",
462*4882a593Smuzhiyun 			 tag, le32_to_cpu(ms->media_status));
463*4882a593Smuzhiyun 		i2400m_report_tlv_media_status(i2400m, ms);
464*4882a593Smuzhiyun 	}
465*4882a593Smuzhiyun }
466*4882a593Smuzhiyun 
467*4882a593Smuzhiyun 
468*4882a593Smuzhiyun /*
469*4882a593Smuzhiyun  * Parse a 'state report' and extract information
470*4882a593Smuzhiyun  *
471*4882a593Smuzhiyun  * @i2400m: device descriptor
472*4882a593Smuzhiyun  * @l3l4_hdr: pointer to message; it has been already validated for
473*4882a593Smuzhiyun  *            consistent size.
474*4882a593Smuzhiyun  * @size: size of the message (header + payload). The header length
475*4882a593Smuzhiyun  *        declaration is assumed to be congruent with @size (as in
476*4882a593Smuzhiyun  *        sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
477*4882a593Smuzhiyun  *
478*4882a593Smuzhiyun  * Walk over the TLVs in a report state and act on them.
479*4882a593Smuzhiyun  */
480*4882a593Smuzhiyun static
i2400m_report_state_hook(struct i2400m * i2400m,const struct i2400m_l3l4_hdr * l3l4_hdr,size_t size,const char * tag)481*4882a593Smuzhiyun void i2400m_report_state_hook(struct i2400m *i2400m,
482*4882a593Smuzhiyun 			      const struct i2400m_l3l4_hdr *l3l4_hdr,
483*4882a593Smuzhiyun 			      size_t size, const char *tag)
484*4882a593Smuzhiyun {
485*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
486*4882a593Smuzhiyun 	const struct i2400m_tlv_hdr *tlv;
487*4882a593Smuzhiyun 	size_t tlv_size = le16_to_cpu(l3l4_hdr->length);
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun 	d_fnstart(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s)\n",
490*4882a593Smuzhiyun 		  i2400m, l3l4_hdr, size, tag);
491*4882a593Smuzhiyun 	tlv = NULL;
492*4882a593Smuzhiyun 
493*4882a593Smuzhiyun 	while ((tlv = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl,
494*4882a593Smuzhiyun 					     tlv_size, tlv)))
495*4882a593Smuzhiyun 		i2400m_report_state_parse_tlv(i2400m, tlv, tag);
496*4882a593Smuzhiyun 	d_fnend(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s) = void\n",
497*4882a593Smuzhiyun 		i2400m, l3l4_hdr, size, tag);
498*4882a593Smuzhiyun }
499*4882a593Smuzhiyun 
500*4882a593Smuzhiyun 
501*4882a593Smuzhiyun /*
502*4882a593Smuzhiyun  * i2400m_report_hook - (maybe) act on a report
503*4882a593Smuzhiyun  *
504*4882a593Smuzhiyun  * @i2400m: device descriptor
505*4882a593Smuzhiyun  * @l3l4_hdr: pointer to message; it has been already validated for
506*4882a593Smuzhiyun  *            consistent size.
507*4882a593Smuzhiyun  * @size: size of the message (header + payload). The header length
508*4882a593Smuzhiyun  *        declaration is assumed to be congruent with @size (as in
509*4882a593Smuzhiyun  *        sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
510*4882a593Smuzhiyun  *
511*4882a593Smuzhiyun  * Extract information we might need (like carrien on/off) from a
512*4882a593Smuzhiyun  * device report.
513*4882a593Smuzhiyun  */
i2400m_report_hook(struct i2400m * i2400m,const struct i2400m_l3l4_hdr * l3l4_hdr,size_t size)514*4882a593Smuzhiyun void i2400m_report_hook(struct i2400m *i2400m,
515*4882a593Smuzhiyun 			const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size)
516*4882a593Smuzhiyun {
517*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
518*4882a593Smuzhiyun 	unsigned msg_type;
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun 	d_fnstart(3, dev, "(i2400m %p l3l4_hdr %p size %zu)\n",
521*4882a593Smuzhiyun 		  i2400m, l3l4_hdr, size);
522*4882a593Smuzhiyun 	/* Chew on the message, we might need some information from
523*4882a593Smuzhiyun 	 * here */
524*4882a593Smuzhiyun 	msg_type = le16_to_cpu(l3l4_hdr->type);
525*4882a593Smuzhiyun 	switch (msg_type) {
526*4882a593Smuzhiyun 	case I2400M_MT_REPORT_STATE:	/* carrier detection... */
527*4882a593Smuzhiyun 		i2400m_report_state_hook(i2400m,
528*4882a593Smuzhiyun 					 l3l4_hdr, size, "REPORT STATE");
529*4882a593Smuzhiyun 		break;
530*4882a593Smuzhiyun 	/* If the device is ready for power save, then ask it to do
531*4882a593Smuzhiyun 	 * it. */
532*4882a593Smuzhiyun 	case I2400M_MT_REPORT_POWERSAVE_READY:	/* zzzzz */
533*4882a593Smuzhiyun 		if (l3l4_hdr->status == cpu_to_le16(I2400M_MS_DONE_OK)) {
534*4882a593Smuzhiyun 			if (i2400m_power_save_disabled)
535*4882a593Smuzhiyun 				d_printf(1, dev, "ready for powersave, "
536*4882a593Smuzhiyun 					 "not requesting (disabled by module "
537*4882a593Smuzhiyun 					 "parameter)\n");
538*4882a593Smuzhiyun 			else {
539*4882a593Smuzhiyun 				d_printf(1, dev, "ready for powersave, "
540*4882a593Smuzhiyun 					 "requesting\n");
541*4882a593Smuzhiyun 				i2400m_cmd_enter_powersave(i2400m);
542*4882a593Smuzhiyun 			}
543*4882a593Smuzhiyun 		}
544*4882a593Smuzhiyun 		break;
545*4882a593Smuzhiyun 	}
546*4882a593Smuzhiyun 	d_fnend(3, dev, "(i2400m %p l3l4_hdr %p size %zu) = void\n",
547*4882a593Smuzhiyun 		i2400m, l3l4_hdr, size);
548*4882a593Smuzhiyun }
549*4882a593Smuzhiyun 
550*4882a593Smuzhiyun 
551*4882a593Smuzhiyun /*
552*4882a593Smuzhiyun  * i2400m_msg_ack_hook - process cmd/set/get ack for internal status
553*4882a593Smuzhiyun  *
554*4882a593Smuzhiyun  * @i2400m: device descriptor
555*4882a593Smuzhiyun  * @l3l4_hdr: pointer to message; it has been already validated for
556*4882a593Smuzhiyun  *            consistent size.
557*4882a593Smuzhiyun  * @size: size of the message
558*4882a593Smuzhiyun  *
559*4882a593Smuzhiyun  * Extract information we might need from acks to commands and act on
560*4882a593Smuzhiyun  * it. This is akin to i2400m_report_hook(). Note most of this
561*4882a593Smuzhiyun  * processing should be done in the function that calls the
562*4882a593Smuzhiyun  * command. This is here for some cases where it can't happen...
563*4882a593Smuzhiyun  */
i2400m_msg_ack_hook(struct i2400m * i2400m,const struct i2400m_l3l4_hdr * l3l4_hdr,size_t size)564*4882a593Smuzhiyun static void i2400m_msg_ack_hook(struct i2400m *i2400m,
565*4882a593Smuzhiyun 				 const struct i2400m_l3l4_hdr *l3l4_hdr,
566*4882a593Smuzhiyun 				 size_t size)
567*4882a593Smuzhiyun {
568*4882a593Smuzhiyun 	int result;
569*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
570*4882a593Smuzhiyun 	unsigned int ack_type;
571*4882a593Smuzhiyun 	char strerr[32];
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun 	/* Chew on the message, we might need some information from
574*4882a593Smuzhiyun 	 * here */
575*4882a593Smuzhiyun 	ack_type = le16_to_cpu(l3l4_hdr->type);
576*4882a593Smuzhiyun 	switch (ack_type) {
577*4882a593Smuzhiyun 	case I2400M_MT_CMD_ENTER_POWERSAVE:
578*4882a593Smuzhiyun 		/* This is just left here for the sake of example, as
579*4882a593Smuzhiyun 		 * the processing is done somewhere else. */
580*4882a593Smuzhiyun 		if (0) {
581*4882a593Smuzhiyun 			result = i2400m_msg_check_status(
582*4882a593Smuzhiyun 				l3l4_hdr, strerr, sizeof(strerr));
583*4882a593Smuzhiyun 			if (result >= 0)
584*4882a593Smuzhiyun 				d_printf(1, dev, "ready for power save: %zd\n",
585*4882a593Smuzhiyun 					 size);
586*4882a593Smuzhiyun 		}
587*4882a593Smuzhiyun 		break;
588*4882a593Smuzhiyun 	}
589*4882a593Smuzhiyun }
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun 
592*4882a593Smuzhiyun /*
593*4882a593Smuzhiyun  * i2400m_msg_size_check() - verify message size and header are congruent
594*4882a593Smuzhiyun  *
595*4882a593Smuzhiyun  * It is ok if the total message size is larger than the expected
596*4882a593Smuzhiyun  * size, as there can be padding.
597*4882a593Smuzhiyun  */
i2400m_msg_size_check(struct i2400m * i2400m,const struct i2400m_l3l4_hdr * l3l4_hdr,size_t msg_size)598*4882a593Smuzhiyun int i2400m_msg_size_check(struct i2400m *i2400m,
599*4882a593Smuzhiyun 			  const struct i2400m_l3l4_hdr *l3l4_hdr,
600*4882a593Smuzhiyun 			  size_t msg_size)
601*4882a593Smuzhiyun {
602*4882a593Smuzhiyun 	int result;
603*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
604*4882a593Smuzhiyun 	size_t expected_size;
605*4882a593Smuzhiyun 	d_fnstart(4, dev, "(i2400m %p l3l4_hdr %p msg_size %zu)\n",
606*4882a593Smuzhiyun 		  i2400m, l3l4_hdr, msg_size);
607*4882a593Smuzhiyun 	if (msg_size < sizeof(*l3l4_hdr)) {
608*4882a593Smuzhiyun 		dev_err(dev, "bad size for message header "
609*4882a593Smuzhiyun 			"(expected at least %zu, got %zu)\n",
610*4882a593Smuzhiyun 			(size_t) sizeof(*l3l4_hdr), msg_size);
611*4882a593Smuzhiyun 		result = -EIO;
612*4882a593Smuzhiyun 		goto error_hdr_size;
613*4882a593Smuzhiyun 	}
614*4882a593Smuzhiyun 	expected_size = le16_to_cpu(l3l4_hdr->length) + sizeof(*l3l4_hdr);
615*4882a593Smuzhiyun 	if (msg_size < expected_size) {
616*4882a593Smuzhiyun 		dev_err(dev, "bad size for message code 0x%04x (expected %zu, "
617*4882a593Smuzhiyun 			"got %zu)\n", le16_to_cpu(l3l4_hdr->type),
618*4882a593Smuzhiyun 			expected_size, msg_size);
619*4882a593Smuzhiyun 		result = -EIO;
620*4882a593Smuzhiyun 	} else
621*4882a593Smuzhiyun 		result = 0;
622*4882a593Smuzhiyun error_hdr_size:
623*4882a593Smuzhiyun 	d_fnend(4, dev,
624*4882a593Smuzhiyun 		"(i2400m %p l3l4_hdr %p msg_size %zu) = %d\n",
625*4882a593Smuzhiyun 		i2400m, l3l4_hdr, msg_size, result);
626*4882a593Smuzhiyun 	return result;
627*4882a593Smuzhiyun }
628*4882a593Smuzhiyun 
629*4882a593Smuzhiyun 
630*4882a593Smuzhiyun 
631*4882a593Smuzhiyun /*
632*4882a593Smuzhiyun  * Cancel a wait for a command ACK
633*4882a593Smuzhiyun  *
634*4882a593Smuzhiyun  * @i2400m: device descriptor
635*4882a593Smuzhiyun  * @code: [negative] errno code to cancel with (don't use
636*4882a593Smuzhiyun  *     -EINPROGRESS)
637*4882a593Smuzhiyun  *
638*4882a593Smuzhiyun  * If there is an ack already filled out, free it.
639*4882a593Smuzhiyun  */
i2400m_msg_to_dev_cancel_wait(struct i2400m * i2400m,int code)640*4882a593Smuzhiyun void i2400m_msg_to_dev_cancel_wait(struct i2400m *i2400m, int code)
641*4882a593Smuzhiyun {
642*4882a593Smuzhiyun 	struct sk_buff *ack_skb;
643*4882a593Smuzhiyun 	unsigned long flags;
644*4882a593Smuzhiyun 
645*4882a593Smuzhiyun 	spin_lock_irqsave(&i2400m->rx_lock, flags);
646*4882a593Smuzhiyun 	ack_skb = i2400m->ack_skb;
647*4882a593Smuzhiyun 	if (ack_skb && !IS_ERR(ack_skb))
648*4882a593Smuzhiyun 		kfree_skb(ack_skb);
649*4882a593Smuzhiyun 	i2400m->ack_skb = ERR_PTR(code);
650*4882a593Smuzhiyun 	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
651*4882a593Smuzhiyun }
652*4882a593Smuzhiyun 
653*4882a593Smuzhiyun 
654*4882a593Smuzhiyun /**
655*4882a593Smuzhiyun  * i2400m_msg_to_dev - Send a control message to the device and get a response
656*4882a593Smuzhiyun  *
657*4882a593Smuzhiyun  * @i2400m: device descriptor
658*4882a593Smuzhiyun  *
659*4882a593Smuzhiyun  * @buf: pointer to the buffer containing the message to be sent; it
660*4882a593Smuzhiyun  *           has to start with a &struct i2400M_l3l4_hdr and then
661*4882a593Smuzhiyun  *           followed by the payload. Once this function returns, the
662*4882a593Smuzhiyun  *           buffer can be reused.
663*4882a593Smuzhiyun  *
664*4882a593Smuzhiyun  * @buf_len: buffer size
665*4882a593Smuzhiyun  *
666*4882a593Smuzhiyun  * Returns:
667*4882a593Smuzhiyun  *
668*4882a593Smuzhiyun  * Pointer to skb containing the ack message. You need to check the
669*4882a593Smuzhiyun  * pointer with IS_ERR(), as it might be an error code. Error codes
670*4882a593Smuzhiyun  * could happen because:
671*4882a593Smuzhiyun  *
672*4882a593Smuzhiyun  *  - the message wasn't formatted correctly
673*4882a593Smuzhiyun  *  - couldn't send the message
674*4882a593Smuzhiyun  *  - failed waiting for a response
675*4882a593Smuzhiyun  *  - the ack message wasn't formatted correctly
676*4882a593Smuzhiyun  *
677*4882a593Smuzhiyun  * The returned skb has been allocated with wimax_msg_to_user_alloc(),
678*4882a593Smuzhiyun  * it contains the response in a netlink attribute and is ready to be
679*4882a593Smuzhiyun  * passed up to user space with wimax_msg_to_user_send(). To access
680*4882a593Smuzhiyun  * the payload and its length, use wimax_msg_{data,len}() on the skb.
681*4882a593Smuzhiyun  *
682*4882a593Smuzhiyun  * The skb has to be freed with kfree_skb() once done.
683*4882a593Smuzhiyun  *
684*4882a593Smuzhiyun  * Description:
685*4882a593Smuzhiyun  *
686*4882a593Smuzhiyun  * This function delivers a message/command to the device and waits
687*4882a593Smuzhiyun  * for an ack to be received. The format is described in
688*4882a593Smuzhiyun  * linux/wimax/i2400m.h. In summary, a command/get/set is followed by an
689*4882a593Smuzhiyun  * ack.
690*4882a593Smuzhiyun  *
691*4882a593Smuzhiyun  * This function will not check the ack status, that's left up to the
692*4882a593Smuzhiyun  * caller.  Once done with the ack skb, it has to be kfree_skb()ed.
693*4882a593Smuzhiyun  *
694*4882a593Smuzhiyun  * The i2400m handles only one message at the same time, thus we need
695*4882a593Smuzhiyun  * the mutex to exclude other players.
696*4882a593Smuzhiyun  *
697*4882a593Smuzhiyun  * We write the message and then wait for an answer to come back. The
698*4882a593Smuzhiyun  * RX path intercepts control messages and handles them in
699*4882a593Smuzhiyun  * i2400m_rx_ctl(). Reports (notifications) are (maybe) processed
700*4882a593Smuzhiyun  * locally and then forwarded (as needed) to user space on the WiMAX
701*4882a593Smuzhiyun  * stack message pipe. Acks are saved and passed back to us through an
702*4882a593Smuzhiyun  * skb in i2400m->ack_skb which is ready to be given to generic
703*4882a593Smuzhiyun  * netlink if need be.
704*4882a593Smuzhiyun  */
i2400m_msg_to_dev(struct i2400m * i2400m,const void * buf,size_t buf_len)705*4882a593Smuzhiyun struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
706*4882a593Smuzhiyun 				  const void *buf, size_t buf_len)
707*4882a593Smuzhiyun {
708*4882a593Smuzhiyun 	int result;
709*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
710*4882a593Smuzhiyun 	const struct i2400m_l3l4_hdr *msg_l3l4_hdr;
711*4882a593Smuzhiyun 	struct sk_buff *ack_skb;
712*4882a593Smuzhiyun 	const struct i2400m_l3l4_hdr *ack_l3l4_hdr;
713*4882a593Smuzhiyun 	size_t ack_len;
714*4882a593Smuzhiyun 	int ack_timeout;
715*4882a593Smuzhiyun 	unsigned msg_type;
716*4882a593Smuzhiyun 	unsigned long flags;
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun 	d_fnstart(3, dev, "(i2400m %p buf %p len %zu)\n",
719*4882a593Smuzhiyun 		  i2400m, buf, buf_len);
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun 	rmb();		/* Make sure we see what i2400m_dev_reset_handle() */
722*4882a593Smuzhiyun 	if (i2400m->boot_mode)
723*4882a593Smuzhiyun 		return ERR_PTR(-EL3RST);
724*4882a593Smuzhiyun 
725*4882a593Smuzhiyun 	msg_l3l4_hdr = buf;
726*4882a593Smuzhiyun 	/* Check msg & payload consistency */
727*4882a593Smuzhiyun 	result = i2400m_msg_size_check(i2400m, msg_l3l4_hdr, buf_len);
728*4882a593Smuzhiyun 	if (result < 0)
729*4882a593Smuzhiyun 		goto error_bad_msg;
730*4882a593Smuzhiyun 	msg_type = le16_to_cpu(msg_l3l4_hdr->type);
731*4882a593Smuzhiyun 	d_printf(1, dev, "CMD/GET/SET 0x%04x %zu bytes\n",
732*4882a593Smuzhiyun 		 msg_type, buf_len);
733*4882a593Smuzhiyun 	d_dump(2, dev, buf, buf_len);
734*4882a593Smuzhiyun 
735*4882a593Smuzhiyun 	/* Setup the completion, ack_skb ("we are waiting") and send
736*4882a593Smuzhiyun 	 * the message to the device */
737*4882a593Smuzhiyun 	mutex_lock(&i2400m->msg_mutex);
738*4882a593Smuzhiyun 	spin_lock_irqsave(&i2400m->rx_lock, flags);
739*4882a593Smuzhiyun 	i2400m->ack_skb = ERR_PTR(-EINPROGRESS);
740*4882a593Smuzhiyun 	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
741*4882a593Smuzhiyun 	init_completion(&i2400m->msg_completion);
742*4882a593Smuzhiyun 	result = i2400m_tx(i2400m, buf, buf_len, I2400M_PT_CTRL);
743*4882a593Smuzhiyun 	if (result < 0) {
744*4882a593Smuzhiyun 		dev_err(dev, "can't send message 0x%04x: %d\n",
745*4882a593Smuzhiyun 			le16_to_cpu(msg_l3l4_hdr->type), result);
746*4882a593Smuzhiyun 		goto error_tx;
747*4882a593Smuzhiyun 	}
748*4882a593Smuzhiyun 
749*4882a593Smuzhiyun 	/* Some commands take longer to execute because of crypto ops,
750*4882a593Smuzhiyun 	 * so we give them some more leeway on timeout */
751*4882a593Smuzhiyun 	switch (msg_type) {
752*4882a593Smuzhiyun 	case I2400M_MT_GET_TLS_OPERATION_RESULT:
753*4882a593Smuzhiyun 	case I2400M_MT_CMD_SEND_EAP_RESPONSE:
754*4882a593Smuzhiyun 		ack_timeout = 5 * HZ;
755*4882a593Smuzhiyun 		break;
756*4882a593Smuzhiyun 	default:
757*4882a593Smuzhiyun 		ack_timeout = HZ;
758*4882a593Smuzhiyun 	}
759*4882a593Smuzhiyun 
760*4882a593Smuzhiyun 	if (unlikely(i2400m->trace_msg_from_user))
761*4882a593Smuzhiyun 		wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL);
762*4882a593Smuzhiyun 	/* The RX path in rx.c will put any response for this message
763*4882a593Smuzhiyun 	 * in i2400m->ack_skb and wake us up. If we cancel the wait,
764*4882a593Smuzhiyun 	 * we need to change the value of i2400m->ack_skb to something
765*4882a593Smuzhiyun 	 * not -EINPROGRESS so RX knows there is no one waiting. */
766*4882a593Smuzhiyun 	result = wait_for_completion_interruptible_timeout(
767*4882a593Smuzhiyun 		&i2400m->msg_completion, ack_timeout);
768*4882a593Smuzhiyun 	if (result == 0) {
769*4882a593Smuzhiyun 		dev_err(dev, "timeout waiting for reply to message 0x%04x\n",
770*4882a593Smuzhiyun 			msg_type);
771*4882a593Smuzhiyun 		result = -ETIMEDOUT;
772*4882a593Smuzhiyun 		i2400m_msg_to_dev_cancel_wait(i2400m, result);
773*4882a593Smuzhiyun 		goto error_wait_for_completion;
774*4882a593Smuzhiyun 	} else if (result < 0) {
775*4882a593Smuzhiyun 		dev_err(dev, "error waiting for reply to message 0x%04x: %d\n",
776*4882a593Smuzhiyun 			msg_type, result);
777*4882a593Smuzhiyun 		i2400m_msg_to_dev_cancel_wait(i2400m, result);
778*4882a593Smuzhiyun 		goto error_wait_for_completion;
779*4882a593Smuzhiyun 	}
780*4882a593Smuzhiyun 
781*4882a593Smuzhiyun 	/* Pull out the ack data from i2400m->ack_skb -- see if it is
782*4882a593Smuzhiyun 	 * an error and act accordingly */
783*4882a593Smuzhiyun 	spin_lock_irqsave(&i2400m->rx_lock, flags);
784*4882a593Smuzhiyun 	ack_skb = i2400m->ack_skb;
785*4882a593Smuzhiyun 	if (IS_ERR(ack_skb))
786*4882a593Smuzhiyun 		result = PTR_ERR(ack_skb);
787*4882a593Smuzhiyun 	else
788*4882a593Smuzhiyun 		result = 0;
789*4882a593Smuzhiyun 	i2400m->ack_skb = NULL;
790*4882a593Smuzhiyun 	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
791*4882a593Smuzhiyun 	if (result < 0)
792*4882a593Smuzhiyun 		goto error_ack_status;
793*4882a593Smuzhiyun 	ack_l3l4_hdr = wimax_msg_data_len(ack_skb, &ack_len);
794*4882a593Smuzhiyun 
795*4882a593Smuzhiyun 	/* Check the ack and deliver it if it is ok */
796*4882a593Smuzhiyun 	if (unlikely(i2400m->trace_msg_from_user))
797*4882a593Smuzhiyun 		wimax_msg(&i2400m->wimax_dev, "echo",
798*4882a593Smuzhiyun 			  ack_l3l4_hdr, ack_len, GFP_KERNEL);
799*4882a593Smuzhiyun 	result = i2400m_msg_size_check(i2400m, ack_l3l4_hdr, ack_len);
800*4882a593Smuzhiyun 	if (result < 0) {
801*4882a593Smuzhiyun 		dev_err(dev, "HW BUG? reply to message 0x%04x: %d\n",
802*4882a593Smuzhiyun 			msg_type, result);
803*4882a593Smuzhiyun 		goto error_bad_ack_len;
804*4882a593Smuzhiyun 	}
805*4882a593Smuzhiyun 	if (msg_type != le16_to_cpu(ack_l3l4_hdr->type)) {
806*4882a593Smuzhiyun 		dev_err(dev, "HW BUG? bad reply 0x%04x to message 0x%04x\n",
807*4882a593Smuzhiyun 			le16_to_cpu(ack_l3l4_hdr->type), msg_type);
808*4882a593Smuzhiyun 		result = -EIO;
809*4882a593Smuzhiyun 		goto error_bad_ack_type;
810*4882a593Smuzhiyun 	}
811*4882a593Smuzhiyun 	i2400m_msg_ack_hook(i2400m, ack_l3l4_hdr, ack_len);
812*4882a593Smuzhiyun 	mutex_unlock(&i2400m->msg_mutex);
813*4882a593Smuzhiyun 	d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %p\n",
814*4882a593Smuzhiyun 		i2400m, buf, buf_len, ack_skb);
815*4882a593Smuzhiyun 	return ack_skb;
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun error_bad_ack_type:
818*4882a593Smuzhiyun error_bad_ack_len:
819*4882a593Smuzhiyun 	kfree_skb(ack_skb);
820*4882a593Smuzhiyun error_ack_status:
821*4882a593Smuzhiyun error_wait_for_completion:
822*4882a593Smuzhiyun error_tx:
823*4882a593Smuzhiyun 	mutex_unlock(&i2400m->msg_mutex);
824*4882a593Smuzhiyun error_bad_msg:
825*4882a593Smuzhiyun 	d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %d\n",
826*4882a593Smuzhiyun 		i2400m, buf, buf_len, result);
827*4882a593Smuzhiyun 	return ERR_PTR(result);
828*4882a593Smuzhiyun }
829*4882a593Smuzhiyun 
830*4882a593Smuzhiyun 
831*4882a593Smuzhiyun /*
832*4882a593Smuzhiyun  * Definitions for the Enter Power Save command
833*4882a593Smuzhiyun  *
834*4882a593Smuzhiyun  * The Enter Power Save command requests the device to go into power
835*4882a593Smuzhiyun  * saving mode. The device will ack or nak the command depending on it
836*4882a593Smuzhiyun  * being ready for it. If it acks, we tell the USB subsystem to
837*4882a593Smuzhiyun  *
838*4882a593Smuzhiyun  * As well, the device might request to go into power saving mode by
839*4882a593Smuzhiyun  * sending a report (REPORT_POWERSAVE_READY), in which case, we issue
840*4882a593Smuzhiyun  * this command. The hookups in the RX coder allow
841*4882a593Smuzhiyun  */
842*4882a593Smuzhiyun enum {
843*4882a593Smuzhiyun 	I2400M_WAKEUP_ENABLED  = 0x01,
844*4882a593Smuzhiyun 	I2400M_WAKEUP_DISABLED = 0x02,
845*4882a593Smuzhiyun 	I2400M_TLV_TYPE_WAKEUP_MODE = 144,
846*4882a593Smuzhiyun };
847*4882a593Smuzhiyun 
848*4882a593Smuzhiyun struct i2400m_cmd_enter_power_save {
849*4882a593Smuzhiyun 	struct i2400m_l3l4_hdr hdr;
850*4882a593Smuzhiyun 	struct i2400m_tlv_hdr tlv;
851*4882a593Smuzhiyun 	__le32 val;
852*4882a593Smuzhiyun } __packed;
853*4882a593Smuzhiyun 
854*4882a593Smuzhiyun 
855*4882a593Smuzhiyun /*
856*4882a593Smuzhiyun  * Request entering power save
857*4882a593Smuzhiyun  *
858*4882a593Smuzhiyun  * This command is (mainly) executed when the device indicates that it
859*4882a593Smuzhiyun  * is ready to go into powersave mode via a REPORT_POWERSAVE_READY.
860*4882a593Smuzhiyun  */
i2400m_cmd_enter_powersave(struct i2400m * i2400m)861*4882a593Smuzhiyun int i2400m_cmd_enter_powersave(struct i2400m *i2400m)
862*4882a593Smuzhiyun {
863*4882a593Smuzhiyun 	int result;
864*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
865*4882a593Smuzhiyun 	struct sk_buff *ack_skb;
866*4882a593Smuzhiyun 	struct i2400m_cmd_enter_power_save *cmd;
867*4882a593Smuzhiyun 	char strerr[32];
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 	result = -ENOMEM;
870*4882a593Smuzhiyun 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
871*4882a593Smuzhiyun 	if (cmd == NULL)
872*4882a593Smuzhiyun 		goto error_alloc;
873*4882a593Smuzhiyun 	cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_ENTER_POWERSAVE);
874*4882a593Smuzhiyun 	cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr));
875*4882a593Smuzhiyun 	cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
876*4882a593Smuzhiyun 	cmd->tlv.type = cpu_to_le16(I2400M_TLV_TYPE_WAKEUP_MODE);
877*4882a593Smuzhiyun 	cmd->tlv.length = cpu_to_le16(sizeof(cmd->val));
878*4882a593Smuzhiyun 	cmd->val = cpu_to_le32(I2400M_WAKEUP_ENABLED);
879*4882a593Smuzhiyun 
880*4882a593Smuzhiyun 	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
881*4882a593Smuzhiyun 	result = PTR_ERR(ack_skb);
882*4882a593Smuzhiyun 	if (IS_ERR(ack_skb)) {
883*4882a593Smuzhiyun 		dev_err(dev, "Failed to issue 'Enter power save' command: %d\n",
884*4882a593Smuzhiyun 			result);
885*4882a593Smuzhiyun 		goto error_msg_to_dev;
886*4882a593Smuzhiyun 	}
887*4882a593Smuzhiyun 	result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
888*4882a593Smuzhiyun 					 strerr, sizeof(strerr));
889*4882a593Smuzhiyun 	if (result == -EACCES)
890*4882a593Smuzhiyun 		d_printf(1, dev, "Cannot enter power save mode\n");
891*4882a593Smuzhiyun 	else if (result < 0)
892*4882a593Smuzhiyun 		dev_err(dev, "'Enter power save' (0x%04x) command failed: "
893*4882a593Smuzhiyun 			"%d - %s\n", I2400M_MT_CMD_ENTER_POWERSAVE,
894*4882a593Smuzhiyun 			result, strerr);
895*4882a593Smuzhiyun 	else
896*4882a593Smuzhiyun 		d_printf(1, dev, "device ready to power save\n");
897*4882a593Smuzhiyun 	kfree_skb(ack_skb);
898*4882a593Smuzhiyun error_msg_to_dev:
899*4882a593Smuzhiyun 	kfree(cmd);
900*4882a593Smuzhiyun error_alloc:
901*4882a593Smuzhiyun 	return result;
902*4882a593Smuzhiyun }
903*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(i2400m_cmd_enter_powersave);
904*4882a593Smuzhiyun 
905*4882a593Smuzhiyun 
906*4882a593Smuzhiyun /*
907*4882a593Smuzhiyun  * Definitions for getting device information
908*4882a593Smuzhiyun  */
909*4882a593Smuzhiyun enum {
910*4882a593Smuzhiyun 	I2400M_TLV_DETAILED_DEVICE_INFO = 140
911*4882a593Smuzhiyun };
912*4882a593Smuzhiyun 
913*4882a593Smuzhiyun /**
914*4882a593Smuzhiyun  * i2400m_get_device_info - Query the device for detailed device information
915*4882a593Smuzhiyun  *
916*4882a593Smuzhiyun  * @i2400m: device descriptor
917*4882a593Smuzhiyun  *
918*4882a593Smuzhiyun  * Returns: an skb whose skb->data points to a 'struct
919*4882a593Smuzhiyun  *    i2400m_tlv_detailed_device_info'. When done, kfree_skb() it. The
920*4882a593Smuzhiyun  *    skb is *guaranteed* to contain the whole TLV data structure.
921*4882a593Smuzhiyun  *
922*4882a593Smuzhiyun  *    On error, IS_ERR(skb) is true and ERR_PTR(skb) is the error
923*4882a593Smuzhiyun  *    code.
924*4882a593Smuzhiyun  */
i2400m_get_device_info(struct i2400m * i2400m)925*4882a593Smuzhiyun struct sk_buff *i2400m_get_device_info(struct i2400m *i2400m)
926*4882a593Smuzhiyun {
927*4882a593Smuzhiyun 	int result;
928*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
929*4882a593Smuzhiyun 	struct sk_buff *ack_skb;
930*4882a593Smuzhiyun 	struct i2400m_l3l4_hdr *cmd;
931*4882a593Smuzhiyun 	const struct i2400m_l3l4_hdr *ack;
932*4882a593Smuzhiyun 	size_t ack_len;
933*4882a593Smuzhiyun 	const struct i2400m_tlv_hdr *tlv;
934*4882a593Smuzhiyun 	const struct i2400m_tlv_detailed_device_info *ddi;
935*4882a593Smuzhiyun 	char strerr[32];
936*4882a593Smuzhiyun 
937*4882a593Smuzhiyun 	ack_skb = ERR_PTR(-ENOMEM);
938*4882a593Smuzhiyun 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
939*4882a593Smuzhiyun 	if (cmd == NULL)
940*4882a593Smuzhiyun 		goto error_alloc;
941*4882a593Smuzhiyun 	cmd->type = cpu_to_le16(I2400M_MT_GET_DEVICE_INFO);
942*4882a593Smuzhiyun 	cmd->length = 0;
943*4882a593Smuzhiyun 	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
944*4882a593Smuzhiyun 
945*4882a593Smuzhiyun 	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
946*4882a593Smuzhiyun 	if (IS_ERR(ack_skb)) {
947*4882a593Smuzhiyun 		dev_err(dev, "Failed to issue 'get device info' command: %ld\n",
948*4882a593Smuzhiyun 			PTR_ERR(ack_skb));
949*4882a593Smuzhiyun 		goto error_msg_to_dev;
950*4882a593Smuzhiyun 	}
951*4882a593Smuzhiyun 	ack = wimax_msg_data_len(ack_skb, &ack_len);
952*4882a593Smuzhiyun 	result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
953*4882a593Smuzhiyun 	if (result < 0) {
954*4882a593Smuzhiyun 		dev_err(dev, "'get device info' (0x%04x) command failed: "
955*4882a593Smuzhiyun 			"%d - %s\n", I2400M_MT_GET_DEVICE_INFO, result,
956*4882a593Smuzhiyun 			strerr);
957*4882a593Smuzhiyun 		goto error_cmd_failed;
958*4882a593Smuzhiyun 	}
959*4882a593Smuzhiyun 	tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack),
960*4882a593Smuzhiyun 			      I2400M_TLV_DETAILED_DEVICE_INFO, sizeof(*ddi));
961*4882a593Smuzhiyun 	if (tlv == NULL) {
962*4882a593Smuzhiyun 		dev_err(dev, "GET DEVICE INFO: "
963*4882a593Smuzhiyun 			"detailed device info TLV not found (0x%04x)\n",
964*4882a593Smuzhiyun 			I2400M_TLV_DETAILED_DEVICE_INFO);
965*4882a593Smuzhiyun 		result = -EIO;
966*4882a593Smuzhiyun 		goto error_no_tlv;
967*4882a593Smuzhiyun 	}
968*4882a593Smuzhiyun 	skb_pull(ack_skb, (void *) tlv - (void *) ack_skb->data);
969*4882a593Smuzhiyun error_msg_to_dev:
970*4882a593Smuzhiyun 	kfree(cmd);
971*4882a593Smuzhiyun error_alloc:
972*4882a593Smuzhiyun 	return ack_skb;
973*4882a593Smuzhiyun 
974*4882a593Smuzhiyun error_no_tlv:
975*4882a593Smuzhiyun error_cmd_failed:
976*4882a593Smuzhiyun 	kfree_skb(ack_skb);
977*4882a593Smuzhiyun 	kfree(cmd);
978*4882a593Smuzhiyun 	return ERR_PTR(result);
979*4882a593Smuzhiyun }
980*4882a593Smuzhiyun 
981*4882a593Smuzhiyun 
982*4882a593Smuzhiyun /* Firmware interface versions we support */
983*4882a593Smuzhiyun enum {
984*4882a593Smuzhiyun 	I2400M_HDIv_MAJOR = 9,
985*4882a593Smuzhiyun 	I2400M_HDIv_MINOR = 1,
986*4882a593Smuzhiyun 	I2400M_HDIv_MINOR_2 = 2,
987*4882a593Smuzhiyun };
988*4882a593Smuzhiyun 
989*4882a593Smuzhiyun 
990*4882a593Smuzhiyun /**
991*4882a593Smuzhiyun  * i2400m_firmware_check - check firmware versions are compatible with
992*4882a593Smuzhiyun  * the driver
993*4882a593Smuzhiyun  *
994*4882a593Smuzhiyun  * @i2400m: device descriptor
995*4882a593Smuzhiyun  *
996*4882a593Smuzhiyun  * Returns: 0 if ok, < 0 errno code an error and a message in the
997*4882a593Smuzhiyun  *    kernel log.
998*4882a593Smuzhiyun  *
999*4882a593Smuzhiyun  * Long function, but quite simple; first chunk launches the command
1000*4882a593Smuzhiyun  * and double checks the reply for the right TLV. Then we process the
1001*4882a593Smuzhiyun  * TLV (where the meat is).
1002*4882a593Smuzhiyun  *
1003*4882a593Smuzhiyun  * Once we process the TLV that gives us the firmware's interface
1004*4882a593Smuzhiyun  * version, we encode it and save it in i2400m->fw_version for future
1005*4882a593Smuzhiyun  * reference.
1006*4882a593Smuzhiyun  */
i2400m_firmware_check(struct i2400m * i2400m)1007*4882a593Smuzhiyun int i2400m_firmware_check(struct i2400m *i2400m)
1008*4882a593Smuzhiyun {
1009*4882a593Smuzhiyun 	int result;
1010*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
1011*4882a593Smuzhiyun 	struct sk_buff *ack_skb;
1012*4882a593Smuzhiyun 	struct i2400m_l3l4_hdr *cmd;
1013*4882a593Smuzhiyun 	const struct i2400m_l3l4_hdr *ack;
1014*4882a593Smuzhiyun 	size_t ack_len;
1015*4882a593Smuzhiyun 	const struct i2400m_tlv_hdr *tlv;
1016*4882a593Smuzhiyun 	const struct i2400m_tlv_l4_message_versions *l4mv;
1017*4882a593Smuzhiyun 	char strerr[32];
1018*4882a593Smuzhiyun 	unsigned major, minor, branch;
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun 	result = -ENOMEM;
1021*4882a593Smuzhiyun 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1022*4882a593Smuzhiyun 	if (cmd == NULL)
1023*4882a593Smuzhiyun 		goto error_alloc;
1024*4882a593Smuzhiyun 	cmd->type = cpu_to_le16(I2400M_MT_GET_LM_VERSION);
1025*4882a593Smuzhiyun 	cmd->length = 0;
1026*4882a593Smuzhiyun 	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
1029*4882a593Smuzhiyun 	if (IS_ERR(ack_skb)) {
1030*4882a593Smuzhiyun 		result = PTR_ERR(ack_skb);
1031*4882a593Smuzhiyun 		dev_err(dev, "Failed to issue 'get lm version' command: %-d\n",
1032*4882a593Smuzhiyun 			result);
1033*4882a593Smuzhiyun 		goto error_msg_to_dev;
1034*4882a593Smuzhiyun 	}
1035*4882a593Smuzhiyun 	ack = wimax_msg_data_len(ack_skb, &ack_len);
1036*4882a593Smuzhiyun 	result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
1037*4882a593Smuzhiyun 	if (result < 0) {
1038*4882a593Smuzhiyun 		dev_err(dev, "'get lm version' (0x%04x) command failed: "
1039*4882a593Smuzhiyun 			"%d - %s\n", I2400M_MT_GET_LM_VERSION, result,
1040*4882a593Smuzhiyun 			strerr);
1041*4882a593Smuzhiyun 		goto error_cmd_failed;
1042*4882a593Smuzhiyun 	}
1043*4882a593Smuzhiyun 	tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack),
1044*4882a593Smuzhiyun 			      I2400M_TLV_L4_MESSAGE_VERSIONS, sizeof(*l4mv));
1045*4882a593Smuzhiyun 	if (tlv == NULL) {
1046*4882a593Smuzhiyun 		dev_err(dev, "get lm version: TLV not found (0x%04x)\n",
1047*4882a593Smuzhiyun 			I2400M_TLV_L4_MESSAGE_VERSIONS);
1048*4882a593Smuzhiyun 		result = -EIO;
1049*4882a593Smuzhiyun 		goto error_no_tlv;
1050*4882a593Smuzhiyun 	}
1051*4882a593Smuzhiyun 	l4mv = container_of(tlv, typeof(*l4mv), hdr);
1052*4882a593Smuzhiyun 	major = le16_to_cpu(l4mv->major);
1053*4882a593Smuzhiyun 	minor = le16_to_cpu(l4mv->minor);
1054*4882a593Smuzhiyun 	branch = le16_to_cpu(l4mv->branch);
1055*4882a593Smuzhiyun 	result = -EINVAL;
1056*4882a593Smuzhiyun 	if (major != I2400M_HDIv_MAJOR) {
1057*4882a593Smuzhiyun 		dev_err(dev, "unsupported major fw version "
1058*4882a593Smuzhiyun 			"%u.%u.%u\n", major, minor, branch);
1059*4882a593Smuzhiyun 		goto error_bad_major;
1060*4882a593Smuzhiyun 	}
1061*4882a593Smuzhiyun 	result = 0;
1062*4882a593Smuzhiyun 	if (minor > I2400M_HDIv_MINOR_2 || minor < I2400M_HDIv_MINOR)
1063*4882a593Smuzhiyun 		dev_warn(dev, "untested minor fw version %u.%u.%u\n",
1064*4882a593Smuzhiyun 			 major, minor, branch);
1065*4882a593Smuzhiyun 	/* Yes, we ignore the branch -- we don't have to track it */
1066*4882a593Smuzhiyun 	i2400m->fw_version = major << 16 | minor;
1067*4882a593Smuzhiyun 	dev_info(dev, "firmware interface version %u.%u.%u\n",
1068*4882a593Smuzhiyun 		 major, minor, branch);
1069*4882a593Smuzhiyun error_bad_major:
1070*4882a593Smuzhiyun error_no_tlv:
1071*4882a593Smuzhiyun error_cmd_failed:
1072*4882a593Smuzhiyun 	kfree_skb(ack_skb);
1073*4882a593Smuzhiyun error_msg_to_dev:
1074*4882a593Smuzhiyun 	kfree(cmd);
1075*4882a593Smuzhiyun error_alloc:
1076*4882a593Smuzhiyun 	return result;
1077*4882a593Smuzhiyun }
1078*4882a593Smuzhiyun 
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun /*
1081*4882a593Smuzhiyun  * Send an DoExitIdle command to the device to ask it to go out of
1082*4882a593Smuzhiyun  * basestation-idle mode.
1083*4882a593Smuzhiyun  *
1084*4882a593Smuzhiyun  * @i2400m: device descriptor
1085*4882a593Smuzhiyun  *
1086*4882a593Smuzhiyun  * This starts a renegotiation with the basestation that might involve
1087*4882a593Smuzhiyun  * another crypto handshake with user space.
1088*4882a593Smuzhiyun  *
1089*4882a593Smuzhiyun  * Returns: 0 if ok, < 0 errno code on error.
1090*4882a593Smuzhiyun  */
i2400m_cmd_exit_idle(struct i2400m * i2400m)1091*4882a593Smuzhiyun int i2400m_cmd_exit_idle(struct i2400m *i2400m)
1092*4882a593Smuzhiyun {
1093*4882a593Smuzhiyun 	int result;
1094*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
1095*4882a593Smuzhiyun 	struct sk_buff *ack_skb;
1096*4882a593Smuzhiyun 	struct i2400m_l3l4_hdr *cmd;
1097*4882a593Smuzhiyun 	char strerr[32];
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 	result = -ENOMEM;
1100*4882a593Smuzhiyun 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1101*4882a593Smuzhiyun 	if (cmd == NULL)
1102*4882a593Smuzhiyun 		goto error_alloc;
1103*4882a593Smuzhiyun 	cmd->type = cpu_to_le16(I2400M_MT_CMD_EXIT_IDLE);
1104*4882a593Smuzhiyun 	cmd->length = 0;
1105*4882a593Smuzhiyun 	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
1106*4882a593Smuzhiyun 
1107*4882a593Smuzhiyun 	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
1108*4882a593Smuzhiyun 	result = PTR_ERR(ack_skb);
1109*4882a593Smuzhiyun 	if (IS_ERR(ack_skb)) {
1110*4882a593Smuzhiyun 		dev_err(dev, "Failed to issue 'exit idle' command: %d\n",
1111*4882a593Smuzhiyun 			result);
1112*4882a593Smuzhiyun 		goto error_msg_to_dev;
1113*4882a593Smuzhiyun 	}
1114*4882a593Smuzhiyun 	result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
1115*4882a593Smuzhiyun 					 strerr, sizeof(strerr));
1116*4882a593Smuzhiyun 	kfree_skb(ack_skb);
1117*4882a593Smuzhiyun error_msg_to_dev:
1118*4882a593Smuzhiyun 	kfree(cmd);
1119*4882a593Smuzhiyun error_alloc:
1120*4882a593Smuzhiyun 	return result;
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun }
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun /*
1126*4882a593Smuzhiyun  * Query the device for its state, update the WiMAX stack's idea of it
1127*4882a593Smuzhiyun  *
1128*4882a593Smuzhiyun  * @i2400m: device descriptor
1129*4882a593Smuzhiyun  *
1130*4882a593Smuzhiyun  * Returns: 0 if ok, < 0 errno code on error.
1131*4882a593Smuzhiyun  *
1132*4882a593Smuzhiyun  * Executes a 'Get State' command and parses the returned
1133*4882a593Smuzhiyun  * TLVs.
1134*4882a593Smuzhiyun  *
1135*4882a593Smuzhiyun  * Because this is almost identical to a 'Report State', we use
1136*4882a593Smuzhiyun  * i2400m_report_state_hook() to parse the answer. This will set the
1137*4882a593Smuzhiyun  * carrier state, as well as the RF Kill switches state.
1138*4882a593Smuzhiyun  */
i2400m_cmd_get_state(struct i2400m * i2400m)1139*4882a593Smuzhiyun static int i2400m_cmd_get_state(struct i2400m *i2400m)
1140*4882a593Smuzhiyun {
1141*4882a593Smuzhiyun 	int result;
1142*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
1143*4882a593Smuzhiyun 	struct sk_buff *ack_skb;
1144*4882a593Smuzhiyun 	struct i2400m_l3l4_hdr *cmd;
1145*4882a593Smuzhiyun 	const struct i2400m_l3l4_hdr *ack;
1146*4882a593Smuzhiyun 	size_t ack_len;
1147*4882a593Smuzhiyun 	char strerr[32];
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	result = -ENOMEM;
1150*4882a593Smuzhiyun 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1151*4882a593Smuzhiyun 	if (cmd == NULL)
1152*4882a593Smuzhiyun 		goto error_alloc;
1153*4882a593Smuzhiyun 	cmd->type = cpu_to_le16(I2400M_MT_GET_STATE);
1154*4882a593Smuzhiyun 	cmd->length = 0;
1155*4882a593Smuzhiyun 	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
1158*4882a593Smuzhiyun 	if (IS_ERR(ack_skb)) {
1159*4882a593Smuzhiyun 		dev_err(dev, "Failed to issue 'get state' command: %ld\n",
1160*4882a593Smuzhiyun 			PTR_ERR(ack_skb));
1161*4882a593Smuzhiyun 		result = PTR_ERR(ack_skb);
1162*4882a593Smuzhiyun 		goto error_msg_to_dev;
1163*4882a593Smuzhiyun 	}
1164*4882a593Smuzhiyun 	ack = wimax_msg_data_len(ack_skb, &ack_len);
1165*4882a593Smuzhiyun 	result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
1166*4882a593Smuzhiyun 	if (result < 0) {
1167*4882a593Smuzhiyun 		dev_err(dev, "'get state' (0x%04x) command failed: "
1168*4882a593Smuzhiyun 			"%d - %s\n", I2400M_MT_GET_STATE, result, strerr);
1169*4882a593Smuzhiyun 		goto error_cmd_failed;
1170*4882a593Smuzhiyun 	}
1171*4882a593Smuzhiyun 	i2400m_report_state_hook(i2400m, ack, ack_len - sizeof(*ack),
1172*4882a593Smuzhiyun 				 "GET STATE");
1173*4882a593Smuzhiyun 	result = 0;
1174*4882a593Smuzhiyun 	kfree_skb(ack_skb);
1175*4882a593Smuzhiyun error_cmd_failed:
1176*4882a593Smuzhiyun error_msg_to_dev:
1177*4882a593Smuzhiyun 	kfree(cmd);
1178*4882a593Smuzhiyun error_alloc:
1179*4882a593Smuzhiyun 	return result;
1180*4882a593Smuzhiyun }
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun /**
1183*4882a593Smuzhiyun  * Set basic configuration settings
1184*4882a593Smuzhiyun  *
1185*4882a593Smuzhiyun  * @i2400m: device descriptor
1186*4882a593Smuzhiyun  * @args: array of pointers to the TLV headers to send for
1187*4882a593Smuzhiyun  *     configuration (each followed by its payload).
1188*4882a593Smuzhiyun  *     TLV headers and payloads must be properly initialized, with the
1189*4882a593Smuzhiyun  *     right endianess (LE).
1190*4882a593Smuzhiyun  * @arg_size: number of pointers in the @args array
1191*4882a593Smuzhiyun  */
i2400m_set_init_config(struct i2400m * i2400m,const struct i2400m_tlv_hdr ** arg,size_t args)1192*4882a593Smuzhiyun static int i2400m_set_init_config(struct i2400m *i2400m,
1193*4882a593Smuzhiyun 				  const struct i2400m_tlv_hdr **arg,
1194*4882a593Smuzhiyun 				  size_t args)
1195*4882a593Smuzhiyun {
1196*4882a593Smuzhiyun 	int result;
1197*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
1198*4882a593Smuzhiyun 	struct sk_buff *ack_skb;
1199*4882a593Smuzhiyun 	struct i2400m_l3l4_hdr *cmd;
1200*4882a593Smuzhiyun 	char strerr[32];
1201*4882a593Smuzhiyun 	unsigned argc, argsize, tlv_size;
1202*4882a593Smuzhiyun 	const struct i2400m_tlv_hdr *tlv_hdr;
1203*4882a593Smuzhiyun 	void *buf, *itr;
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	d_fnstart(3, dev, "(i2400m %p arg %p args %zu)\n", i2400m, arg, args);
1206*4882a593Smuzhiyun 	result = 0;
1207*4882a593Smuzhiyun 	if (args == 0)
1208*4882a593Smuzhiyun 		goto none;
1209*4882a593Smuzhiyun 	/* Compute the size of all the TLVs, so we can alloc a
1210*4882a593Smuzhiyun 	 * contiguous command block to copy them. */
1211*4882a593Smuzhiyun 	argsize = 0;
1212*4882a593Smuzhiyun 	for (argc = 0; argc < args; argc++) {
1213*4882a593Smuzhiyun 		tlv_hdr = arg[argc];
1214*4882a593Smuzhiyun 		argsize += sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length);
1215*4882a593Smuzhiyun 	}
1216*4882a593Smuzhiyun 	WARN_ON(argc >= 9);	/* As per hw spec */
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun 	/* Alloc the space for the command and TLVs*/
1219*4882a593Smuzhiyun 	result = -ENOMEM;
1220*4882a593Smuzhiyun 	buf = kzalloc(sizeof(*cmd) + argsize, GFP_KERNEL);
1221*4882a593Smuzhiyun 	if (buf == NULL)
1222*4882a593Smuzhiyun 		goto error_alloc;
1223*4882a593Smuzhiyun 	cmd = buf;
1224*4882a593Smuzhiyun 	cmd->type = cpu_to_le16(I2400M_MT_SET_INIT_CONFIG);
1225*4882a593Smuzhiyun 	cmd->length = cpu_to_le16(argsize);
1226*4882a593Smuzhiyun 	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	/* Copy the TLVs */
1229*4882a593Smuzhiyun 	itr = buf + sizeof(*cmd);
1230*4882a593Smuzhiyun 	for (argc = 0; argc < args; argc++) {
1231*4882a593Smuzhiyun 		tlv_hdr = arg[argc];
1232*4882a593Smuzhiyun 		tlv_size = sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length);
1233*4882a593Smuzhiyun 		memcpy(itr, tlv_hdr, tlv_size);
1234*4882a593Smuzhiyun 		itr += tlv_size;
1235*4882a593Smuzhiyun 	}
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	/* Send the message! */
1238*4882a593Smuzhiyun 	ack_skb = i2400m_msg_to_dev(i2400m, buf, sizeof(*cmd) + argsize);
1239*4882a593Smuzhiyun 	result = PTR_ERR(ack_skb);
1240*4882a593Smuzhiyun 	if (IS_ERR(ack_skb)) {
1241*4882a593Smuzhiyun 		dev_err(dev, "Failed to issue 'init config' command: %d\n",
1242*4882a593Smuzhiyun 			result);
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 		goto error_msg_to_dev;
1245*4882a593Smuzhiyun 	}
1246*4882a593Smuzhiyun 	result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
1247*4882a593Smuzhiyun 					 strerr, sizeof(strerr));
1248*4882a593Smuzhiyun 	if (result < 0)
1249*4882a593Smuzhiyun 		dev_err(dev, "'init config' (0x%04x) command failed: %d - %s\n",
1250*4882a593Smuzhiyun 			I2400M_MT_SET_INIT_CONFIG, result, strerr);
1251*4882a593Smuzhiyun 	kfree_skb(ack_skb);
1252*4882a593Smuzhiyun error_msg_to_dev:
1253*4882a593Smuzhiyun 	kfree(buf);
1254*4882a593Smuzhiyun error_alloc:
1255*4882a593Smuzhiyun none:
1256*4882a593Smuzhiyun 	d_fnend(3, dev, "(i2400m %p arg %p args %zu) = %d\n",
1257*4882a593Smuzhiyun 		i2400m, arg, args, result);
1258*4882a593Smuzhiyun 	return result;
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun }
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun /**
1263*4882a593Smuzhiyun  * i2400m_set_idle_timeout - Set the device's idle mode timeout
1264*4882a593Smuzhiyun  *
1265*4882a593Smuzhiyun  * @i2400m: i2400m device descriptor
1266*4882a593Smuzhiyun  *
1267*4882a593Smuzhiyun  * @msecs: milliseconds for the timeout to enter idle mode. Between
1268*4882a593Smuzhiyun  *     100 to 300000 (5m); 0 to disable. In increments of 100.
1269*4882a593Smuzhiyun  *
1270*4882a593Smuzhiyun  * After this @msecs of the link being idle (no data being sent or
1271*4882a593Smuzhiyun  * received), the device will negotiate with the basestation entering
1272*4882a593Smuzhiyun  * idle mode for saving power. The connection is maintained, but
1273*4882a593Smuzhiyun  * getting out of it (done in tx.c) will require some negotiation,
1274*4882a593Smuzhiyun  * possible crypto re-handshake and a possible DHCP re-lease.
1275*4882a593Smuzhiyun  *
1276*4882a593Smuzhiyun  * Only available if fw_version >= 0x00090002.
1277*4882a593Smuzhiyun  *
1278*4882a593Smuzhiyun  * Returns: 0 if ok, < 0 errno code on error.
1279*4882a593Smuzhiyun  */
i2400m_set_idle_timeout(struct i2400m * i2400m,unsigned msecs)1280*4882a593Smuzhiyun int i2400m_set_idle_timeout(struct i2400m *i2400m, unsigned msecs)
1281*4882a593Smuzhiyun {
1282*4882a593Smuzhiyun 	int result;
1283*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
1284*4882a593Smuzhiyun 	struct sk_buff *ack_skb;
1285*4882a593Smuzhiyun 	struct {
1286*4882a593Smuzhiyun 		struct i2400m_l3l4_hdr hdr;
1287*4882a593Smuzhiyun 		struct i2400m_tlv_config_idle_timeout cit;
1288*4882a593Smuzhiyun 	} *cmd;
1289*4882a593Smuzhiyun 	const struct i2400m_l3l4_hdr *ack;
1290*4882a593Smuzhiyun 	size_t ack_len;
1291*4882a593Smuzhiyun 	char strerr[32];
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 	result = -ENOSYS;
1294*4882a593Smuzhiyun 	if (i2400m_le_v1_3(i2400m))
1295*4882a593Smuzhiyun 		goto error_alloc;
1296*4882a593Smuzhiyun 	result = -ENOMEM;
1297*4882a593Smuzhiyun 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1298*4882a593Smuzhiyun 	if (cmd == NULL)
1299*4882a593Smuzhiyun 		goto error_alloc;
1300*4882a593Smuzhiyun 	cmd->hdr.type = cpu_to_le16(I2400M_MT_GET_STATE);
1301*4882a593Smuzhiyun 	cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr));
1302*4882a593Smuzhiyun 	cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 	cmd->cit.hdr.type =
1305*4882a593Smuzhiyun 		cpu_to_le16(I2400M_TLV_CONFIG_IDLE_TIMEOUT);
1306*4882a593Smuzhiyun 	cmd->cit.hdr.length = cpu_to_le16(sizeof(cmd->cit.timeout));
1307*4882a593Smuzhiyun 	cmd->cit.timeout = cpu_to_le32(msecs);
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
1310*4882a593Smuzhiyun 	if (IS_ERR(ack_skb)) {
1311*4882a593Smuzhiyun 		dev_err(dev, "Failed to issue 'set idle timeout' command: "
1312*4882a593Smuzhiyun 			"%ld\n", PTR_ERR(ack_skb));
1313*4882a593Smuzhiyun 		result = PTR_ERR(ack_skb);
1314*4882a593Smuzhiyun 		goto error_msg_to_dev;
1315*4882a593Smuzhiyun 	}
1316*4882a593Smuzhiyun 	ack = wimax_msg_data_len(ack_skb, &ack_len);
1317*4882a593Smuzhiyun 	result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
1318*4882a593Smuzhiyun 	if (result < 0) {
1319*4882a593Smuzhiyun 		dev_err(dev, "'set idle timeout' (0x%04x) command failed: "
1320*4882a593Smuzhiyun 			"%d - %s\n", I2400M_MT_GET_STATE, result, strerr);
1321*4882a593Smuzhiyun 		goto error_cmd_failed;
1322*4882a593Smuzhiyun 	}
1323*4882a593Smuzhiyun 	result = 0;
1324*4882a593Smuzhiyun 	kfree_skb(ack_skb);
1325*4882a593Smuzhiyun error_cmd_failed:
1326*4882a593Smuzhiyun error_msg_to_dev:
1327*4882a593Smuzhiyun 	kfree(cmd);
1328*4882a593Smuzhiyun error_alloc:
1329*4882a593Smuzhiyun 	return result;
1330*4882a593Smuzhiyun }
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun /**
1334*4882a593Smuzhiyun  * i2400m_dev_initialize - Initialize the device once communications are ready
1335*4882a593Smuzhiyun  *
1336*4882a593Smuzhiyun  * @i2400m: device descriptor
1337*4882a593Smuzhiyun  *
1338*4882a593Smuzhiyun  * Returns: 0 if ok, < 0 errno code on error.
1339*4882a593Smuzhiyun  *
1340*4882a593Smuzhiyun  * Configures the device to work the way we like it.
1341*4882a593Smuzhiyun  *
1342*4882a593Smuzhiyun  * At the point of this call, the device is registered with the WiMAX
1343*4882a593Smuzhiyun  * and netdev stacks, firmware is uploaded and we can talk to the
1344*4882a593Smuzhiyun  * device normally.
1345*4882a593Smuzhiyun  */
i2400m_dev_initialize(struct i2400m * i2400m)1346*4882a593Smuzhiyun int i2400m_dev_initialize(struct i2400m *i2400m)
1347*4882a593Smuzhiyun {
1348*4882a593Smuzhiyun 	int result;
1349*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
1350*4882a593Smuzhiyun 	struct i2400m_tlv_config_idle_parameters idle_params;
1351*4882a593Smuzhiyun 	struct i2400m_tlv_config_idle_timeout idle_timeout;
1352*4882a593Smuzhiyun 	struct i2400m_tlv_config_d2h_data_format df;
1353*4882a593Smuzhiyun 	struct i2400m_tlv_config_dl_host_reorder dlhr;
1354*4882a593Smuzhiyun 	const struct i2400m_tlv_hdr *args[9];
1355*4882a593Smuzhiyun 	unsigned argc = 0;
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
1358*4882a593Smuzhiyun 	if (i2400m_passive_mode)
1359*4882a593Smuzhiyun 		goto out_passive;
1360*4882a593Smuzhiyun 	/* Disable idle mode? (enabled by default) */
1361*4882a593Smuzhiyun 	if (i2400m_idle_mode_disabled) {
1362*4882a593Smuzhiyun 		if (i2400m_le_v1_3(i2400m)) {
1363*4882a593Smuzhiyun 			idle_params.hdr.type =
1364*4882a593Smuzhiyun 				cpu_to_le16(I2400M_TLV_CONFIG_IDLE_PARAMETERS);
1365*4882a593Smuzhiyun 			idle_params.hdr.length = cpu_to_le16(
1366*4882a593Smuzhiyun 				sizeof(idle_params) - sizeof(idle_params.hdr));
1367*4882a593Smuzhiyun 			idle_params.idle_timeout = 0;
1368*4882a593Smuzhiyun 			idle_params.idle_paging_interval = 0;
1369*4882a593Smuzhiyun 			args[argc++] = &idle_params.hdr;
1370*4882a593Smuzhiyun 		} else {
1371*4882a593Smuzhiyun 			idle_timeout.hdr.type =
1372*4882a593Smuzhiyun 				cpu_to_le16(I2400M_TLV_CONFIG_IDLE_TIMEOUT);
1373*4882a593Smuzhiyun 			idle_timeout.hdr.length = cpu_to_le16(
1374*4882a593Smuzhiyun 				sizeof(idle_timeout) - sizeof(idle_timeout.hdr));
1375*4882a593Smuzhiyun 			idle_timeout.timeout = 0;
1376*4882a593Smuzhiyun 			args[argc++] = &idle_timeout.hdr;
1377*4882a593Smuzhiyun 		}
1378*4882a593Smuzhiyun 	}
1379*4882a593Smuzhiyun 	if (i2400m_ge_v1_4(i2400m)) {
1380*4882a593Smuzhiyun 		/* Enable extended RX data format? */
1381*4882a593Smuzhiyun 		df.hdr.type =
1382*4882a593Smuzhiyun 			cpu_to_le16(I2400M_TLV_CONFIG_D2H_DATA_FORMAT);
1383*4882a593Smuzhiyun 		df.hdr.length = cpu_to_le16(
1384*4882a593Smuzhiyun 			sizeof(df) - sizeof(df.hdr));
1385*4882a593Smuzhiyun 		df.format = 1;
1386*4882a593Smuzhiyun 		args[argc++] = &df.hdr;
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 		/* Enable RX data reordering?
1389*4882a593Smuzhiyun 		 * (switch flipped in rx.c:i2400m_rx_setup() after fw upload) */
1390*4882a593Smuzhiyun 		if (i2400m->rx_reorder) {
1391*4882a593Smuzhiyun 			dlhr.hdr.type =
1392*4882a593Smuzhiyun 				cpu_to_le16(I2400M_TLV_CONFIG_DL_HOST_REORDER);
1393*4882a593Smuzhiyun 			dlhr.hdr.length = cpu_to_le16(
1394*4882a593Smuzhiyun 				sizeof(dlhr) - sizeof(dlhr.hdr));
1395*4882a593Smuzhiyun 			dlhr.reorder = 1;
1396*4882a593Smuzhiyun 			args[argc++] = &dlhr.hdr;
1397*4882a593Smuzhiyun 		}
1398*4882a593Smuzhiyun 	}
1399*4882a593Smuzhiyun 	result = i2400m_set_init_config(i2400m, args, argc);
1400*4882a593Smuzhiyun 	if (result < 0)
1401*4882a593Smuzhiyun 		goto error;
1402*4882a593Smuzhiyun out_passive:
1403*4882a593Smuzhiyun 	/*
1404*4882a593Smuzhiyun 	 * Update state: Here it just calls a get state; parsing the
1405*4882a593Smuzhiyun 	 * result (System State TLV and RF Status TLV [done in the rx
1406*4882a593Smuzhiyun 	 * path hooks]) will set the hardware and software RF-Kill
1407*4882a593Smuzhiyun 	 * status.
1408*4882a593Smuzhiyun 	 */
1409*4882a593Smuzhiyun 	result = i2400m_cmd_get_state(i2400m);
1410*4882a593Smuzhiyun error:
1411*4882a593Smuzhiyun 	if (result < 0)
1412*4882a593Smuzhiyun 		dev_err(dev, "failed to initialize the device: %d\n", result);
1413*4882a593Smuzhiyun 	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
1414*4882a593Smuzhiyun 	return result;
1415*4882a593Smuzhiyun }
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun /**
1419*4882a593Smuzhiyun  * i2400m_dev_shutdown - Shutdown a running device
1420*4882a593Smuzhiyun  *
1421*4882a593Smuzhiyun  * @i2400m: device descriptor
1422*4882a593Smuzhiyun  *
1423*4882a593Smuzhiyun  * Release resources acquired during the running of the device; in
1424*4882a593Smuzhiyun  * theory, should also tell the device to go to sleep, switch off the
1425*4882a593Smuzhiyun  * radio, all that, but at this point, in most cases (driver
1426*4882a593Smuzhiyun  * disconnection, reset handling) we can't even talk to the device.
1427*4882a593Smuzhiyun  */
i2400m_dev_shutdown(struct i2400m * i2400m)1428*4882a593Smuzhiyun void i2400m_dev_shutdown(struct i2400m *i2400m)
1429*4882a593Smuzhiyun {
1430*4882a593Smuzhiyun 	struct device *dev = i2400m_dev(i2400m);
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun 	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
1433*4882a593Smuzhiyun 	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
1434*4882a593Smuzhiyun }
1435