xref: /OK3568_Linux_fs/kernel/drivers/base/firmware_loader/firmware.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef __FIRMWARE_LOADER_H
3*4882a593Smuzhiyun #define __FIRMWARE_LOADER_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <linux/bitops.h>
6*4882a593Smuzhiyun #include <linux/firmware.h>
7*4882a593Smuzhiyun #include <linux/types.h>
8*4882a593Smuzhiyun #include <linux/kref.h>
9*4882a593Smuzhiyun #include <linux/list.h>
10*4882a593Smuzhiyun #include <linux/completion.h>
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <generated/utsrelease.h>
13*4882a593Smuzhiyun 
/**
 * enum fw_opt - options to control firmware loading behaviour
 *
 * @FW_OPT_UEVENT: Enables the fallback mechanism to send a kobject uevent
 *	when the firmware is not found. Userspace is in charge to load the
 *	firmware using the sysfs loading facility.
 * @FW_OPT_NOWAIT: Used to describe the firmware request is asynchronous.
 * @FW_OPT_USERHELPER: Enable the fallback mechanism, in case the direct
 *	filesystem lookup fails at finding the firmware.  For details refer to
 *	firmware_fallback_sysfs().
 * @FW_OPT_NO_WARN: Quiet, avoid printing warning messages.
 * @FW_OPT_NOCACHE: Disables firmware caching. Firmware caching is used to
 *	cache the firmware upon suspend, so that upon resume races against the
 *	firmware file lookup on storage is avoided. Used for calls where the
 *	file may be too big, or where the driver takes charge of its own
 *	firmware caching mechanism.
 * @FW_OPT_NOFALLBACK_SYSFS: Disable the sysfs fallback mechanism. Takes
 *	precedence over &FW_OPT_UEVENT and &FW_OPT_USERHELPER.
 * @FW_OPT_FALLBACK_PLATFORM: Enable fallback to device fw copy embedded in
 *	the platform's main firmware. If both this fallback and the sysfs
 *	fallback are enabled, then this fallback will be tried first.
 * @FW_OPT_PARTIAL: Allow partial read of firmware instead of needing to read
 *	entire file.
 */
enum fw_opt {
	FW_OPT_UEVENT			= BIT(0),
	FW_OPT_NOWAIT			= BIT(1),
	FW_OPT_USERHELPER		= BIT(2),
	FW_OPT_NO_WARN			= BIT(3),
	FW_OPT_NOCACHE			= BIT(4),
	FW_OPT_NOFALLBACK_SYSFS		= BIT(5),
	FW_OPT_FALLBACK_PLATFORM	= BIT(6),
	FW_OPT_PARTIAL			= BIT(7),
};
48*4882a593Smuzhiyun 
/**
 * enum fw_status - states of the firmware loading state machine
 *
 * @FW_STATUS_UNKNOWN: initial state, no request has been started yet
 * @FW_STATUS_LOADING: a firmware load is in flight (set by fw_state_start())
 * @FW_STATUS_DONE: terminal state, load completed (set by fw_state_done())
 * @FW_STATUS_ABORTED: terminal state, load was aborted (set by
 *	fw_state_aborted()); waiters see it as -ENOENT
 */
enum fw_status {
	FW_STATUS_UNKNOWN,
	FW_STATUS_LOADING,
	FW_STATUS_DONE,
	FW_STATUS_ABORTED,
};
55*4882a593Smuzhiyun 
/*
 * Concurrent request_firmware() calls for the same firmware need to be
 * serialized.  struct fw_state is a simple state machine which holds
 * the state of the firmware loading.
 */
struct fw_state {
	struct completion completion;	/* completed once status reaches DONE or ABORTED */
	enum fw_status status;		/* current state, see enum fw_status */
};
65*4882a593Smuzhiyun 
/* Private firmware buffer, shared between concurrent requests for the same image. */
struct fw_priv {
	struct kref ref;		/* reference count */
	struct list_head list;		/* node on a firmware_cache list — presumably fwc's; verify in .c */
	struct firmware_cache *fwc;	/* owning firmware cache */
	struct fw_state fw_st;		/* loading state machine */
	void *data;			/* firmware image payload */
	size_t size;			/* bytes of valid payload in @data */
	size_t allocated_size;		/* size of the @data allocation (may exceed @size) */
	size_t offset;			/* read offset into the file — likely used with FW_OPT_PARTIAL; confirm in .c */
	u32 opt_flags;			/* bitmask of enum fw_opt values */
#ifdef CONFIG_FW_LOADER_PAGED_BUF
	bool is_paged_buf;		/* payload built from the page array below */
	struct page **pages;		/* backing pages for the paged buffer */
	int nr_pages;			/* pages currently populated */
	int page_array_size;		/* capacity of the @pages array */
#endif
#ifdef CONFIG_FW_LOADER_USER_HELPER
	bool need_uevent;		/* sysfs fallback should emit a uevent */
	struct list_head pending_list;	/* node on the pending list; unlinked in __fw_state_set() */
#endif
	const char *fw_name;		/* requested firmware file name */
};
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun extern struct mutex fw_lock;
90*4882a593Smuzhiyun 
__fw_state_check(struct fw_priv * fw_priv,enum fw_status status)91*4882a593Smuzhiyun static inline bool __fw_state_check(struct fw_priv *fw_priv,
92*4882a593Smuzhiyun 				    enum fw_status status)
93*4882a593Smuzhiyun {
94*4882a593Smuzhiyun 	struct fw_state *fw_st = &fw_priv->fw_st;
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun 	return fw_st->status == status;
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun 
/*
 * Wait (killable) for the firmware load to finish, up to @timeout jiffies.
 *
 * Return: -ENOENT if the load was aborted, -ETIMEDOUT if @timeout expired,
 * a negative error from the wait itself (e.g. interrupted by a fatal
 * signal), or 0 on success.
 */
static inline int __fw_state_wait_common(struct fw_priv *fw_priv, long timeout)
{
	struct fw_state *fw_st = &fw_priv->fw_st;
	long ret;

	ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
	/*
	 * ret != 0: either the completion fired or the wait was interrupted.
	 * In both cases an ABORTED status wins and is reported as -ENOENT.
	 * NOTE(review): plain read of status here while __fw_state_set()
	 * uses WRITE_ONCE() — confirm a READ_ONCE() is not needed.
	 */
	if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
		return -ENOENT;
	if (!ret)
		return -ETIMEDOUT;

	/* ret > 0 means completed in time; ret < 0 is the wait error. */
	return ret < 0 ? ret : 0;
}
112*4882a593Smuzhiyun 
/*
 * Move the load state machine to @status.  On the terminal states
 * (DONE/ABORTED) all waiters blocked in __fw_state_wait_common() are
 * woken via complete_all().
 */
static inline void __fw_state_set(struct fw_priv *fw_priv,
				  enum fw_status status)
{
	struct fw_state *fw_st = &fw_priv->fw_st;

	/* WRITE_ONCE(): readers may observe the status concurrently. */
	WRITE_ONCE(fw_st->status, status);

	if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) {
#ifdef CONFIG_FW_LOADER_USER_HELPER
		/*
		 * Doing this here ensures that the fw_priv is deleted from
		 * the pending list in all abort/done paths.
		 */
		list_del_init(&fw_priv->pending_list);
#endif
		complete_all(&fw_st->completion);
	}
}
131*4882a593Smuzhiyun 
/* Mark the load as aborted (terminal state); wakes all waiters. */
static inline void fw_state_aborted(struct fw_priv *fw_priv)
{
	__fw_state_set(fw_priv, FW_STATUS_ABORTED);
}
136*4882a593Smuzhiyun 
/* Return true if the load has been aborted. */
static inline bool fw_state_is_aborted(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_ABORTED);
}
141*4882a593Smuzhiyun 
/* Mark the load as in flight. */
static inline void fw_state_start(struct fw_priv *fw_priv)
{
	__fw_state_set(fw_priv, FW_STATUS_LOADING);
}
146*4882a593Smuzhiyun 
/* Mark the load as successfully finished (terminal state); wakes all waiters. */
static inline void fw_state_done(struct fw_priv *fw_priv)
{
	__fw_state_set(fw_priv, FW_STATUS_DONE);
}
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun int assign_fw(struct firmware *fw, struct device *device);
153*4882a593Smuzhiyun 
#ifdef CONFIG_FW_LOADER_PAGED_BUF
/* Paged-buffer management; implemented in the firmware loader core. */
void fw_free_paged_buf(struct fw_priv *fw_priv);
int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed);
int fw_map_paged_buf(struct fw_priv *fw_priv);
bool fw_is_paged_buf(struct fw_priv *fw_priv);
#else
/* Paged-buffer support compiled out: freeing is a no-op, grow/map fail
 * with -ENXIO, and no buffer ever reports as paged. */
static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {}
static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
static inline bool fw_is_paged_buf(struct fw_priv *fw_priv) { return false; }
#endif
165*4882a593Smuzhiyun 
166*4882a593Smuzhiyun #endif /* __FIRMWARE_LOADER_H */
167