/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_csr.h"
#include "intel_de.h"

/**
 * DOC: csr support for dmc
 *
 * Display Context Save and Restore (CSR) firmware support was added from gen9
 * onwards to drive the newly added DMC (Display Microcontroller) in the
 * display engine. The DMC saves and restores the state of the display engine
 * when it enters a low-power state and returns to normal operation.
 */

#define GEN12_CSR_MAX_FW_SIZE		ICL_CSR_MAX_FW_SIZE

#define RKL_CSR_PATH			"i915/rkl_dmc_ver2_02.bin"
#define RKL_CSR_VERSION_REQUIRED	CSR_VERSION(2, 2)
MODULE_FIRMWARE(RKL_CSR_PATH);

#define TGL_CSR_PATH			"i915/tgl_dmc_ver2_08.bin"
#define TGL_CSR_VERSION_REQUIRED	CSR_VERSION(2, 8)
#define TGL_CSR_MAX_FW_SIZE		0x6000
MODULE_FIRMWARE(TGL_CSR_PATH);

#define ICL_CSR_PATH			"i915/icl_dmc_ver1_09.bin"
#define ICL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 9)
#define ICL_CSR_MAX_FW_SIZE		0x6000
MODULE_FIRMWARE(ICL_CSR_PATH);

#define CNL_CSR_PATH			"i915/cnl_dmc_ver1_07.bin"
#define CNL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
#define CNL_CSR_MAX_FW_SIZE		GLK_CSR_MAX_FW_SIZE
MODULE_FIRMWARE(CNL_CSR_PATH);

#define GLK_CSR_PATH			"i915/glk_dmc_ver1_04.bin"
#define GLK_CSR_VERSION_REQUIRED	CSR_VERSION(1, 4)
#define GLK_CSR_MAX_FW_SIZE		0x4000
MODULE_FIRMWARE(GLK_CSR_PATH);

#define KBL_CSR_PATH			"i915/kbl_dmc_ver1_04.bin"
#define KBL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 4)
#define KBL_CSR_MAX_FW_SIZE		BXT_CSR_MAX_FW_SIZE
MODULE_FIRMWARE(KBL_CSR_PATH);

#define SKL_CSR_PATH			"i915/skl_dmc_ver1_27.bin"
#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 27)
#define SKL_CSR_MAX_FW_SIZE		BXT_CSR_MAX_FW_SIZE
MODULE_FIRMWARE(SKL_CSR_PATH);

#define BXT_CSR_PATH			"i915/bxt_dmc_ver1_07.bin"
#define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
#define BXT_CSR_MAX_FW_SIZE		0x3000
MODULE_FIRMWARE(BXT_CSR_PATH);

#define CSR_DEFAULT_FW_OFFSET		0xFFFFFFFF
#define PACKAGE_MAX_FW_INFO_ENTRIES	20
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES	32
#define DMC_V1_MAX_MMIO_COUNT		8
#define DMC_V3_MAX_MMIO_COUNT		20

struct intel_css_header {
	/* 0x09 for DMC */
	u32 module_type;

	/* Includes the DMC specific header in dwords */
	u32 header_len;

	/* always 0x10000 */
	u32 header_ver;

	/* Not used */
	u32 module_id;

	/* Not used */
	u32 module_vendor;

	/* in YYYYMMDD format */
	u32 date;

	/* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
	u32 size;

	/* Not used */
	u32 key_size;

	/* Not used */
	u32 modulus_size;

	/* Not used */
	u32 exponent_size;

	/* Not used */
	u32 reserved1[12];

	/* Major Minor */
	u32 version;

	/* Not used */
	u32 reserved2[8];

	/* Not used */
	u32 kernel_header_info;
} __packed;

struct intel_fw_info {
	u8 reserved1;

	/* reserved on package_header version 1, must be 0 on version 2 */
	u8 dmc_id;

	/* Stepping (A, B, C, ..., *). * is a wildcard */
	char stepping;

	/* Sub-stepping (0, 1, ..., *). * is a wildcard */
	char substepping;

	u32 offset;
	u32 reserved2;
} __packed;

struct intel_package_header {
	/* DMC container header length in dwords */
	u8 header_len;

	/* 0x01, 0x02 */
	u8 header_ver;

	u8 reserved[10];

	/* Number of valid entries in the FWInfo array below */
	u32 num_entries;
} __packed;

struct intel_dmc_header_base {
	/* always 0x40403E3E */
	u32 signature;

	/* DMC binary header length */
	u8 header_len;

	/* 0x01 */
	u8 header_ver;

	/* Reserved */
	u16 dmcc_ver;

	/* Major, Minor */
	u32 project;

	/* Firmware program size (excluding header) in dwords */
	u32 fw_size;

	/* Major Minor version */
	u32 fw_version;
} __packed;

struct intel_dmc_header_v1 {
	struct intel_dmc_header_base base;

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V1_MAX_MMIO_COUNT];

	/* FW filename */
	char dfile[32];

	u32 reserved1[2];
} __packed;

struct intel_dmc_header_v3 {
	struct intel_dmc_header_base base;

	/* DMC RAM start MMIO address */
	u32 start_mmioaddr;

	u32 reserved[9];

	/* FW filename */
	char dfile[32];

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V3_MAX_MMIO_COUNT];
} __packed;

struct stepping_info {
	char stepping;
	char substepping;
};

static const struct stepping_info skl_stepping_info[] = {
	{'A', '0'}, {'B', '0'}, {'C', '0'},
	{'D', '0'}, {'E', '0'}, {'F', '0'},
	{'G', '0'}, {'H', '0'}, {'I', '0'},
	{'J', '0'}, {'K', '0'}
};

static const struct stepping_info bxt_stepping_info[] = {
	{'A', '0'}, {'A', '1'}, {'A', '2'},
	{'B', '0'}, {'B', '1'}, {'B', '2'}
};

static const struct stepping_info icl_stepping_info[] = {
	{'A', '0'}, {'A', '1'}, {'A', '2'},
	{'B', '0'}, {'B', '2'},
	{'C', '0'}
};

static const struct stepping_info no_stepping_info = { '*', '*' };

static const struct stepping_info *
intel_get_stepping_info(struct drm_i915_private *dev_priv)
{
	const struct stepping_info *si;
	unsigned int size;

	if (IS_ICELAKE(dev_priv)) {
		size = ARRAY_SIZE(icl_stepping_info);
		si = icl_stepping_info;
	} else if (IS_SKYLAKE(dev_priv)) {
		size = ARRAY_SIZE(skl_stepping_info);
		si = skl_stepping_info;
	} else if (IS_BROXTON(dev_priv)) {
		size = ARRAY_SIZE(bxt_stepping_info);
		si = bxt_stepping_info;
	} else {
		size = 0;
		si = NULL;
	}

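	/*
	 * Index the stepping table by device revision; revisions beyond the
	 * table fall back to the wildcard entry.
	 */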
	if (INTEL_REVID(dev_priv) < size)
		return si + INTEL_REVID(dev_priv);

	return &no_stepping_info;
}

static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
	u32 val, mask;

	mask = DC_STATE_DEBUG_MASK_MEMORY_UP;

	if (IS_GEN9_LP(dev_priv))
		mask |= DC_STATE_DEBUG_MASK_CORES;

	/* These bits never need to be cleared afterwards */
	val = intel_de_read(dev_priv, DC_STATE_DEBUG);
	if ((val & mask) != mask) {
		val |= mask;
		intel_de_write(dev_priv, DC_STATE_DEBUG, val);
		intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
	}
}

/**
 * intel_csr_load_program() - write the firmware from memory to registers.
 * @dev_priv: i915 drm device.
 *
 * CSR firmware is read from a .bin file and kept in internal memory one time.
 * Every time the display comes back from a low power state this function is
 * called to copy the firmware from internal memory to the registers.
 */
void intel_csr_load_program(struct drm_i915_private *dev_priv)
{
	u32 *payload = dev_priv->csr.dmc_payload;
	u32 i, fw_size;

	if (!HAS_CSR(dev_priv)) {
		drm_err(&dev_priv->drm,
			"No CSR support available for this platform\n");
		return;
	}

	if (!dev_priv->csr.dmc_payload) {
		drm_err(&dev_priv->drm,
			"Tried to program CSR with empty payload\n");
		return;
	}

	fw_size = dev_priv->csr.dmc_fw_size;
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

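	/*
	 * Copy the payload with preemption disabled so the long run of
	 * program-memory register writes is not preempted partway through.
	 */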
	preempt_disable();

	for (i = 0; i < fw_size; i++)
		intel_uncore_write_fw(&dev_priv->uncore, CSR_PROGRAM(i),
				      payload[i]);

	preempt_enable();

	for (i = 0; i < dev_priv->csr.mmio_count; i++) {
		intel_de_write(dev_priv, dev_priv->csr.mmioaddr[i],
			       dev_priv->csr.mmiodata[i]);
	}

	dev_priv->csr.dc_state = 0;

	gen9_set_dc_state_debugmask(dev_priv);
}

/*
 * Search fw_info table for dmc_offset to find firmware binary: num_entries is
 * already sanitized.
 */
static u32 find_dmc_fw_offset(const struct intel_fw_info *fw_info,
			      unsigned int num_entries,
			      const struct stepping_info *si,
			      u8 package_ver)
{
	u32 dmc_offset = CSR_DEFAULT_FW_OFFSET;
	unsigned int i;

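	/*
	 * An exact stepping/substepping match, or a stepping match with a
	 * wildcard substepping, wins immediately; a full wildcard entry is
	 * only remembered as a fallback.
	 */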
	for (i = 0; i < num_entries; i++) {
		if (package_ver > 1 && fw_info[i].dmc_id != 0)
			continue;

		if (fw_info[i].substepping == '*' &&
		    si->stepping == fw_info[i].stepping) {
			dmc_offset = fw_info[i].offset;
			break;
		}

		if (si->stepping == fw_info[i].stepping &&
		    si->substepping == fw_info[i].substepping) {
			dmc_offset = fw_info[i].offset;
			break;
		}

		if (fw_info[i].stepping == '*' &&
		    fw_info[i].substepping == '*') {
			/*
			 * In theory we should stop the search as generic
			 * entries should always come after the more specific
			 * ones, but let's continue to make sure to work even
			 * with "broken" firmwares. If we don't find a more
			 * specific one, then we use this entry
			 */
			dmc_offset = fw_info[i].offset;
		}
	}

	return dmc_offset;
}

static u32 parse_csr_fw_dmc(struct intel_csr *csr,
			    const struct intel_dmc_header_base *dmc_header,
			    size_t rem_size)
{
	unsigned int header_len_bytes, dmc_header_size, payload_size, i;
	const u32 *mmioaddr, *mmiodata;
	u32 mmio_count, mmio_count_max;
	u8 *payload;

	BUILD_BUG_ON(ARRAY_SIZE(csr->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
		     ARRAY_SIZE(csr->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);

	/*
	 * Check if we can access the common fields; we will check again below
	 * after we have read the version.
	 */
	if (rem_size < sizeof(struct intel_dmc_header_base))
		goto error_truncated;

	/* Cope with small differences between v1 and v3 */
	if (dmc_header->header_ver == 3) {
		const struct intel_dmc_header_v3 *v3 =
			(const struct intel_dmc_header_v3 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v3))
			goto error_truncated;

		mmioaddr = v3->mmioaddr;
		mmiodata = v3->mmiodata;
		mmio_count = v3->mmio_count;
		mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
		/* header_len is in dwords */
		header_len_bytes = dmc_header->header_len * 4;
		dmc_header_size = sizeof(*v3);
	} else if (dmc_header->header_ver == 1) {
		const struct intel_dmc_header_v1 *v1 =
			(const struct intel_dmc_header_v1 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v1))
			goto error_truncated;

		mmioaddr = v1->mmioaddr;
		mmiodata = v1->mmiodata;
		mmio_count = v1->mmio_count;
		mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
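		/* unlike v3, header_len is already in bytes here */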
		header_len_bytes = dmc_header->header_len;
		dmc_header_size = sizeof(*v1);
	} else {
		DRM_ERROR("Unknown DMC fw header version: %u\n",
			  dmc_header->header_ver);
		return 0;
	}

	if (header_len_bytes != dmc_header_size) {
		DRM_ERROR("DMC firmware has wrong dmc header length "
			  "(%u bytes)\n", header_len_bytes);
		return 0;
	}

	/* Cache the dmc header info. */
	if (mmio_count > mmio_count_max) {
		DRM_ERROR("DMC firmware has wrong mmio count %u\n", mmio_count);
		return 0;
	}

	for (i = 0; i < mmio_count; i++) {
		if (mmioaddr[i] < CSR_MMIO_START_RANGE ||
		    mmioaddr[i] > CSR_MMIO_END_RANGE) {
			DRM_ERROR("DMC firmware has wrong mmio address 0x%x\n",
				  mmioaddr[i]);
			return 0;
		}
		csr->mmioaddr[i] = _MMIO(mmioaddr[i]);
		csr->mmiodata[i] = mmiodata[i];
	}
	csr->mmio_count = mmio_count;

	rem_size -= header_len_bytes;

	/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
	payload_size = dmc_header->fw_size * 4;
	if (rem_size < payload_size)
		goto error_truncated;

	if (payload_size > csr->max_fw_size) {
		DRM_ERROR("DMC FW too big (%u bytes)\n", payload_size);
		return 0;
	}
	csr->dmc_fw_size = dmc_header->fw_size;

	csr->dmc_payload = kmalloc(payload_size, GFP_KERNEL);
	if (!csr->dmc_payload) {
		DRM_ERROR("Memory allocation failed for dmc payload\n");
		return 0;
	}

	payload = (u8 *)(dmc_header) + header_len_bytes;
	memcpy(csr->dmc_payload, payload, payload_size);

	return header_len_bytes + payload_size;

error_truncated:
	DRM_ERROR("Truncated DMC firmware, refusing.\n");
	return 0;
}

static u32
parse_csr_fw_package(struct intel_csr *csr,
		     const struct intel_package_header *package_header,
		     const struct stepping_info *si,
		     size_t rem_size)
{
	u32 package_size = sizeof(struct intel_package_header);
	u32 num_entries, max_entries, dmc_offset;
	const struct intel_fw_info *fw_info;

	if (rem_size < package_size)
		goto error_truncated;

	if (package_header->header_ver == 1) {
		max_entries = PACKAGE_MAX_FW_INFO_ENTRIES;
	} else if (package_header->header_ver == 2) {
		max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
	} else {
		DRM_ERROR("DMC firmware has unknown header version %u\n",
			  package_header->header_ver);
		return 0;
	}

	/*
	 * We should always have space for max_entries,
	 * even if not all are used
	 */
	package_size += max_entries * sizeof(struct intel_fw_info);
	if (rem_size < package_size)
		goto error_truncated;

	if (package_header->header_len * 4 != package_size) {
		DRM_ERROR("DMC firmware has wrong package header length "
			  "(%u bytes)\n", package_size);
		return 0;
	}

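	/* Clamp a bogus entry count rather than read past the FWInfo array */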
	num_entries = package_header->num_entries;
	if (WARN_ON(package_header->num_entries > max_entries))
		num_entries = max_entries;

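	/* The FWInfo entries immediately follow the fixed package header */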
	fw_info = (const struct intel_fw_info *)
		((u8 *)package_header + sizeof(*package_header));
	dmc_offset = find_dmc_fw_offset(fw_info, num_entries, si,
					package_header->header_ver);
	if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
		DRM_ERROR("DMC firmware not supported for %c stepping\n",
			  si->stepping);
		return 0;
	}

	/* dmc_offset is in dwords */
	return package_size + dmc_offset * 4;

error_truncated:
	DRM_ERROR("Truncated DMC firmware, refusing.\n");
	return 0;
}

/* Return number of bytes parsed or 0 on error */
static u32 parse_csr_fw_css(struct intel_csr *csr,
			    struct intel_css_header *css_header,
			    size_t rem_size)
{
	if (rem_size < sizeof(struct intel_css_header)) {
		DRM_ERROR("Truncated DMC firmware, refusing.\n");
		return 0;
	}

	if (sizeof(struct intel_css_header) !=
	    (css_header->header_len * 4)) {
		DRM_ERROR("DMC firmware has wrong CSS header length "
			  "(%u bytes)\n",
			  (css_header->header_len * 4));
		return 0;
	}

	if (csr->required_version &&
	    css_header->version != csr->required_version) {
		DRM_INFO("Refusing to load DMC firmware v%u.%u,"
			 " please use v%u.%u\n",
			 CSR_VERSION_MAJOR(css_header->version),
			 CSR_VERSION_MINOR(css_header->version),
			 CSR_VERSION_MAJOR(csr->required_version),
			 CSR_VERSION_MINOR(csr->required_version));
		return 0;
	}

	csr->version = css_header->version;

	return sizeof(struct intel_css_header);
}

static void parse_csr_fw(struct drm_i915_private *dev_priv,
			 const struct firmware *fw)
{
	struct intel_css_header *css_header;
	struct intel_package_header *package_header;
	struct intel_dmc_header_base *dmc_header;
	struct intel_csr *csr = &dev_priv->csr;
	const struct stepping_info *si = intel_get_stepping_info(dev_priv);
	u32 readcount = 0;
	u32 r;

	if (!fw)
		return;

	/* Extract CSS Header information */
	css_header = (struct intel_css_header *)fw->data;
	r = parse_csr_fw_css(csr, css_header, fw->size);
	if (!r)
		return;

	readcount += r;

	/* Extract Package Header information */
	package_header = (struct intel_package_header *)&fw->data[readcount];
	r = parse_csr_fw_package(csr, package_header, si, fw->size - readcount);
	if (!r)
		return;

	readcount += r;

	/* Extract dmc_header information */
	dmc_header = (struct intel_dmc_header_base *)&fw->data[readcount];
	parse_csr_fw_dmc(csr, dmc_header, fw->size - readcount);
}

static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);
	dev_priv->csr.wakeref =
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}

static void intel_csr_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&dev_priv->csr.wakeref);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}

static void csr_load_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv;
	struct intel_csr *csr;
	const struct firmware *fw = NULL;

	dev_priv = container_of(work, typeof(*dev_priv), csr.work);
	csr = &dev_priv->csr;

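	/*
	 * If the firmware is missing, fw stays NULL and parse_csr_fw() bails
	 * out, leaving dmc_payload unset.
	 */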
	request_firmware(&fw, dev_priv->csr.fw_path, &dev_priv->drm.pdev->dev);
	parse_csr_fw(dev_priv, fw);

	if (dev_priv->csr.dmc_payload) {
		intel_csr_load_program(dev_priv);
		intel_csr_runtime_pm_put(dev_priv);

		drm_info(&dev_priv->drm,
			 "Finished loading DMC firmware %s (v%u.%u)\n",
			 dev_priv->csr.fw_path, CSR_VERSION_MAJOR(csr->version),
			 CSR_VERSION_MINOR(csr->version));
	} else {
		drm_notice(&dev_priv->drm,
			   "Failed to load DMC firmware %s."
			   " Disabling runtime power management.\n",
			   csr->fw_path);
		drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
			   INTEL_UC_FIRMWARE_URL);
	}

	release_firmware(fw);
}

/**
 * intel_csr_ucode_init() - initialize the firmware loading.
 * @dev_priv: i915 drm device.
 *
 * This function is called at the time of loading the display driver to read
 * the firmware from a .bin file and copy it into internal memory.
 */
void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
{
	struct intel_csr *csr = &dev_priv->csr;

	INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);

	if (!HAS_CSR(dev_priv))
		return;

	/*
	 * Obtain a runtime pm reference, until CSR is loaded, to avoid entering
	 * runtime-suspend.
	 *
	 * On error, we return with the rpm wakeref held to prevent runtime
	 * suspend as runtime suspend *requires* a working CSR for whatever
	 * reason.
	 */
	intel_csr_runtime_pm_get(dev_priv);

	if (IS_ROCKETLAKE(dev_priv)) {
		csr->fw_path = RKL_CSR_PATH;
		csr->required_version = RKL_CSR_VERSION_REQUIRED;
		csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
	} else if (INTEL_GEN(dev_priv) >= 12) {
		csr->fw_path = TGL_CSR_PATH;
		csr->required_version = TGL_CSR_VERSION_REQUIRED;
		/* Allow to load fw via parameter using the last known size */
		csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
	} else if (IS_GEN(dev_priv, 11)) {
		csr->fw_path = ICL_CSR_PATH;
		csr->required_version = ICL_CSR_VERSION_REQUIRED;
		csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
	} else if (IS_CANNONLAKE(dev_priv)) {
		csr->fw_path = CNL_CSR_PATH;
		csr->required_version = CNL_CSR_VERSION_REQUIRED;
		csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
	} else if (IS_GEMINILAKE(dev_priv)) {
		csr->fw_path = GLK_CSR_PATH;
		csr->required_version = GLK_CSR_VERSION_REQUIRED;
		csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
	} else if (IS_KABYLAKE(dev_priv) ||
		   IS_COFFEELAKE(dev_priv) ||
		   IS_COMETLAKE(dev_priv)) {
		csr->fw_path = KBL_CSR_PATH;
		csr->required_version = KBL_CSR_VERSION_REQUIRED;
		csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
	} else if (IS_SKYLAKE(dev_priv)) {
		csr->fw_path = SKL_CSR_PATH;
		csr->required_version = SKL_CSR_VERSION_REQUIRED;
		csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
	} else if (IS_BROXTON(dev_priv)) {
		csr->fw_path = BXT_CSR_PATH;
		csr->required_version = BXT_CSR_VERSION_REQUIRED;
		csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
	}

	if (dev_priv->params.dmc_firmware_path) {
		if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
			csr->fw_path = NULL;
			drm_info(&dev_priv->drm,
				 "Disabling CSR firmware and runtime PM\n");
			return;
		}

		csr->fw_path = dev_priv->params.dmc_firmware_path;
		/* Bypass version check for firmware override. */
		csr->required_version = 0;
	}

	if (csr->fw_path == NULL) {
		drm_dbg_kms(&dev_priv->drm,
			    "No known CSR firmware for platform, disabling runtime PM\n");
		return;
	}

	drm_dbg_kms(&dev_priv->drm, "Loading %s\n", csr->fw_path);
	schedule_work(&dev_priv->csr.work);
}

/**
 * intel_csr_ucode_suspend() - prepare CSR firmware before system suspend
 * @dev_priv: i915 drm device
 *
 * Prepare the DMC firmware before entering system suspend. This includes
 * flushing pending work items and releasing any resources acquired during
 * init.
 */
void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
{
	if (!HAS_CSR(dev_priv))
		return;

	flush_work(&dev_priv->csr.work);

	/* Drop the reference held in case DMC isn't loaded. */
	if (!dev_priv->csr.dmc_payload)
		intel_csr_runtime_pm_put(dev_priv);
}

/**
 * intel_csr_ucode_resume() - init CSR firmware during system resume
 * @dev_priv: i915 drm device
 *
 * Reinitialize the DMC firmware during system resume, reacquiring any
 * resources released in intel_csr_ucode_suspend().
 */
void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
{
	if (!HAS_CSR(dev_priv))
		return;

	/*
	 * Reacquire the reference to keep RPM disabled in case DMC isn't
	 * loaded.
	 */
	if (!dev_priv->csr.dmc_payload)
		intel_csr_runtime_pm_get(dev_priv);
}

/**
 * intel_csr_ucode_fini() - unload the CSR firmware.
 * @dev_priv: i915 drm device.
 *
 * Firmware unloading includes freeing the internal memory and resetting the
 * firmware loading status.
 */
void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
{
	if (!HAS_CSR(dev_priv))
		return;

	intel_csr_ucode_suspend(dev_priv);
	drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);

	kfree(dev_priv->csr.dmc_payload);
}