1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * PowerNV OPAL high level interfaces
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright 2011 IBM Corp.
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #define pr_fmt(fmt) "opal: " fmt
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include <linux/printk.h>
11*4882a593Smuzhiyun #include <linux/types.h>
12*4882a593Smuzhiyun #include <linux/of.h>
13*4882a593Smuzhiyun #include <linux/of_fdt.h>
14*4882a593Smuzhiyun #include <linux/of_platform.h>
15*4882a593Smuzhiyun #include <linux/of_address.h>
16*4882a593Smuzhiyun #include <linux/interrupt.h>
17*4882a593Smuzhiyun #include <linux/notifier.h>
18*4882a593Smuzhiyun #include <linux/slab.h>
19*4882a593Smuzhiyun #include <linux/sched.h>
20*4882a593Smuzhiyun #include <linux/kobject.h>
21*4882a593Smuzhiyun #include <linux/delay.h>
22*4882a593Smuzhiyun #include <linux/memblock.h>
23*4882a593Smuzhiyun #include <linux/kthread.h>
24*4882a593Smuzhiyun #include <linux/freezer.h>
25*4882a593Smuzhiyun #include <linux/kmsg_dump.h>
26*4882a593Smuzhiyun #include <linux/console.h>
27*4882a593Smuzhiyun #include <linux/sched/debug.h>
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun #include <asm/machdep.h>
30*4882a593Smuzhiyun #include <asm/opal.h>
31*4882a593Smuzhiyun #include <asm/firmware.h>
32*4882a593Smuzhiyun #include <asm/mce.h>
33*4882a593Smuzhiyun #include <asm/imc-pmu.h>
34*4882a593Smuzhiyun #include <asm/bug.h>
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun #include "powernv.h"
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun #define OPAL_MSG_QUEUE_MAX 16
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun struct opal_msg_node {
41*4882a593Smuzhiyun struct list_head list;
42*4882a593Smuzhiyun struct opal_msg msg;
43*4882a593Smuzhiyun };
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun static DEFINE_SPINLOCK(msg_list_lock);
46*4882a593Smuzhiyun static LIST_HEAD(msg_list);
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun /* /sys/firmware/opal */
49*4882a593Smuzhiyun struct kobject *opal_kobj;
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun struct opal {
52*4882a593Smuzhiyun u64 base;
53*4882a593Smuzhiyun u64 entry;
54*4882a593Smuzhiyun u64 size;
55*4882a593Smuzhiyun } opal;
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun struct mcheck_recoverable_range {
58*4882a593Smuzhiyun u64 start_addr;
59*4882a593Smuzhiyun u64 end_addr;
60*4882a593Smuzhiyun u64 recover_addr;
61*4882a593Smuzhiyun };
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun static int msg_list_size;
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun static struct mcheck_recoverable_range *mc_recoverable_range;
66*4882a593Smuzhiyun static int mc_recoverable_range_len;
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun struct device_node *opal_node;
69*4882a593Smuzhiyun static DEFINE_SPINLOCK(opal_write_lock);
70*4882a593Smuzhiyun static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
71*4882a593Smuzhiyun static uint32_t opal_heartbeat;
72*4882a593Smuzhiyun static struct task_struct *kopald_tsk;
73*4882a593Smuzhiyun static struct opal_msg *opal_msg;
74*4882a593Smuzhiyun static u32 opal_msg_size __ro_after_init;
75*4882a593Smuzhiyun
opal_configure_cores(void)76*4882a593Smuzhiyun void opal_configure_cores(void)
77*4882a593Smuzhiyun {
78*4882a593Smuzhiyun u64 reinit_flags = 0;
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun /* Do the actual re-init, This will clobber all FPRs, VRs, etc...
81*4882a593Smuzhiyun *
82*4882a593Smuzhiyun * It will preserve non volatile GPRs and HSPRG0/1. It will
83*4882a593Smuzhiyun * also restore HIDs and other SPRs to their original value
84*4882a593Smuzhiyun * but it might clobber a bunch.
85*4882a593Smuzhiyun */
86*4882a593Smuzhiyun #ifdef __BIG_ENDIAN__
87*4882a593Smuzhiyun reinit_flags |= OPAL_REINIT_CPUS_HILE_BE;
88*4882a593Smuzhiyun #else
89*4882a593Smuzhiyun reinit_flags |= OPAL_REINIT_CPUS_HILE_LE;
90*4882a593Smuzhiyun #endif
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun /*
93*4882a593Smuzhiyun * POWER9 always support running hash:
94*4882a593Smuzhiyun * ie. Host hash supports hash guests
95*4882a593Smuzhiyun * Host radix supports hash/radix guests
96*4882a593Smuzhiyun */
97*4882a593Smuzhiyun if (early_cpu_has_feature(CPU_FTR_ARCH_300)) {
98*4882a593Smuzhiyun reinit_flags |= OPAL_REINIT_CPUS_MMU_HASH;
99*4882a593Smuzhiyun if (early_radix_enabled())
100*4882a593Smuzhiyun reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX;
101*4882a593Smuzhiyun }
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun opal_reinit_cpus(reinit_flags);
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun /* Restore some bits */
106*4882a593Smuzhiyun if (cur_cpu_spec->cpu_restore)
107*4882a593Smuzhiyun cur_cpu_spec->cpu_restore();
108*4882a593Smuzhiyun }
109*4882a593Smuzhiyun
early_init_dt_scan_opal(unsigned long node,const char * uname,int depth,void * data)110*4882a593Smuzhiyun int __init early_init_dt_scan_opal(unsigned long node,
111*4882a593Smuzhiyun const char *uname, int depth, void *data)
112*4882a593Smuzhiyun {
113*4882a593Smuzhiyun const void *basep, *entryp, *sizep;
114*4882a593Smuzhiyun int basesz, entrysz, runtimesz;
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
117*4882a593Smuzhiyun return 0;
118*4882a593Smuzhiyun
119*4882a593Smuzhiyun basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
120*4882a593Smuzhiyun entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
121*4882a593Smuzhiyun sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun if (!basep || !entryp || !sizep)
124*4882a593Smuzhiyun return 1;
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun opal.base = of_read_number(basep, basesz/4);
127*4882a593Smuzhiyun opal.entry = of_read_number(entryp, entrysz/4);
128*4882a593Smuzhiyun opal.size = of_read_number(sizep, runtimesz/4);
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%d)\n",
131*4882a593Smuzhiyun opal.base, basep, basesz);
132*4882a593Smuzhiyun pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%d)\n",
133*4882a593Smuzhiyun opal.entry, entryp, entrysz);
134*4882a593Smuzhiyun pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n",
135*4882a593Smuzhiyun opal.size, sizep, runtimesz);
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
138*4882a593Smuzhiyun powerpc_firmware_features |= FW_FEATURE_OPAL;
139*4882a593Smuzhiyun pr_debug("OPAL detected !\n");
140*4882a593Smuzhiyun } else {
141*4882a593Smuzhiyun panic("OPAL != V3 detected, no longer supported.\n");
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun return 1;
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun
early_init_dt_scan_recoverable_ranges(unsigned long node,const char * uname,int depth,void * data)147*4882a593Smuzhiyun int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
148*4882a593Smuzhiyun const char *uname, int depth, void *data)
149*4882a593Smuzhiyun {
150*4882a593Smuzhiyun int i, psize, size;
151*4882a593Smuzhiyun const __be32 *prop;
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
154*4882a593Smuzhiyun return 0;
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun if (!prop)
159*4882a593Smuzhiyun return 1;
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun pr_debug("Found machine check recoverable ranges.\n");
162*4882a593Smuzhiyun
163*4882a593Smuzhiyun /*
164*4882a593Smuzhiyun * Calculate number of available entries.
165*4882a593Smuzhiyun *
166*4882a593Smuzhiyun * Each recoverable address range entry is (start address, len,
167*4882a593Smuzhiyun * recovery address), 2 cells each for start and recovery address,
168*4882a593Smuzhiyun * 1 cell for len, totalling 5 cells per entry.
169*4882a593Smuzhiyun */
170*4882a593Smuzhiyun mc_recoverable_range_len = psize / (sizeof(*prop) * 5);
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun /* Sanity check */
173*4882a593Smuzhiyun if (!mc_recoverable_range_len)
174*4882a593Smuzhiyun return 1;
175*4882a593Smuzhiyun
176*4882a593Smuzhiyun /* Size required to hold all the entries. */
177*4882a593Smuzhiyun size = mc_recoverable_range_len *
178*4882a593Smuzhiyun sizeof(struct mcheck_recoverable_range);
179*4882a593Smuzhiyun
180*4882a593Smuzhiyun /*
181*4882a593Smuzhiyun * Allocate a buffer to hold the MC recoverable ranges.
182*4882a593Smuzhiyun */
183*4882a593Smuzhiyun mc_recoverable_range = memblock_alloc(size, __alignof__(u64));
184*4882a593Smuzhiyun if (!mc_recoverable_range)
185*4882a593Smuzhiyun panic("%s: Failed to allocate %u bytes align=0x%lx\n",
186*4882a593Smuzhiyun __func__, size, __alignof__(u64));
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun for (i = 0; i < mc_recoverable_range_len; i++) {
189*4882a593Smuzhiyun mc_recoverable_range[i].start_addr =
190*4882a593Smuzhiyun of_read_number(prop + (i * 5) + 0, 2);
191*4882a593Smuzhiyun mc_recoverable_range[i].end_addr =
192*4882a593Smuzhiyun mc_recoverable_range[i].start_addr +
193*4882a593Smuzhiyun of_read_number(prop + (i * 5) + 2, 1);
194*4882a593Smuzhiyun mc_recoverable_range[i].recover_addr =
195*4882a593Smuzhiyun of_read_number(prop + (i * 5) + 3, 2);
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
198*4882a593Smuzhiyun mc_recoverable_range[i].start_addr,
199*4882a593Smuzhiyun mc_recoverable_range[i].end_addr,
200*4882a593Smuzhiyun mc_recoverable_range[i].recover_addr);
201*4882a593Smuzhiyun }
202*4882a593Smuzhiyun return 1;
203*4882a593Smuzhiyun }
204*4882a593Smuzhiyun
opal_register_exception_handlers(void)205*4882a593Smuzhiyun static int __init opal_register_exception_handlers(void)
206*4882a593Smuzhiyun {
207*4882a593Smuzhiyun #ifdef __BIG_ENDIAN__
208*4882a593Smuzhiyun u64 glue;
209*4882a593Smuzhiyun
210*4882a593Smuzhiyun if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
211*4882a593Smuzhiyun return -ENODEV;
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun /* Hookup some exception handlers except machine check. We use the
214*4882a593Smuzhiyun * fwnmi area at 0x7000 to provide the glue space to OPAL
215*4882a593Smuzhiyun */
216*4882a593Smuzhiyun glue = 0x7000;
217*4882a593Smuzhiyun
218*4882a593Smuzhiyun /*
219*4882a593Smuzhiyun * Only ancient OPAL firmware requires this.
220*4882a593Smuzhiyun * Specifically, firmware from FW810.00 (released June 2014)
221*4882a593Smuzhiyun * through FW810.20 (Released October 2014).
222*4882a593Smuzhiyun *
223*4882a593Smuzhiyun * Check if we are running on newer (post Oct 2014) firmware that
224*4882a593Smuzhiyun * exports the OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to
225*4882a593Smuzhiyun * patch the HMI interrupt and we catch it directly in Linux.
226*4882a593Smuzhiyun *
227*4882a593Smuzhiyun * For older firmware (i.e < FW810.20), we fallback to old behavior and
228*4882a593Smuzhiyun * let OPAL patch the HMI vector and handle it inside OPAL firmware.
229*4882a593Smuzhiyun *
230*4882a593Smuzhiyun * For newer firmware we catch/handle the HMI directly in Linux.
231*4882a593Smuzhiyun */
232*4882a593Smuzhiyun if (!opal_check_token(OPAL_HANDLE_HMI)) {
233*4882a593Smuzhiyun pr_info("Old firmware detected, OPAL handles HMIs.\n");
234*4882a593Smuzhiyun opal_register_exception_handler(
235*4882a593Smuzhiyun OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
236*4882a593Smuzhiyun 0, glue);
237*4882a593Smuzhiyun glue += 128;
238*4882a593Smuzhiyun }
239*4882a593Smuzhiyun
240*4882a593Smuzhiyun /*
241*4882a593Smuzhiyun * Only applicable to ancient firmware, all modern
242*4882a593Smuzhiyun * (post March 2015/skiboot 5.0) firmware will just return
243*4882a593Smuzhiyun * OPAL_UNSUPPORTED.
244*4882a593Smuzhiyun */
245*4882a593Smuzhiyun opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
246*4882a593Smuzhiyun #endif
247*4882a593Smuzhiyun
248*4882a593Smuzhiyun return 0;
249*4882a593Smuzhiyun }
250*4882a593Smuzhiyun machine_early_initcall(powernv, opal_register_exception_handlers);
251*4882a593Smuzhiyun
queue_replay_msg(void * msg)252*4882a593Smuzhiyun static void queue_replay_msg(void *msg)
253*4882a593Smuzhiyun {
254*4882a593Smuzhiyun struct opal_msg_node *msg_node;
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun if (msg_list_size < OPAL_MSG_QUEUE_MAX) {
257*4882a593Smuzhiyun msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC);
258*4882a593Smuzhiyun if (msg_node) {
259*4882a593Smuzhiyun INIT_LIST_HEAD(&msg_node->list);
260*4882a593Smuzhiyun memcpy(&msg_node->msg, msg, sizeof(struct opal_msg));
261*4882a593Smuzhiyun list_add_tail(&msg_node->list, &msg_list);
262*4882a593Smuzhiyun msg_list_size++;
263*4882a593Smuzhiyun } else
264*4882a593Smuzhiyun pr_warn_once("message queue no memory\n");
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun if (msg_list_size >= OPAL_MSG_QUEUE_MAX)
267*4882a593Smuzhiyun pr_warn_once("message queue full\n");
268*4882a593Smuzhiyun }
269*4882a593Smuzhiyun }
270*4882a593Smuzhiyun
dequeue_replay_msg(enum opal_msg_type msg_type)271*4882a593Smuzhiyun static void dequeue_replay_msg(enum opal_msg_type msg_type)
272*4882a593Smuzhiyun {
273*4882a593Smuzhiyun struct opal_msg_node *msg_node, *tmp;
274*4882a593Smuzhiyun
275*4882a593Smuzhiyun list_for_each_entry_safe(msg_node, tmp, &msg_list, list) {
276*4882a593Smuzhiyun if (be32_to_cpu(msg_node->msg.msg_type) != msg_type)
277*4882a593Smuzhiyun continue;
278*4882a593Smuzhiyun
279*4882a593Smuzhiyun atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
280*4882a593Smuzhiyun msg_type,
281*4882a593Smuzhiyun &msg_node->msg);
282*4882a593Smuzhiyun
283*4882a593Smuzhiyun list_del(&msg_node->list);
284*4882a593Smuzhiyun kfree(msg_node);
285*4882a593Smuzhiyun msg_list_size--;
286*4882a593Smuzhiyun }
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun /*
290*4882a593Smuzhiyun * Opal message notifier based on message type. Allow subscribers to get
291*4882a593Smuzhiyun * notified for specific messgae type.
292*4882a593Smuzhiyun */
opal_message_notifier_register(enum opal_msg_type msg_type,struct notifier_block * nb)293*4882a593Smuzhiyun int opal_message_notifier_register(enum opal_msg_type msg_type,
294*4882a593Smuzhiyun struct notifier_block *nb)
295*4882a593Smuzhiyun {
296*4882a593Smuzhiyun int ret;
297*4882a593Smuzhiyun unsigned long flags;
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun if (!nb || msg_type >= OPAL_MSG_TYPE_MAX) {
300*4882a593Smuzhiyun pr_warn("%s: Invalid arguments, msg_type:%d\n",
301*4882a593Smuzhiyun __func__, msg_type);
302*4882a593Smuzhiyun return -EINVAL;
303*4882a593Smuzhiyun }
304*4882a593Smuzhiyun
305*4882a593Smuzhiyun spin_lock_irqsave(&msg_list_lock, flags);
306*4882a593Smuzhiyun ret = atomic_notifier_chain_register(
307*4882a593Smuzhiyun &opal_msg_notifier_head[msg_type], nb);
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun /*
310*4882a593Smuzhiyun * If the registration succeeded, replay any queued messages that came
311*4882a593Smuzhiyun * in prior to the notifier chain registration. msg_list_lock held here
312*4882a593Smuzhiyun * to ensure they're delivered prior to any subsequent messages.
313*4882a593Smuzhiyun */
314*4882a593Smuzhiyun if (ret == 0)
315*4882a593Smuzhiyun dequeue_replay_msg(msg_type);
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun spin_unlock_irqrestore(&msg_list_lock, flags);
318*4882a593Smuzhiyun
319*4882a593Smuzhiyun return ret;
320*4882a593Smuzhiyun }
321*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_message_notifier_register);
322*4882a593Smuzhiyun
opal_message_notifier_unregister(enum opal_msg_type msg_type,struct notifier_block * nb)323*4882a593Smuzhiyun int opal_message_notifier_unregister(enum opal_msg_type msg_type,
324*4882a593Smuzhiyun struct notifier_block *nb)
325*4882a593Smuzhiyun {
326*4882a593Smuzhiyun return atomic_notifier_chain_unregister(
327*4882a593Smuzhiyun &opal_msg_notifier_head[msg_type], nb);
328*4882a593Smuzhiyun }
329*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_message_notifier_unregister);
330*4882a593Smuzhiyun
opal_message_do_notify(uint32_t msg_type,void * msg)331*4882a593Smuzhiyun static void opal_message_do_notify(uint32_t msg_type, void *msg)
332*4882a593Smuzhiyun {
333*4882a593Smuzhiyun unsigned long flags;
334*4882a593Smuzhiyun bool queued = false;
335*4882a593Smuzhiyun
336*4882a593Smuzhiyun spin_lock_irqsave(&msg_list_lock, flags);
337*4882a593Smuzhiyun if (opal_msg_notifier_head[msg_type].head == NULL) {
338*4882a593Smuzhiyun /*
339*4882a593Smuzhiyun * Queue up the msg since no notifiers have registered
340*4882a593Smuzhiyun * yet for this msg_type.
341*4882a593Smuzhiyun */
342*4882a593Smuzhiyun queue_replay_msg(msg);
343*4882a593Smuzhiyun queued = true;
344*4882a593Smuzhiyun }
345*4882a593Smuzhiyun spin_unlock_irqrestore(&msg_list_lock, flags);
346*4882a593Smuzhiyun
347*4882a593Smuzhiyun if (queued)
348*4882a593Smuzhiyun return;
349*4882a593Smuzhiyun
350*4882a593Smuzhiyun /* notify subscribers */
351*4882a593Smuzhiyun atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
352*4882a593Smuzhiyun msg_type, msg);
353*4882a593Smuzhiyun }
354*4882a593Smuzhiyun
opal_handle_message(void)355*4882a593Smuzhiyun static void opal_handle_message(void)
356*4882a593Smuzhiyun {
357*4882a593Smuzhiyun s64 ret;
358*4882a593Smuzhiyun u32 type;
359*4882a593Smuzhiyun
360*4882a593Smuzhiyun ret = opal_get_msg(__pa(opal_msg), opal_msg_size);
361*4882a593Smuzhiyun /* No opal message pending. */
362*4882a593Smuzhiyun if (ret == OPAL_RESOURCE)
363*4882a593Smuzhiyun return;
364*4882a593Smuzhiyun
365*4882a593Smuzhiyun /* check for errors. */
366*4882a593Smuzhiyun if (ret) {
367*4882a593Smuzhiyun pr_warn("%s: Failed to retrieve opal message, err=%lld\n",
368*4882a593Smuzhiyun __func__, ret);
369*4882a593Smuzhiyun return;
370*4882a593Smuzhiyun }
371*4882a593Smuzhiyun
372*4882a593Smuzhiyun type = be32_to_cpu(opal_msg->msg_type);
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun /* Sanity check */
375*4882a593Smuzhiyun if (type >= OPAL_MSG_TYPE_MAX) {
376*4882a593Smuzhiyun pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
377*4882a593Smuzhiyun return;
378*4882a593Smuzhiyun }
379*4882a593Smuzhiyun opal_message_do_notify(type, (void *)opal_msg);
380*4882a593Smuzhiyun }
381*4882a593Smuzhiyun
opal_message_notify(int irq,void * data)382*4882a593Smuzhiyun static irqreturn_t opal_message_notify(int irq, void *data)
383*4882a593Smuzhiyun {
384*4882a593Smuzhiyun opal_handle_message();
385*4882a593Smuzhiyun return IRQ_HANDLED;
386*4882a593Smuzhiyun }
387*4882a593Smuzhiyun
opal_message_init(struct device_node * opal_node)388*4882a593Smuzhiyun static int __init opal_message_init(struct device_node *opal_node)
389*4882a593Smuzhiyun {
390*4882a593Smuzhiyun int ret, i, irq;
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun ret = of_property_read_u32(opal_node, "opal-msg-size", &opal_msg_size);
393*4882a593Smuzhiyun if (ret) {
394*4882a593Smuzhiyun pr_notice("Failed to read opal-msg-size property\n");
395*4882a593Smuzhiyun opal_msg_size = sizeof(struct opal_msg);
396*4882a593Smuzhiyun }
397*4882a593Smuzhiyun
398*4882a593Smuzhiyun opal_msg = kmalloc(opal_msg_size, GFP_KERNEL);
399*4882a593Smuzhiyun if (!opal_msg) {
400*4882a593Smuzhiyun opal_msg_size = sizeof(struct opal_msg);
401*4882a593Smuzhiyun /* Try to allocate fixed message size */
402*4882a593Smuzhiyun opal_msg = kmalloc(opal_msg_size, GFP_KERNEL);
403*4882a593Smuzhiyun BUG_ON(opal_msg == NULL);
404*4882a593Smuzhiyun }
405*4882a593Smuzhiyun
406*4882a593Smuzhiyun for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
407*4882a593Smuzhiyun ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);
408*4882a593Smuzhiyun
409*4882a593Smuzhiyun irq = opal_event_request(ilog2(OPAL_EVENT_MSG_PENDING));
410*4882a593Smuzhiyun if (!irq) {
411*4882a593Smuzhiyun pr_err("%s: Can't register OPAL event irq (%d)\n",
412*4882a593Smuzhiyun __func__, irq);
413*4882a593Smuzhiyun return irq;
414*4882a593Smuzhiyun }
415*4882a593Smuzhiyun
416*4882a593Smuzhiyun ret = request_irq(irq, opal_message_notify,
417*4882a593Smuzhiyun IRQ_TYPE_LEVEL_HIGH, "opal-msg", NULL);
418*4882a593Smuzhiyun if (ret) {
419*4882a593Smuzhiyun pr_err("%s: Can't request OPAL event irq (%d)\n",
420*4882a593Smuzhiyun __func__, ret);
421*4882a593Smuzhiyun return ret;
422*4882a593Smuzhiyun }
423*4882a593Smuzhiyun
424*4882a593Smuzhiyun return 0;
425*4882a593Smuzhiyun }
426*4882a593Smuzhiyun
opal_get_chars(uint32_t vtermno,char * buf,int count)427*4882a593Smuzhiyun int opal_get_chars(uint32_t vtermno, char *buf, int count)
428*4882a593Smuzhiyun {
429*4882a593Smuzhiyun s64 rc;
430*4882a593Smuzhiyun __be64 evt, len;
431*4882a593Smuzhiyun
432*4882a593Smuzhiyun if (!opal.entry)
433*4882a593Smuzhiyun return -ENODEV;
434*4882a593Smuzhiyun opal_poll_events(&evt);
435*4882a593Smuzhiyun if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
436*4882a593Smuzhiyun return 0;
437*4882a593Smuzhiyun len = cpu_to_be64(count);
438*4882a593Smuzhiyun rc = opal_console_read(vtermno, &len, buf);
439*4882a593Smuzhiyun if (rc == OPAL_SUCCESS)
440*4882a593Smuzhiyun return be64_to_cpu(len);
441*4882a593Smuzhiyun return 0;
442*4882a593Smuzhiyun }
443*4882a593Smuzhiyun
__opal_put_chars(uint32_t vtermno,const char * data,int total_len,bool atomic)444*4882a593Smuzhiyun static int __opal_put_chars(uint32_t vtermno, const char *data, int total_len, bool atomic)
445*4882a593Smuzhiyun {
446*4882a593Smuzhiyun unsigned long flags = 0 /* shut up gcc */;
447*4882a593Smuzhiyun int written;
448*4882a593Smuzhiyun __be64 olen;
449*4882a593Smuzhiyun s64 rc;
450*4882a593Smuzhiyun
451*4882a593Smuzhiyun if (!opal.entry)
452*4882a593Smuzhiyun return -ENODEV;
453*4882a593Smuzhiyun
454*4882a593Smuzhiyun if (atomic)
455*4882a593Smuzhiyun spin_lock_irqsave(&opal_write_lock, flags);
456*4882a593Smuzhiyun rc = opal_console_write_buffer_space(vtermno, &olen);
457*4882a593Smuzhiyun if (rc || be64_to_cpu(olen) < total_len) {
458*4882a593Smuzhiyun /* Closed -> drop characters */
459*4882a593Smuzhiyun if (rc)
460*4882a593Smuzhiyun written = total_len;
461*4882a593Smuzhiyun else
462*4882a593Smuzhiyun written = -EAGAIN;
463*4882a593Smuzhiyun goto out;
464*4882a593Smuzhiyun }
465*4882a593Smuzhiyun
466*4882a593Smuzhiyun /* Should not get a partial write here because space is available. */
467*4882a593Smuzhiyun olen = cpu_to_be64(total_len);
468*4882a593Smuzhiyun rc = opal_console_write(vtermno, &olen, data);
469*4882a593Smuzhiyun if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
470*4882a593Smuzhiyun if (rc == OPAL_BUSY_EVENT)
471*4882a593Smuzhiyun opal_poll_events(NULL);
472*4882a593Smuzhiyun written = -EAGAIN;
473*4882a593Smuzhiyun goto out;
474*4882a593Smuzhiyun }
475*4882a593Smuzhiyun
476*4882a593Smuzhiyun /* Closed or other error drop */
477*4882a593Smuzhiyun if (rc != OPAL_SUCCESS) {
478*4882a593Smuzhiyun written = opal_error_code(rc);
479*4882a593Smuzhiyun goto out;
480*4882a593Smuzhiyun }
481*4882a593Smuzhiyun
482*4882a593Smuzhiyun written = be64_to_cpu(olen);
483*4882a593Smuzhiyun if (written < total_len) {
484*4882a593Smuzhiyun if (atomic) {
485*4882a593Smuzhiyun /* Should not happen */
486*4882a593Smuzhiyun pr_warn("atomic console write returned partial "
487*4882a593Smuzhiyun "len=%d written=%d\n", total_len, written);
488*4882a593Smuzhiyun }
489*4882a593Smuzhiyun if (!written)
490*4882a593Smuzhiyun written = -EAGAIN;
491*4882a593Smuzhiyun }
492*4882a593Smuzhiyun
493*4882a593Smuzhiyun out:
494*4882a593Smuzhiyun if (atomic)
495*4882a593Smuzhiyun spin_unlock_irqrestore(&opal_write_lock, flags);
496*4882a593Smuzhiyun
497*4882a593Smuzhiyun return written;
498*4882a593Smuzhiyun }
499*4882a593Smuzhiyun
opal_put_chars(uint32_t vtermno,const char * data,int total_len)500*4882a593Smuzhiyun int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
501*4882a593Smuzhiyun {
502*4882a593Smuzhiyun return __opal_put_chars(vtermno, data, total_len, false);
503*4882a593Smuzhiyun }
504*4882a593Smuzhiyun
505*4882a593Smuzhiyun /*
506*4882a593Smuzhiyun * opal_put_chars_atomic will not perform partial-writes. Data will be
507*4882a593Smuzhiyun * atomically written to the terminal or not at all. This is not strictly
508*4882a593Smuzhiyun * true at the moment because console space can race with OPAL's console
509*4882a593Smuzhiyun * writes.
510*4882a593Smuzhiyun */
opal_put_chars_atomic(uint32_t vtermno,const char * data,int total_len)511*4882a593Smuzhiyun int opal_put_chars_atomic(uint32_t vtermno, const char *data, int total_len)
512*4882a593Smuzhiyun {
513*4882a593Smuzhiyun return __opal_put_chars(vtermno, data, total_len, true);
514*4882a593Smuzhiyun }
515*4882a593Smuzhiyun
__opal_flush_console(uint32_t vtermno)516*4882a593Smuzhiyun static s64 __opal_flush_console(uint32_t vtermno)
517*4882a593Smuzhiyun {
518*4882a593Smuzhiyun s64 rc;
519*4882a593Smuzhiyun
520*4882a593Smuzhiyun if (!opal_check_token(OPAL_CONSOLE_FLUSH)) {
521*4882a593Smuzhiyun __be64 evt;
522*4882a593Smuzhiyun
523*4882a593Smuzhiyun /*
524*4882a593Smuzhiyun * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
525*4882a593Smuzhiyun * the console can still be flushed by calling the polling
526*4882a593Smuzhiyun * function while it has OPAL_EVENT_CONSOLE_OUTPUT events.
527*4882a593Smuzhiyun */
528*4882a593Smuzhiyun WARN_ONCE(1, "opal: OPAL_CONSOLE_FLUSH missing.\n");
529*4882a593Smuzhiyun
530*4882a593Smuzhiyun opal_poll_events(&evt);
531*4882a593Smuzhiyun if (!(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT))
532*4882a593Smuzhiyun return OPAL_SUCCESS;
533*4882a593Smuzhiyun return OPAL_BUSY;
534*4882a593Smuzhiyun
535*4882a593Smuzhiyun } else {
536*4882a593Smuzhiyun rc = opal_console_flush(vtermno);
537*4882a593Smuzhiyun if (rc == OPAL_BUSY_EVENT) {
538*4882a593Smuzhiyun opal_poll_events(NULL);
539*4882a593Smuzhiyun rc = OPAL_BUSY;
540*4882a593Smuzhiyun }
541*4882a593Smuzhiyun return rc;
542*4882a593Smuzhiyun }
543*4882a593Smuzhiyun
544*4882a593Smuzhiyun }
545*4882a593Smuzhiyun
546*4882a593Smuzhiyun /*
547*4882a593Smuzhiyun * opal_flush_console spins until the console is flushed
548*4882a593Smuzhiyun */
opal_flush_console(uint32_t vtermno)549*4882a593Smuzhiyun int opal_flush_console(uint32_t vtermno)
550*4882a593Smuzhiyun {
551*4882a593Smuzhiyun for (;;) {
552*4882a593Smuzhiyun s64 rc = __opal_flush_console(vtermno);
553*4882a593Smuzhiyun
554*4882a593Smuzhiyun if (rc == OPAL_BUSY || rc == OPAL_PARTIAL) {
555*4882a593Smuzhiyun mdelay(1);
556*4882a593Smuzhiyun continue;
557*4882a593Smuzhiyun }
558*4882a593Smuzhiyun
559*4882a593Smuzhiyun return opal_error_code(rc);
560*4882a593Smuzhiyun }
561*4882a593Smuzhiyun }
562*4882a593Smuzhiyun
563*4882a593Smuzhiyun /*
564*4882a593Smuzhiyun * opal_flush_chars is an hvc interface that sleeps until the console is
565*4882a593Smuzhiyun * flushed if wait, otherwise it will return -EBUSY if the console has data,
566*4882a593Smuzhiyun * -EAGAIN if it has data and some of it was flushed.
567*4882a593Smuzhiyun */
opal_flush_chars(uint32_t vtermno,bool wait)568*4882a593Smuzhiyun int opal_flush_chars(uint32_t vtermno, bool wait)
569*4882a593Smuzhiyun {
570*4882a593Smuzhiyun for (;;) {
571*4882a593Smuzhiyun s64 rc = __opal_flush_console(vtermno);
572*4882a593Smuzhiyun
573*4882a593Smuzhiyun if (rc == OPAL_BUSY || rc == OPAL_PARTIAL) {
574*4882a593Smuzhiyun if (wait) {
575*4882a593Smuzhiyun msleep(OPAL_BUSY_DELAY_MS);
576*4882a593Smuzhiyun continue;
577*4882a593Smuzhiyun }
578*4882a593Smuzhiyun if (rc == OPAL_PARTIAL)
579*4882a593Smuzhiyun return -EAGAIN;
580*4882a593Smuzhiyun }
581*4882a593Smuzhiyun
582*4882a593Smuzhiyun return opal_error_code(rc);
583*4882a593Smuzhiyun }
584*4882a593Smuzhiyun }
585*4882a593Smuzhiyun
opal_recover_mce(struct pt_regs * regs,struct machine_check_event * evt)586*4882a593Smuzhiyun static int opal_recover_mce(struct pt_regs *regs,
587*4882a593Smuzhiyun struct machine_check_event *evt)
588*4882a593Smuzhiyun {
589*4882a593Smuzhiyun int recovered = 0;
590*4882a593Smuzhiyun
591*4882a593Smuzhiyun if (!(regs->msr & MSR_RI)) {
592*4882a593Smuzhiyun /* If MSR_RI isn't set, we cannot recover */
593*4882a593Smuzhiyun pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
594*4882a593Smuzhiyun recovered = 0;
595*4882a593Smuzhiyun } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
596*4882a593Smuzhiyun /* Platform corrected itself */
597*4882a593Smuzhiyun recovered = 1;
598*4882a593Smuzhiyun } else if (evt->severity == MCE_SEV_FATAL) {
599*4882a593Smuzhiyun /* Fatal machine check */
600*4882a593Smuzhiyun pr_err("Machine check interrupt is fatal\n");
601*4882a593Smuzhiyun recovered = 0;
602*4882a593Smuzhiyun }
603*4882a593Smuzhiyun
604*4882a593Smuzhiyun if (!recovered && evt->sync_error) {
605*4882a593Smuzhiyun /*
606*4882a593Smuzhiyun * Try to kill processes if we get a synchronous machine check
607*4882a593Smuzhiyun * (e.g., one caused by execution of this instruction). This
608*4882a593Smuzhiyun * will devolve into a panic if we try to kill init or are in
609*4882a593Smuzhiyun * an interrupt etc.
610*4882a593Smuzhiyun *
611*4882a593Smuzhiyun * TODO: Queue up this address for hwpoisioning later.
612*4882a593Smuzhiyun * TODO: This is not quite right for d-side machine
613*4882a593Smuzhiyun * checks ->nip is not necessarily the important
614*4882a593Smuzhiyun * address.
615*4882a593Smuzhiyun */
616*4882a593Smuzhiyun if ((user_mode(regs))) {
617*4882a593Smuzhiyun _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
618*4882a593Smuzhiyun recovered = 1;
619*4882a593Smuzhiyun } else if (die_will_crash()) {
620*4882a593Smuzhiyun /*
621*4882a593Smuzhiyun * die() would kill the kernel, so better to go via
622*4882a593Smuzhiyun * the platform reboot code that will log the
623*4882a593Smuzhiyun * machine check.
624*4882a593Smuzhiyun */
625*4882a593Smuzhiyun recovered = 0;
626*4882a593Smuzhiyun } else {
627*4882a593Smuzhiyun die("Machine check", regs, SIGBUS);
628*4882a593Smuzhiyun recovered = 1;
629*4882a593Smuzhiyun }
630*4882a593Smuzhiyun }
631*4882a593Smuzhiyun
632*4882a593Smuzhiyun return recovered;
633*4882a593Smuzhiyun }
634*4882a593Smuzhiyun
pnv_platform_error_reboot(struct pt_regs * regs,const char * msg)635*4882a593Smuzhiyun void __noreturn pnv_platform_error_reboot(struct pt_regs *regs, const char *msg)
636*4882a593Smuzhiyun {
637*4882a593Smuzhiyun panic_flush_kmsg_start();
638*4882a593Smuzhiyun
639*4882a593Smuzhiyun pr_emerg("Hardware platform error: %s\n", msg);
640*4882a593Smuzhiyun if (regs)
641*4882a593Smuzhiyun show_regs(regs);
642*4882a593Smuzhiyun smp_send_stop();
643*4882a593Smuzhiyun
644*4882a593Smuzhiyun panic_flush_kmsg_end();
645*4882a593Smuzhiyun
646*4882a593Smuzhiyun /*
647*4882a593Smuzhiyun * Don't bother to shut things down because this will
648*4882a593Smuzhiyun * xstop the system.
649*4882a593Smuzhiyun */
650*4882a593Smuzhiyun if (opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, msg)
651*4882a593Smuzhiyun == OPAL_UNSUPPORTED) {
652*4882a593Smuzhiyun pr_emerg("Reboot type %d not supported for %s\n",
653*4882a593Smuzhiyun OPAL_REBOOT_PLATFORM_ERROR, msg);
654*4882a593Smuzhiyun }
655*4882a593Smuzhiyun
656*4882a593Smuzhiyun /*
657*4882a593Smuzhiyun * We reached here. There can be three possibilities:
658*4882a593Smuzhiyun * 1. We are running on a firmware level that do not support
659*4882a593Smuzhiyun * opal_cec_reboot2()
660*4882a593Smuzhiyun * 2. We are running on a firmware level that do not support
661*4882a593Smuzhiyun * OPAL_REBOOT_PLATFORM_ERROR reboot type.
662*4882a593Smuzhiyun * 3. We are running on FSP based system that does not need
663*4882a593Smuzhiyun * opal to trigger checkstop explicitly for error analysis.
664*4882a593Smuzhiyun * The FSP PRD component would have already got notified
665*4882a593Smuzhiyun * about this error through other channels.
666*4882a593Smuzhiyun * 4. We are running on a newer skiboot that by default does
667*4882a593Smuzhiyun * not cause a checkstop, drops us back to the kernel to
668*4882a593Smuzhiyun * extract context and state at the time of the error.
669*4882a593Smuzhiyun */
670*4882a593Smuzhiyun
671*4882a593Smuzhiyun panic(msg);
672*4882a593Smuzhiyun }
673*4882a593Smuzhiyun
opal_machine_check(struct pt_regs * regs)674*4882a593Smuzhiyun int opal_machine_check(struct pt_regs *regs)
675*4882a593Smuzhiyun {
676*4882a593Smuzhiyun struct machine_check_event evt;
677*4882a593Smuzhiyun
678*4882a593Smuzhiyun if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
679*4882a593Smuzhiyun return 0;
680*4882a593Smuzhiyun
681*4882a593Smuzhiyun /* Print things out */
682*4882a593Smuzhiyun if (evt.version != MCE_V1) {
683*4882a593Smuzhiyun pr_err("Machine Check Exception, Unknown event version %d !\n",
684*4882a593Smuzhiyun evt.version);
685*4882a593Smuzhiyun return 0;
686*4882a593Smuzhiyun }
687*4882a593Smuzhiyun machine_check_print_event_info(&evt, user_mode(regs), false);
688*4882a593Smuzhiyun
689*4882a593Smuzhiyun if (opal_recover_mce(regs, &evt))
690*4882a593Smuzhiyun return 1;
691*4882a593Smuzhiyun
692*4882a593Smuzhiyun pnv_platform_error_reboot(regs, "Unrecoverable Machine Check exception");
693*4882a593Smuzhiyun }
694*4882a593Smuzhiyun
695*4882a593Smuzhiyun /* Early hmi handler called in real mode. */
opal_hmi_exception_early(struct pt_regs * regs)696*4882a593Smuzhiyun int opal_hmi_exception_early(struct pt_regs *regs)
697*4882a593Smuzhiyun {
698*4882a593Smuzhiyun s64 rc;
699*4882a593Smuzhiyun
700*4882a593Smuzhiyun /*
701*4882a593Smuzhiyun * call opal hmi handler. Pass paca address as token.
702*4882a593Smuzhiyun * The return value OPAL_SUCCESS is an indication that there is
703*4882a593Smuzhiyun * an HMI event generated waiting to pull by Linux.
704*4882a593Smuzhiyun */
705*4882a593Smuzhiyun rc = opal_handle_hmi();
706*4882a593Smuzhiyun if (rc == OPAL_SUCCESS) {
707*4882a593Smuzhiyun local_paca->hmi_event_available = 1;
708*4882a593Smuzhiyun return 1;
709*4882a593Smuzhiyun }
710*4882a593Smuzhiyun return 0;
711*4882a593Smuzhiyun }
712*4882a593Smuzhiyun
opal_hmi_exception_early2(struct pt_regs * regs)713*4882a593Smuzhiyun int opal_hmi_exception_early2(struct pt_regs *regs)
714*4882a593Smuzhiyun {
715*4882a593Smuzhiyun s64 rc;
716*4882a593Smuzhiyun __be64 out_flags;
717*4882a593Smuzhiyun
718*4882a593Smuzhiyun /*
719*4882a593Smuzhiyun * call opal hmi handler.
720*4882a593Smuzhiyun * Check 64-bit flag mask to find out if an event was generated,
721*4882a593Smuzhiyun * and whether TB is still valid or not etc.
722*4882a593Smuzhiyun */
723*4882a593Smuzhiyun rc = opal_handle_hmi2(&out_flags);
724*4882a593Smuzhiyun if (rc != OPAL_SUCCESS)
725*4882a593Smuzhiyun return 0;
726*4882a593Smuzhiyun
727*4882a593Smuzhiyun if (be64_to_cpu(out_flags) & OPAL_HMI_FLAGS_NEW_EVENT)
728*4882a593Smuzhiyun local_paca->hmi_event_available = 1;
729*4882a593Smuzhiyun if (be64_to_cpu(out_flags) & OPAL_HMI_FLAGS_TOD_TB_FAIL)
730*4882a593Smuzhiyun tb_invalid = true;
731*4882a593Smuzhiyun return 1;
732*4882a593Smuzhiyun }
733*4882a593Smuzhiyun
/* HMI exception handler called in virtual mode when irqs are next enabled. */
int opal_handle_hmi_exception(struct pt_regs *regs)
{
	/*
	 * Check if an HMI event was latched by the real-mode early
	 * handler; if so, consume the flag and wake kopald to pull
	 * and process the event.
	 */
	if (!local_paca->hmi_event_available)
		return 0;

	local_paca->hmi_event_available = 0;
	opal_wake_poller();

	return 1;
}
749*4882a593Smuzhiyun
/*
 * Linear scan of the firmware-provided recoverable instruction ranges.
 * Returns the fixup address for @nip, or 0 when no range covers it.
 */
static uint64_t find_recovery_address(uint64_t nip)
{
	int idx;

	for (idx = 0; idx < mc_recoverable_range_len; idx++) {
		uint64_t start = mc_recoverable_range[idx].start_addr;
		uint64_t end = mc_recoverable_range[idx].end_addr;

		if (nip >= start && nip < end)
			return mc_recoverable_range[idx].recover_addr;
	}

	return 0;
}
760*4882a593Smuzhiyun
opal_mce_check_early_recovery(struct pt_regs * regs)761*4882a593Smuzhiyun bool opal_mce_check_early_recovery(struct pt_regs *regs)
762*4882a593Smuzhiyun {
763*4882a593Smuzhiyun uint64_t recover_addr = 0;
764*4882a593Smuzhiyun
765*4882a593Smuzhiyun if (!opal.base || !opal.size)
766*4882a593Smuzhiyun goto out;
767*4882a593Smuzhiyun
768*4882a593Smuzhiyun if ((regs->nip >= opal.base) &&
769*4882a593Smuzhiyun (regs->nip < (opal.base + opal.size)))
770*4882a593Smuzhiyun recover_addr = find_recovery_address(regs->nip);
771*4882a593Smuzhiyun
772*4882a593Smuzhiyun /*
773*4882a593Smuzhiyun * Setup regs->nip to rfi into fixup address.
774*4882a593Smuzhiyun */
775*4882a593Smuzhiyun if (recover_addr)
776*4882a593Smuzhiyun regs->nip = recover_addr;
777*4882a593Smuzhiyun
778*4882a593Smuzhiyun out:
779*4882a593Smuzhiyun return !!recover_addr;
780*4882a593Smuzhiyun }
781*4882a593Smuzhiyun
opal_sysfs_init(void)782*4882a593Smuzhiyun static int opal_sysfs_init(void)
783*4882a593Smuzhiyun {
784*4882a593Smuzhiyun opal_kobj = kobject_create_and_add("opal", firmware_kobj);
785*4882a593Smuzhiyun if (!opal_kobj) {
786*4882a593Smuzhiyun pr_warn("kobject_create_and_add opal failed\n");
787*4882a593Smuzhiyun return -ENOMEM;
788*4882a593Smuzhiyun }
789*4882a593Smuzhiyun
790*4882a593Smuzhiyun return 0;
791*4882a593Smuzhiyun }
792*4882a593Smuzhiyun
/*
 * Read handler shared by all "exports" sysfs binary attributes: copy
 * out of the kernel-mapped firmware region stashed in bin_attr->private
 * (bin_attr->size bytes long) with normal offset/count clamping.
 */
static ssize_t export_attr_read(struct file *fp, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	return memory_read_from_buffer(buf, count, &off, bin_attr->private,
				       bin_attr->size);
}
800*4882a593Smuzhiyun
/*
 * Create one read-only (0400) sysfs binary file named @export_name
 * under @parent, backed by a firmware memory region described by the
 * device-tree property @np/@prop_name as a <u64 addr, u64 size> pair.
 *
 * The physical address is mapped with __va(), so the region is assumed
 * to be covered by the kernel linear mapping -- TODO confirm for all
 * exported regions.
 *
 * Returns 0 on success or a negative errno; both allocations are freed
 * on any failure path.
 */
static int opal_add_one_export(struct kobject *parent, const char *export_name,
			       struct device_node *np, const char *prop_name)
{
	struct bin_attribute *attr = NULL;
	const char *name = NULL;
	u64 vals[2];
	int rc;

	rc = of_property_read_u64_array(np, prop_name, &vals[0], 2);
	if (rc)
		goto out;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr) {
		rc = -ENOMEM;
		goto out;
	}
	/* sysfs keeps referring to the name, so it needs its own copy. */
	name = kstrdup(export_name, GFP_KERNEL);
	if (!name) {
		rc = -ENOMEM;
		goto out;
	}

	sysfs_bin_attr_init(attr);
	attr->attr.name = name;
	attr->attr.mode = 0400;	/* root-readable only */
	attr->read = export_attr_read;
	attr->private = __va(vals[0]);
	attr->size = vals[1];

	rc = sysfs_create_bin_file(parent, attr);
out:
	/* kfree(NULL) is a no-op, so early failures are safe here. */
	if (rc) {
		kfree(name);
		kfree(attr);
	}

	return rc;
}
840*4882a593Smuzhiyun
/*
 * Recursively mirror a device-tree node under @kobj: every property
 * becomes a read-only binary attribute (via opal_add_one_export()) and
 * every child node becomes a child kobject, descended into in turn.
 * Failures are logged and skipped so one bad entry does not abort the
 * rest of the export tree.
 */
static void opal_add_exported_attrs(struct device_node *np,
				    struct kobject *kobj)
{
	struct device_node *child;
	struct property *prop;

	for_each_property_of_node(np, prop) {
		int rc;

		/* "name" and "phandle" are DT bookkeeping, not exports. */
		if (!strcmp(prop->name, "name") ||
		    !strcmp(prop->name, "phandle"))
			continue;

		rc = opal_add_one_export(kobj, prop->name, np, prop->name);
		if (rc) {
			pr_warn("Unable to add export %pOF/%s, rc = %d!\n",
				np, prop->name, rc);
		}
	}

	for_each_child_of_node(np, child) {
		struct kobject *child_kobj;

		child_kobj = kobject_create_and_add(child->name, kobj);
		if (!child_kobj) {
			pr_err("Unable to create export dir for %pOF\n", child);
			continue;
		}

		opal_add_exported_attrs(child, child_kobj);
	}
}
873*4882a593Smuzhiyun
/*
 * opal_export_attrs: creates a sysfs node for each property listed in
 * the device-tree under /ibm,opal/firmware/exports/
 * All new sysfs nodes are created under /opal/exports/.
 * This allows for reserved memory regions (e.g. HDAT) to be read.
 * The new sysfs nodes are only readable by root.
 */
static void opal_export_attrs(void)
{
	struct device_node *np;
	struct kobject *kobj;
	int rc;

	/* Nothing to export on firmware without an exports node. */
	np = of_find_node_by_path("/ibm,opal/firmware/exports");
	if (!np)
		return;

	/* Create new 'exports' directory - /sys/firmware/opal/exports */
	kobj = kobject_create_and_add("exports", opal_kobj);
	if (!kobj) {
		pr_warn("kobject_create_and_add() of exports failed\n");
		of_node_put(np);
		return;
	}

	opal_add_exported_attrs(np, kobj);

	/*
	 * NB: symbol_map existed before the generic export interface so it
	 * lives under the top level opal_kobj.
	 */
	rc = opal_add_one_export(opal_kobj, "symbol_map",
				 np->parent, "symbol-map");
	if (rc)
		pr_warn("Error %d creating OPAL symbols file\n", rc);

	of_node_put(np);
}
912*4882a593Smuzhiyun
opal_dump_region_init(void)913*4882a593Smuzhiyun static void __init opal_dump_region_init(void)
914*4882a593Smuzhiyun {
915*4882a593Smuzhiyun void *addr;
916*4882a593Smuzhiyun uint64_t size;
917*4882a593Smuzhiyun int rc;
918*4882a593Smuzhiyun
919*4882a593Smuzhiyun if (!opal_check_token(OPAL_REGISTER_DUMP_REGION))
920*4882a593Smuzhiyun return;
921*4882a593Smuzhiyun
922*4882a593Smuzhiyun /* Register kernel log buffer */
923*4882a593Smuzhiyun addr = log_buf_addr_get();
924*4882a593Smuzhiyun if (addr == NULL)
925*4882a593Smuzhiyun return;
926*4882a593Smuzhiyun
927*4882a593Smuzhiyun size = log_buf_len_get();
928*4882a593Smuzhiyun if (size == 0)
929*4882a593Smuzhiyun return;
930*4882a593Smuzhiyun
931*4882a593Smuzhiyun rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF,
932*4882a593Smuzhiyun __pa(addr), size);
933*4882a593Smuzhiyun /* Don't warn if this is just an older OPAL that doesn't
934*4882a593Smuzhiyun * know about that call
935*4882a593Smuzhiyun */
936*4882a593Smuzhiyun if (rc && rc != OPAL_UNSUPPORTED)
937*4882a593Smuzhiyun pr_warn("DUMP: Failed to register kernel log buffer. "
938*4882a593Smuzhiyun "rc = %d\n", rc);
939*4882a593Smuzhiyun }
940*4882a593Smuzhiyun
/*
 * Create a platform device for every device-tree node matching
 * @compatible so the corresponding platform drivers can bind to them.
 */
static void opal_pdev_init(const char *compatible)
{
	struct device_node *np;

	for_each_compatible_node(np, NULL, compatible)
		of_platform_device_create(np, NULL, NULL);
}
948*4882a593Smuzhiyun
/*
 * Create the platform device for the In-Memory Collection (IMC)
 * counters node, if the device tree advertises one.
 * NOTE(review): the reference from of_find_compatible_node() is not
 * dropped here -- presumably intentional since the device holds on to
 * the node; confirm against of_platform_device_create() semantics.
 */
static void __init opal_imc_init_dev(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, IMC_DTB_COMPAT);
	if (np)
		of_platform_device_create(np, NULL, NULL);
}
957*4882a593Smuzhiyun
/*
 * OPAL event-polling kthread: handles pending OPAL events and then
 * sleeps for roughly one heartbeat period, or until woken via
 * opal_wake_poller().
 */
static int kopald(void *unused)
{
	/* Poll period: one firmware heartbeat plus a jiffy of slack. */
	unsigned long timeout = msecs_to_jiffies(opal_heartbeat) + 1;

	set_freezable();
	do {
		try_to_freeze();

		opal_handle_events();

		/*
		 * Go interruptible *before* re-checking for pending
		 * events so a wakeup between the check and the sleep
		 * is not lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (opal_have_pending_events())
			__set_current_state(TASK_RUNNING);
		else
			schedule_timeout(timeout);

	} while (!kthread_should_stop());

	return 0;
}
978*4882a593Smuzhiyun
opal_wake_poller(void)979*4882a593Smuzhiyun void opal_wake_poller(void)
980*4882a593Smuzhiyun {
981*4882a593Smuzhiyun if (kopald_tsk)
982*4882a593Smuzhiyun wake_up_process(kopald_tsk);
983*4882a593Smuzhiyun }
984*4882a593Smuzhiyun
opal_init_heartbeat(void)985*4882a593Smuzhiyun static void opal_init_heartbeat(void)
986*4882a593Smuzhiyun {
987*4882a593Smuzhiyun /* Old firwmware, we assume the HVC heartbeat is sufficient */
988*4882a593Smuzhiyun if (of_property_read_u32(opal_node, "ibm,heartbeat-ms",
989*4882a593Smuzhiyun &opal_heartbeat) != 0)
990*4882a593Smuzhiyun opal_heartbeat = 0;
991*4882a593Smuzhiyun
992*4882a593Smuzhiyun if (opal_heartbeat)
993*4882a593Smuzhiyun kopald_tsk = kthread_run(kopald, NULL, "kopald");
994*4882a593Smuzhiyun }
995*4882a593Smuzhiyun
/*
 * Main OPAL platform initialisation: wires up the console, messaging,
 * sensor, sysfs-export and platform-device interfaces that firmware
 * provides. Registered as a subsys initcall on powernv machines.
 */
static int __init opal_init(void)
{
	struct device_node *np, *consoles, *leds;
	int rc;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("Device node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if any ports */
	consoles = of_find_node_by_path("/ibm,opal/consoles");
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			if (!of_node_name_eq(np, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Initialise OPAL messaging system */
	opal_message_init(opal_node);

	/* Initialise OPAL asynchronous completion interface */
	opal_async_comp_init();

	/* Initialise OPAL sensor interface */
	opal_sensor_init();

	/* Initialise OPAL hypervisor maintenance interrupt handling */
	opal_hmi_handler_init();

	/* Create i2c platform devices */
	opal_pdev_init("ibm,opal-i2c");

	/* Handle non-volatile memory devices */
	opal_pdev_init("pmem-region");

	/* Setup a heartbeat thread if requested by OPAL */
	opal_init_heartbeat();

	/* Detect In-Memory Collection counters and create devices */
	opal_imc_init_dev();

	/* Create leds platform devices */
	leds = of_find_node_by_path("/ibm,opal/leds");
	if (leds) {
		of_platform_device_create(leds, "opal_leds", NULL);
		of_node_put(leds);
	}

	/* Initialise OPAL message log interface */
	opal_msglog_init();

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	/* The sysfs-based interfaces below all need the opal kobject. */
	if (rc == 0) {
		/* Setup dump region interface */
		opal_dump_region_init();
		/* Setup error log interface */
		rc = opal_elog_init();
		/* Setup code update interface */
		opal_flash_update_init();
		/* Setup platform dump extract interface */
		opal_platform_dump_init();
		/* Setup system parameters interface */
		opal_sys_param_init();
		/* Setup message log sysfs interface. */
		opal_msglog_sysfs_init();
		/* Add all export properties */
		opal_export_attrs();
	}

	/* Initialize platform devices: IPMI backend, PRD & flash interface */
	opal_pdev_init("ibm,opal-ipmi");
	opal_pdev_init("ibm,opal-flash");
	opal_pdev_init("ibm,opal-prd");

	/* Initialise platform device: oppanel interface */
	opal_pdev_init("ibm,opal-oppanel");

	/* Initialise OPAL kmsg dumper for flushing console on panic */
	opal_kmsg_init();

	/* Initialise OPAL powercap interface */
	opal_powercap_init();

	/* Initialise OPAL Power-Shifting-Ratio interface */
	opal_psr_init();

	/* Initialise OPAL sensor groups */
	opal_sensor_groups_init();

	/* Initialise OPAL Power control interface */
	opal_power_control_init();

	/* Initialize OPAL secure variables */
	opal_pdev_init("ibm,secvar-backend");

	return 0;
}
1099*4882a593Smuzhiyun machine_subsys_initcall(powernv, opal_init);
1100*4882a593Smuzhiyun
/*
 * Quiesce OPAL before reboot/shutdown: stop event delivery, wait for
 * firmware to finish anything that may still write into our memory,
 * then drop the kernel-log dump region registration.
 */
void opal_shutdown(void)
{
	long rc = OPAL_BUSY;

	opal_event_shutdown();

	/*
	 * Then sync with OPAL which ensure anything that can
	 * potentially write to our memory has completed such
	 * as an ongoing dump retrieval
	 */
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_sync_host_reboot();
		/* OPAL_BUSY wants events serviced; BUSY_EVENT just waits. */
		if (rc == OPAL_BUSY)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}

	/* Unregister memory dump region */
	if (opal_check_token(OPAL_UNREGISTER_DUMP_REGION))
		opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
}
1124*4882a593Smuzhiyun
1125*4882a593Smuzhiyun /* Export this so that test modules can use it */
1126*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_invalid_call);
1127*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_xscom_read);
1128*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_xscom_write);
1129*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_ipmi_send);
1130*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_ipmi_recv);
1131*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_flash_read);
1132*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_flash_write);
1133*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_flash_erase);
1134*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_prd_msg);
1135*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_check_token);
1136*4882a593Smuzhiyun
/*
 * Convert a region of vmalloc memory to an opal sg list.
 *
 * Each list node occupies one page; nodes are chained through physical
 * addresses because OPAL consumes the list in real mode. Each entry
 * describes one (physical page, length) chunk of the vmalloc region.
 * Returns the head node, or NULL on allocation failure (any partially
 * built list is freed).
 */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
					     unsigned long vmalloc_size)
{
	struct opal_sg_list *sg, *first = NULL;
	unsigned long i = 0;

	sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg)
		goto nomem;

	first = sg;

	while (vmalloc_size > 0) {
		/* Physical address of the current vmalloc page. */
		uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
		uint64_t length = min(vmalloc_size, PAGE_SIZE);

		sg->entry[i].data = cpu_to_be64(data);
		sg->entry[i].length = cpu_to_be64(length);
		i++;

		/* Node full: chain a fresh page and continue there. */
		if (i >= SG_ENTRIES_PER_NODE) {
			struct opal_sg_list *next;

			next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!next)
				goto nomem;

			/* +16 covers the node's length/next header fields. */
			sg->length = cpu_to_be64(
					i * sizeof(struct opal_sg_entry) + 16);
			i = 0;
			sg->next = cpu_to_be64(__pa(next));
			sg = next;
		}

		vmalloc_addr += length;
		vmalloc_size -= length;
	}

	sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);

	return first;

nomem:
	pr_err("%s : Failed to allocate memory\n", __func__);
	opal_free_sg_list(first);
	return NULL;
}
1185*4882a593Smuzhiyun
opal_free_sg_list(struct opal_sg_list * sg)1186*4882a593Smuzhiyun void opal_free_sg_list(struct opal_sg_list *sg)
1187*4882a593Smuzhiyun {
1188*4882a593Smuzhiyun while (sg) {
1189*4882a593Smuzhiyun uint64_t next = be64_to_cpu(sg->next);
1190*4882a593Smuzhiyun
1191*4882a593Smuzhiyun kfree(sg);
1192*4882a593Smuzhiyun
1193*4882a593Smuzhiyun if (next)
1194*4882a593Smuzhiyun sg = __va(next);
1195*4882a593Smuzhiyun else
1196*4882a593Smuzhiyun sg = NULL;
1197*4882a593Smuzhiyun }
1198*4882a593Smuzhiyun }
1199*4882a593Smuzhiyun
opal_error_code(int rc)1200*4882a593Smuzhiyun int opal_error_code(int rc)
1201*4882a593Smuzhiyun {
1202*4882a593Smuzhiyun switch (rc) {
1203*4882a593Smuzhiyun case OPAL_SUCCESS: return 0;
1204*4882a593Smuzhiyun
1205*4882a593Smuzhiyun case OPAL_PARAMETER: return -EINVAL;
1206*4882a593Smuzhiyun case OPAL_ASYNC_COMPLETION: return -EINPROGRESS;
1207*4882a593Smuzhiyun case OPAL_BUSY:
1208*4882a593Smuzhiyun case OPAL_BUSY_EVENT: return -EBUSY;
1209*4882a593Smuzhiyun case OPAL_NO_MEM: return -ENOMEM;
1210*4882a593Smuzhiyun case OPAL_PERMISSION: return -EPERM;
1211*4882a593Smuzhiyun
1212*4882a593Smuzhiyun case OPAL_UNSUPPORTED: return -EIO;
1213*4882a593Smuzhiyun case OPAL_HARDWARE: return -EIO;
1214*4882a593Smuzhiyun case OPAL_INTERNAL_ERROR: return -EIO;
1215*4882a593Smuzhiyun case OPAL_TIMEOUT: return -ETIMEDOUT;
1216*4882a593Smuzhiyun default:
1217*4882a593Smuzhiyun pr_err("%s: unexpected OPAL error %d\n", __func__, rc);
1218*4882a593Smuzhiyun return -EIO;
1219*4882a593Smuzhiyun }
1220*4882a593Smuzhiyun }
1221*4882a593Smuzhiyun
powernv_set_nmmu_ptcr(unsigned long ptcr)1222*4882a593Smuzhiyun void powernv_set_nmmu_ptcr(unsigned long ptcr)
1223*4882a593Smuzhiyun {
1224*4882a593Smuzhiyun int rc;
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun if (firmware_has_feature(FW_FEATURE_OPAL)) {
1227*4882a593Smuzhiyun rc = opal_nmmu_set_ptcr(-1UL, ptcr);
1228*4882a593Smuzhiyun if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
1229*4882a593Smuzhiyun pr_warn("%s: Unable to set nest mmu ptcr\n", __func__);
1230*4882a593Smuzhiyun }
1231*4882a593Smuzhiyun }
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_poll_events);
1234*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_rtc_read);
1235*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_rtc_write);
1236*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_tpo_read);
1237*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_tpo_write);
1238*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_i2c_request);
1239*4882a593Smuzhiyun /* Export these symbols for PowerNV LED class driver */
1240*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_leds_get_ind);
1241*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_leds_set_ind);
1242*4882a593Smuzhiyun /* Export this symbol for PowerNV Operator Panel class driver */
1243*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
1244*4882a593Smuzhiyun /* Export this for KVM */
1245*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
1246*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_int_eoi);
1247*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(opal_error_code);
1248*4882a593Smuzhiyun /* Export the below symbol for NX compression */
1249*4882a593Smuzhiyun EXPORT_SYMBOL(opal_nx_coproc_init);
1250