// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2017-2025, STMicroelectronics - All Rights Reserved
 */

#include <assert.h>
#include <drivers/clk.h>
#include <drivers/clk_dt.h>
#include <drivers/rstctrl.h>
#include <drivers/wdt.h>
#include <io.h>
#include <keep.h>
#include <kernel/boot.h>
#include <kernel/delay.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/interrupt.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/pm.h>
#include <kernel/spinlock.h>
#include <kernel/tee_time.h>
#include <libfdt.h>
#include <limits.h>
#include <mm/core_memprot.h>
#include <sm/sm.h>
#include <stdint.h>
#include <stdlib.h>
#include <stm32_util.h>
#include <string.h>
#include <trace.h>

/* IWDG Compatibility */
#define IWDG_TIMEOUT_US		U(40000)
#define IWDG_CNT_MASK		GENMASK_32(11, 0)
#define IWDG_ONF_MIN_VER	U(0x31)
#define IWDG_ICR_MIN_VER	U(0x40)

/* IWDG registers offsets */
#define IWDG_KR_OFFSET		U(0x00)
#define IWDG_PR_OFFSET		U(0x04)
#define IWDG_RLR_OFFSET		U(0x08)
#define IWDG_SR_OFFSET		U(0x0C)
#define IWDG_EWCR_OFFSET	U(0x14)
#define IWDG_ICR_OFFSET		U(0x18)
#define IWDG_VERR_OFFSET	U(0x3F4)

#define IWDG_KR_WPROT_KEY	U(0x0000)
#define IWDG_KR_ACCESS_KEY	U(0x5555)
#define IWDG_KR_RELOAD_KEY	U(0xAAAA)
#define IWDG_KR_START_KEY	U(0xCCCC)

/* Use a fixed prescaler divider of 1024 */
#define IWDG_PRESCALER_1024	U(1024)
#define IWDG_PR_DIV_1024	U(0x8)
#define IWDG_PR_DIV_MASK	GENMASK_32(3, 0)
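
/*
 * With the fixed /1024 prescaler and the 12-bit reload counter, the
 * programmed timeout follows:
 *
 *   timeout_sec = (RLR + 1) * 1024 / LSI_rate
 *
 * For illustration, assuming a typical LSI clock of about 32 kHz, the
 * maximum hardware timeout is roughly 4096 * 1024 / 32000, i.e. ~131 s.
 */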

#define IWDG_SR_PVU		BIT(0)
#define IWDG_SR_RVU		BIT(1)
#define IWDG_SR_WVU		BIT(2)
#define IWDG_SR_EWU		BIT(3)
#define IWDG_SR_UPDATE_MASK	(IWDG_SR_PVU | IWDG_SR_RVU | IWDG_SR_WVU | \
				 IWDG_SR_EWU)
#define IWDG_SR_ONF		BIT(8)
#define IWDG_SR_EWIF		BIT(14)
#define IWDG_SR_EWIF_V40	BIT(15)

#define IWDG_EWCR_EWIE		BIT(15)
#define IWDG_EWCR_EWIC		BIT(14)

#define IWDG_ICR_EWIC		BIT(15)

#define IWDG_VERR_REV_MASK	GENMASK_32(7, 0)

/* Default early timeout: raise the early interrupt 5 seconds before expiry */
#define IWDG_ETIMEOUT_SEC	U(5)

/*
 * Values for struct stm32_iwdg_device::flags
 * IWDG_FLAGS_ENABLED		Watchdog has been enabled
 */
#define IWDG_FLAGS_ENABLED	BIT(0)

/*
 * IWDG watchdog instance data
 * @base - IWDG interface IOMEM base address
 * @clk_pclk - Bus clock
 * @clk_lsi - IWDG source clock
 * @itr_chip - Interrupt chip device
 * @itr_num - Interrupt number for the IWDG instance
 * @itr_handler - Interrupt handler
 * @reset - Reset controller device used to control the ability of the watchdog
 * to reset the system
 * @flags - Property flags for the IWDG instance
 * @timeout - Watchdog expiration timeout, in seconds
 * @early_timeout - Delay in seconds before expiration at which the early
 * interrupt is raised
 * @saved_nb_int - Saved number of interrupts before panic
 * @nb_int - Remaining number of interrupts before panic
 * @hw_version - Watchdog HW version
 * @last_refresh - Time of last watchdog refresh
 * @wdt_chip - Watchdog chip instance
 * @max_hw_timeout - Maximum hardware timeout, in seconds
 */
struct stm32_iwdg_device {
	struct io_pa_va base;
	struct clk *clk_pclk;
	struct clk *clk_lsi;
	struct itr_chip *itr_chip;
	size_t itr_num;
	struct itr_handler *itr_handler;
	struct rstctrl *reset;
	uint32_t flags;
	unsigned long timeout;
	unsigned long early_timeout;
	unsigned long saved_nb_int;
	unsigned long nb_int;
	unsigned int hw_version;
	TEE_Time last_refresh;
	struct wdt_chip wdt_chip;
	unsigned long max_hw_timeout;
};

static uint32_t sr_ewif_mask(struct stm32_iwdg_device *iwdg)
{
	if (iwdg->hw_version >= IWDG_ICR_MIN_VER)
		return IWDG_SR_EWIF_V40;
	else
		return IWDG_SR_EWIF;
}

static vaddr_t get_base(struct stm32_iwdg_device *iwdg)
{
	return io_pa_or_va(&iwdg->base, 1);
}

static void iwdg_wdt_set_enabled(struct stm32_iwdg_device *iwdg)
{
	iwdg->flags |= IWDG_FLAGS_ENABLED;
}

static bool iwdg_wdt_is_enabled(struct stm32_iwdg_device *iwdg)
{
	return iwdg->flags & IWDG_FLAGS_ENABLED;
}

/* Return the counter value matching a timeout in seconds, or 0 on error */
static uint32_t iwdg_timeout_cnt(struct stm32_iwdg_device *iwdg,
				 unsigned long to_sec)
{
	uint64_t reload = (uint64_t)to_sec * clk_get_rate(iwdg->clk_lsi);
	uint64_t cnt = (reload / IWDG_PRESCALER_1024) - 1;

	/* Be safe and expect any counter to be above 2 */
	if (cnt > IWDG_CNT_MASK || cnt < 3)
		return 0;

	return cnt;
}
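
/*
 * For illustration of the conversion above, assuming a 32 kHz LSI clock,
 * a 32 second timeout gives cnt = (32 * 32000) / 1024 - 1 = 999, which
 * fits in the 12-bit reload counter.
 */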

/* Wait until IWDG programming completes */
static TEE_Result iwdg_wait_sync(struct stm32_iwdg_device *iwdg)
{
	uint64_t timeout_ref = timeout_init_us(IWDG_TIMEOUT_US);
	vaddr_t iwdg_base = get_base(iwdg);

	while (io_read32(iwdg_base + IWDG_SR_OFFSET) & IWDG_SR_UPDATE_MASK)
		if (timeout_elapsed(timeout_ref))
			break;

	if (io_read32(iwdg_base + IWDG_SR_OFFSET) & IWDG_SR_UPDATE_MASK)
		return TEE_ERROR_GENERIC;

	return TEE_SUCCESS;
}

static void stm32_iwdg_it_ack(struct stm32_iwdg_device *iwdg)
{
	vaddr_t iwdg_base = get_base(iwdg);

	if (iwdg->hw_version >= IWDG_ICR_MIN_VER)
		io_setbits32(iwdg_base + IWDG_ICR_OFFSET, IWDG_ICR_EWIC);
	else
		io_setbits32(iwdg_base + IWDG_EWCR_OFFSET, IWDG_EWCR_EWIC);
}

static enum itr_return stm32_iwdg_it_handler(struct itr_handler *h)
{
	unsigned int __maybe_unused cpu = get_core_pos();
	struct stm32_iwdg_device *iwdg = h->data;
	vaddr_t iwdg_base = get_base(iwdg);

	DMSG("CPU %u IT Watchdog %#"PRIxPA, cpu, iwdg->base.pa);

	/* Check for spurious interrupt */
	if (!(io_read32(iwdg_base + IWDG_SR_OFFSET) & sr_ewif_mask(iwdg)))
		return ITRR_NONE;

	/*
	 * Writing IWDG_EWCR_EWIT triggers a watchdog refresh.
	 * To prevent the watchdog refresh, write-protect all the registers;
	 * this makes all IWDG_EWCR fields read-only, except IWDG_EWCR_EWIC.
	 */
	io_write32(iwdg_base + IWDG_KR_OFFSET, IWDG_KR_WPROT_KEY);

	/* Disable early interrupt */
	stm32_iwdg_it_ack(iwdg);

	if (iwdg->nb_int > 0) {
		/* Decrease the interrupt count while the watchdog is running */
		if (iwdg->nb_int < ULONG_MAX)
			iwdg->nb_int--;
		io_write32(get_base(iwdg) + IWDG_KR_OFFSET, IWDG_KR_RELOAD_KEY);
	} else {
		panic("Watchdog");
	}

	return ITRR_HANDLED;
}
DECLARE_KEEP_PAGER(stm32_iwdg_it_handler);

static TEE_Result configure_timeout(struct stm32_iwdg_device *iwdg)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	vaddr_t iwdg_base = get_base(iwdg);
	uint32_t rlr_value = 0;
	uint32_t ewie_value = 0;

	assert(iwdg_wdt_is_enabled(iwdg));

	rlr_value = iwdg_timeout_cnt(iwdg, iwdg->timeout);
	if (!rlr_value)
		return TEE_ERROR_GENERIC;

	if (iwdg->itr_handler) {
		ewie_value = iwdg_timeout_cnt(iwdg, iwdg->early_timeout);
		interrupt_enable(iwdg->itr_chip, iwdg->itr_num);
	}

	io_write32(iwdg_base + IWDG_KR_OFFSET, IWDG_KR_ACCESS_KEY);
	io_write32(iwdg_base + IWDG_PR_OFFSET, IWDG_PR_DIV_1024);
	io_write32(iwdg_base + IWDG_RLR_OFFSET, rlr_value);
	if (ewie_value &&
	    !(io_read32(iwdg_base + IWDG_EWCR_OFFSET) & IWDG_EWCR_EWIE))
		io_write32(iwdg_base + IWDG_EWCR_OFFSET,
			   ewie_value | IWDG_EWCR_EWIE);

	res = iwdg_wait_sync(iwdg);

	io_write32(iwdg_base + IWDG_KR_OFFSET, IWDG_KR_RELOAD_KEY);

	return res;
}

static void iwdg_start(struct stm32_iwdg_device *iwdg)
{
	TEE_Result res = TEE_ERROR_GENERIC;

	res = tee_time_get_sys_time(&iwdg->last_refresh);
	if (res)
		panic();

	io_write32(get_base(iwdg) + IWDG_KR_OFFSET, IWDG_KR_START_KEY);

	iwdg_wdt_set_enabled(iwdg);
}

static void iwdg_refresh(struct stm32_iwdg_device *iwdg)
{
	TEE_Result res = TEE_ERROR_GENERIC;

	res = tee_time_get_sys_time(&iwdg->last_refresh);
	if (res)
		panic();

	io_write32(get_base(iwdg) + IWDG_KR_OFFSET, IWDG_KR_RELOAD_KEY);
}

/* Operators for watchdog OP-TEE interface */
static struct stm32_iwdg_device *wdt_chip_to_iwdg(struct wdt_chip *chip)
{
	return container_of(chip, struct stm32_iwdg_device, wdt_chip);
}

static TEE_Result iwdg_wdt_init(struct wdt_chip *chip,
				unsigned long *min_timeout,
				unsigned long *max_timeout)
{
	struct stm32_iwdg_device *iwdg = wdt_chip_to_iwdg(chip);
	unsigned long rate = clk_get_rate(iwdg->clk_lsi);

	if (!rate)
		return TEE_ERROR_GENERIC;

	/* Be safe and expect any counter to be above 2 */
	*min_timeout = 3 * IWDG_PRESCALER_1024 / rate;
	*max_timeout = (IWDG_CNT_MASK + 1) * IWDG_PRESCALER_1024 / rate;

	return TEE_SUCCESS;
}

static void iwdg_wdt_start(struct wdt_chip *chip)
{
	struct stm32_iwdg_device *iwdg = wdt_chip_to_iwdg(chip);

	iwdg_start(iwdg);
	if (iwdg->reset && iwdg->itr_handler)
		stm32_iwdg_it_ack(iwdg);

	if (configure_timeout(iwdg))
		panic();

	if (iwdg->reset)
		if (rstctrl_assert(iwdg->reset))
			panic();
}

static void iwdg_wdt_stop(struct wdt_chip *chip)
{
	struct stm32_iwdg_device *iwdg = wdt_chip_to_iwdg(chip);

	if (iwdg->reset) {
		if (rstctrl_deassert(iwdg->reset))
			panic();
		if (iwdg->itr_handler)
			interrupt_disable(iwdg->itr_chip, iwdg->itr_num);
	}

	/* Keep reloading on early interrupts and never panic anymore */
	iwdg->saved_nb_int = ULONG_MAX;
	iwdg->nb_int = ULONG_MAX;
}

static void iwdg_wdt_refresh(struct wdt_chip *chip)
{
	struct stm32_iwdg_device *iwdg = wdt_chip_to_iwdg(chip);

	iwdg->nb_int = iwdg->saved_nb_int;
	iwdg_refresh(iwdg);
}

static void stm32_iwdg_handle_timeouts(struct stm32_iwdg_device *iwdg,
				       unsigned long timeout_sec)
{
	unsigned long interval = 0;
	unsigned long rate = 0;
	unsigned long n = 0;
	long w = 0;

	rate = clk_get_rate(iwdg->clk_lsi);
	iwdg->max_hw_timeout = (IWDG_CNT_MASK + 1) * IWDG_PRESCALER_1024 / rate;

	if (timeout_sec > iwdg->max_hw_timeout) {
		IMSG("Timeout exceeds hardware capability, approximate it");
		interval = iwdg->max_hw_timeout - IWDG_ETIMEOUT_SEC;
		n = (timeout_sec - IWDG_ETIMEOUT_SEC) / interval;
		w = ((timeout_sec - IWDG_ETIMEOUT_SEC) / (n + 1)) +
		    IWDG_ETIMEOUT_SEC;
		iwdg->timeout = w;
		iwdg->early_timeout = IWDG_ETIMEOUT_SEC;
	} else {
		iwdg->timeout = timeout_sec;
		if (iwdg->timeout >= 2 * IWDG_ETIMEOUT_SEC)
			iwdg->early_timeout = IWDG_ETIMEOUT_SEC;
		else
			iwdg->early_timeout = iwdg->timeout / 4;
	}

	if (!iwdg->early_timeout)
		iwdg->early_timeout = 1;

	iwdg->saved_nb_int = n;
	iwdg->nb_int = n;
}
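
/*
 * Worked example of the approximation above, assuming an LSI clock of
 * about 32 kHz (max_hw_timeout ~= 131 s) and a requested timeout of 300 s:
 *
 *   interval = 131 - 5 = 126
 *   n = (300 - 5) / 126 = 2
 *   w = (300 - 5) / (2 + 1) + 5 = 103
 *
 * The hardware is then programmed with a 103 s period and a 5 s early
 * interrupt, and the interrupt handler reloads the counter n = 2 times
 * before panicking, which approximates the requested 300 s timeout.
 */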

static TEE_Result iwdg_wdt_set_timeout(struct wdt_chip *chip,
				       unsigned long timeout)
{
	struct stm32_iwdg_device *iwdg = wdt_chip_to_iwdg(chip);

	if (iwdg_wdt_is_enabled(iwdg)) {
		TEE_Result res = TEE_ERROR_GENERIC;

		stm32_iwdg_handle_timeouts(iwdg, timeout);

		res = configure_timeout(iwdg);
		if (res)
			return res;
	}

	return TEE_SUCCESS;
}

static TEE_Result iwdg_wdt_get_timeleft(struct wdt_chip *chip, bool *is_started,
					unsigned long *timeleft)
{
	struct stm32_iwdg_device *iwdg = wdt_chip_to_iwdg(chip);
	TEE_Result res = TEE_ERROR_GENERIC;
	TEE_Time time = { };
	TEE_Time now = { };

	*is_started = iwdg_wdt_is_enabled(iwdg);

	if (!*is_started)
		return TEE_SUCCESS;

	res = tee_time_get_sys_time(&now);
	if (res)
		panic();

	time.seconds =
		(iwdg->timeout - iwdg->early_timeout) * iwdg->saved_nb_int
		+ iwdg->early_timeout;
	TEE_TIME_ADD(iwdg->last_refresh, time, time);
	if (TEE_TIME_LE(time, now)) {
		*timeleft = 0;
	} else {
		TEE_TIME_SUB(time, now, time);
		*timeleft = time.seconds;
	}

	return TEE_SUCCESS;
}

static const struct wdt_ops stm32_iwdg_ops = {
	.init = iwdg_wdt_init,
	.start = iwdg_wdt_start,
	.stop = iwdg_wdt_stop,
	.ping = iwdg_wdt_refresh,
	.set_timeout = iwdg_wdt_set_timeout,
	.get_timeleft = iwdg_wdt_get_timeleft,
};
DECLARE_KEEP_PAGER(stm32_iwdg_ops);

/* Driver initialization */
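
/*
 * Illustrative device tree node matched by this driver; addresses, phandles
 * and interrupt numbers below are placeholders:
 *
 *	iwdg2: watchdog@5a002000 {
 *		compatible = "st,stm32mp1-iwdg";
 *		reg = <0x5a002000 0x400>;
 *		clocks = <&rcc IWDG2>, <&rcc CK_LSI>;
 *		clock-names = "pclk", "lsi";
 *		interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
 *		resets = <&rcc IWDG2_R>;
 *		timeout-sec = <32>;
 *	};
 *
 * "pclk", "lsi" and "timeout-sec" are mandatory, while the interrupt (early
 * wakeup) and the reset controller reference are optional.
 */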
static TEE_Result stm32_iwdg_parse_fdt(struct stm32_iwdg_device *iwdg,
				       const void *fdt, int node)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dt_node_info dt_info = { };
	const fdt32_t *cuint = NULL;

	fdt_fill_device_info(fdt, &dt_info, node);

	if (dt_info.reg == DT_INFO_INVALID_REG ||
	    dt_info.reg_size == DT_INFO_INVALID_REG_SIZE)
		panic();

	res = clk_dt_get_by_name(fdt, node, "pclk", &iwdg->clk_pclk);
	if (res)
		return res;

	res = clk_dt_get_by_name(fdt, node, "lsi", &iwdg->clk_lsi);
	if (res)
		return res;

	res = interrupt_dt_get(fdt, node, &iwdg->itr_chip, &iwdg->itr_num);
	if (res && res != TEE_ERROR_ITEM_NOT_FOUND)
		return res;
	if (!res) {
		res = interrupt_create_handler(iwdg->itr_chip, iwdg->itr_num,
					       stm32_iwdg_it_handler, iwdg, 0,
					       &iwdg->itr_handler);
		if (res)
			return res;
	}

	res = rstctrl_dt_get_by_index(fdt, node, 0, &iwdg->reset);
	if (res && res != TEE_ERROR_ITEM_NOT_FOUND)
		goto err_itr;

	/* Get IOMEM address */
	iwdg->base.pa = dt_info.reg;
	io_pa_or_va_secure(&iwdg->base, dt_info.reg_size);
	assert(iwdg->base.va);

	/* Get and check timeout value */
	cuint = fdt_getprop(fdt, node, "timeout-sec", NULL);
	if (!cuint) {
		res = TEE_ERROR_BAD_PARAMETERS;
		goto err_itr;
	}

	iwdg->timeout = (int)fdt32_to_cpu(*cuint);
	if (!iwdg->timeout) {
		res = TEE_ERROR_BAD_PARAMETERS;
		goto err_itr;
	}

	return TEE_SUCCESS;

err_itr:
	interrupt_remove_free_handler(iwdg->itr_handler);

	return res;
}

static void iwdg_wdt_get_version_and_status(struct stm32_iwdg_device *iwdg)
{
	vaddr_t iwdg_base = get_base(iwdg);
	uint32_t rlr_value = 0;

	iwdg->hw_version = io_read32(iwdg_base + IWDG_VERR_OFFSET) &
			   IWDG_VERR_REV_MASK;

	/* Test if watchdog is already running */
	if (iwdg->hw_version >= IWDG_ONF_MIN_VER) {
		if (io_read32(iwdg_base + IWDG_SR_OFFSET) & IWDG_SR_ONF)
			iwdg_wdt_set_enabled(iwdg);
	} else {
		/*
		 * Workaround for old versions without IWDG_SR_ONF bit:
		 * - write in IWDG_RLR_OFFSET
		 * - wait for sync
		 * - if sync succeeds, then iwdg is running
		 */
		io_write32(iwdg_base + IWDG_KR_OFFSET, IWDG_KR_ACCESS_KEY);

		rlr_value = io_read32(iwdg_base + IWDG_RLR_OFFSET);
		io_write32(iwdg_base + IWDG_RLR_OFFSET, rlr_value);

		if (!iwdg_wait_sync(iwdg))
			iwdg_wdt_set_enabled(iwdg);

		io_write32(iwdg_base + IWDG_KR_OFFSET, IWDG_KR_WPROT_KEY);
	}

	DMSG("Watchdog is %sabled", iwdg_wdt_is_enabled(iwdg) ? "en" : "dis");
}

static TEE_Result stm32_iwdg_pm(enum pm_op op, unsigned int pm_hint __unused,
				const struct pm_callback_handle *pm_handle)
{
	struct stm32_iwdg_device *iwdg = PM_CALLBACK_GET_HANDLE(pm_handle);

	if (op == PM_OP_RESUME) {
		clk_enable(iwdg->clk_lsi);
		clk_enable(iwdg->clk_pclk);
	} else {
		clk_disable(iwdg->clk_lsi);
		clk_disable(iwdg->clk_pclk);
	}

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(stm32_iwdg_pm);

static TEE_Result stm32_iwdg_probe(const void *fdt, int node,
				   const void *compat_data __unused)
{
	struct stm32_iwdg_device *iwdg = NULL;
	TEE_Result res = TEE_SUCCESS;

	iwdg = calloc(1, sizeof(*iwdg));
	if (!iwdg)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = stm32_iwdg_parse_fdt(iwdg, fdt, node);
	if (res)
		goto out_free;

	/* Enable watchdog source and bus clocks once for all */
	if (clk_enable(iwdg->clk_lsi))
		panic();

	if (clk_enable(iwdg->clk_pclk))
		panic();

	iwdg_wdt_get_version_and_status(iwdg);

	res = iwdg_wdt_set_timeout(&iwdg->wdt_chip, iwdg->timeout);
	if (res)
		panic();

	if (iwdg_wdt_is_enabled(iwdg))
		iwdg_wdt_refresh(&iwdg->wdt_chip);

	iwdg->wdt_chip.ops = &stm32_iwdg_ops;

	register_pm_core_service_cb(stm32_iwdg_pm, iwdg, "stm32-iwdg");

	res = watchdog_register(&iwdg->wdt_chip);
	if (res)
		goto out_pm;

	return TEE_SUCCESS;

out_pm:
	unregister_pm_core_service_cb(stm32_iwdg_pm, iwdg);
out_free:
	free(iwdg);

	return res;
}

static const struct dt_device_match stm32_iwdg_match_table[] = {
	{ .compatible = "st,stm32mp1-iwdg" },
	{ }
};

DEFINE_DT_DRIVER(stm32_iwdg_dt_driver) = {
	.name = "stm32-iwdg",
	.match_table = stm32_iwdg_match_table,
	.probe = stm32_iwdg_probe,
};