xref: /optee_os/core/drivers/gic.c (revision 6cfa381e534b362afbd103f526b132048e54ba47)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2017, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <config.h>
10 #include <compiler.h>
11 #include <drivers/gic.h>
12 #include <keep.h>
13 #include <kernel/dt.h>
14 #include <kernel/interrupt.h>
15 #include <kernel/panic.h>
16 #include <mm/core_memprot.h>
17 #include <mm/core_mmu.h>
18 #include <libfdt.h>
19 #include <util.h>
20 #include <io.h>
21 #include <trace.h>
22 
/* Offsets from gic.gicc_base (GICv2 memory-mapped CPU interface) */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

/* GICC_CTLR bits */
#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
/* NOTE(review): bit of GICD_CTLR (GICv3), kept grouped here historically */
#define GICD_CTLR_ENABLEGRP1S	(1 << 2)
/* Deliver group 0 interrupts as FIQ */
#define GICC_CTLR_FIQEN		(1 << 3)

/* Offsets from gic.gicd_base (distributor) */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
#define GICD_SGIR		(0xF00)

/* GICD_CTLR bits */
#define GICD_CTLR_ENABLEGRP0	(1 << 0)
#define GICD_CTLR_ENABLEGRP1	(1 << 1)

/*
 * Number of Private Peripheral Interrupt
 * NOTE(review): 32 covers the whole banked ID space (SGIs + PPIs, IDs
 * 0-31), not just the 16 PPIs — confirm intended meaning at call sites.
 */
#define NUM_PPI	32

/* Number of Software Generated Interrupt */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupt */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Accessors to access ITARGETSRn: one byte-wide CPU-target field per ID */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

/* GICD_TYPER.ITLinesNumber field and GICC_IAR ID/CPU fields */
#define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10

/*
 * Driver state for the single GIC instance.
 * @gicc_base: virtual base of the CPU interface (0 on GICv3, which uses
 *             system registers instead — see gic_init_base_addr())
 * @gicd_base: virtual base of the distributor
 * @max_it:    highest implemented interrupt ID, probed at init
 * @chip:      interrupt chip handle registered with the framework
 */
struct gic_data {
	vaddr_t gicc_base;
	vaddr_t gicd_base;
	size_t max_it;
	struct itr_chip chip;
};

/* Single driver instance, placed in nexus BSS (__nex_bss) */
static struct gic_data gic_data __nex_bss;

/* Forward declarations for the itr_ops callbacks below */
static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

/* Chip operations; mask/unmask intentionally alias disable/enable */
static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.mask = gic_op_disable,
	.unmask = gic_op_enable,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);
104 
/*
 * Probe the highest implemented interrupt ID.
 *
 * GICD_TYPER.ITLinesNumber gives the number of implemented 32-bit
 * enable-register banks. Writing all-ones to GICD_ISENABLER(i) latches a
 * 1 only for implemented interrupt lines, so the read-back reveals which
 * IDs exist. The previous enable state is restored by writing the
 * complement of the saved value to GICD_ICENABLER(i) (clearing exactly
 * the bits that were not enabled before the probe).
 *
 * The CPU interface is disabled for the duration of the probe —
 * presumably so the temporarily enabled interrupts cannot be delivered;
 * it is restored before returning. Returns 0 if no line is found.
 */
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
			  GICD_TYPER_IT_LINES_NUM_MASK;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	/* GICv3: CPU interface accessed through ICC_* system registers */
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	/* Scan from the top bank down so the first hit is the maximum ID */
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		/* Restore: disable everything that was not enabled before */
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}
147 
/*
 * Per-CPU GIC configuration, to be run on each core.
 *
 * Assigns the banked SGI/PPI group bits, opens the priority mask to
 * non-secure interrupts and enables the CPU interface. Requires
 * gic_init()/gic_init_base_addr() to have set up the base addresses.
 */
void gic_cpu_init(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	/* GICv3 needs no mapped CPU interface (system registers) */
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/* per-CPU interrupts config:
	 * ID0-ID7(SGI)   for Non-secure interrupts
	 * ID8-ID15(SGI)  for Secure interrupts.
	 * All PPI config as Non-secure interrupts.
	 */
	io_write32(gd->gicd_base + GICD_IGROUPR(0), 0xffff00ff);

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC: both groups, group 0 delivered as FIQ */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}
180 
181 static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
182 			  uint32_t *prio)
183 {
184 	int it_num = DT_INFO_INVALID_INTERRUPT;
185 
186 	if (type)
187 		*type = IRQ_TYPE_NONE;
188 
189 	if (prio)
190 		*prio = 0;
191 
192 	if (!properties || count < 2)
193 		return DT_INFO_INVALID_INTERRUPT;
194 
195 	it_num = fdt32_to_cpu(properties[1]);
196 
197 	switch (fdt32_to_cpu(properties[0])) {
198 	case 1:
199 		it_num += 16;
200 		break;
201 	case 0:
202 		it_num += 32;
203 		break;
204 	default:
205 		it_num = DT_INFO_INVALID_INTERRUPT;
206 	}
207 
208 	return it_num;
209 }
210 
211 static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa)
212 {
213 	struct gic_data *gd = &gic_data;
214 	vaddr_t gicc_base = 0;
215 	vaddr_t gicd_base = 0;
216 
217 	assert(cpu_mmu_enabled());
218 
219 	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
220 				    GIC_DIST_REG_SIZE);
221 	if (!gicd_base)
222 		panic();
223 
224 	if (!IS_ENABLED(CFG_ARM_GICV3)) {
225 		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
226 					    GIC_CPU_REG_SIZE);
227 		if (!gicc_base)
228 			panic();
229 	}
230 
231 	gd->gicc_base = gicc_base;
232 	gd->gicd_base = gicd_base;
233 	gd->max_it = probe_max_it(gicc_base, gicd_base);
234 	gd->chip.ops = &gic_ops;
235 
236 	if (IS_ENABLED(CFG_DT))
237 		gd->chip.dt_get_irq = gic_dt_get_irq;
238 }
239 
/*
 * One-time GIC initialization (boot CPU).
 *
 * Maps the GIC, probes the number of interrupt lines and registers the
 * chip with the interrupt framework. When OP-TEE runs without TF-A it
 * also performs the full distributor/CPU-interface reset configuration;
 * with TF-A embedded, that configuration is owned by TF-A.
 */
void gic_init(paddr_t gicc_base_pa, paddr_t gicd_base_pa)
{
	struct gic_data __maybe_unused *gd = &gic_data;
	size_t __maybe_unused n = 0;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa);

	/* GIC configuration is initialized from TF-A when embedded */
#ifndef CFG_WITH_ARM_TRUSTED_FW
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/* per-CPU interrupts config:
			 * ID0-ID7(SGI)	  for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts.
			 * All PPI config as Non-secure interrupts.
			 */
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffff00ff);
		} else {
			/* All SPIs default to the non-secure group */
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1);
#endif
#endif /*CFG_WITH_ARM_TRUSTED_FW*/

	interrupt_main_init(&gic_data.chip);
}
289 
290 static void gic_it_add(struct gic_data *gd, size_t it)
291 {
292 	size_t idx = it / NUM_INTS_PER_REG;
293 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
294 
295 	assert(gd == &gic_data);
296 
297 	/* Disable the interrupt */
298 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
299 	/* Make it non-pending */
300 	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
301 	/* Assign it to group0 */
302 	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
303 #if defined(CFG_ARM_GICV3)
304 	/* Assign it to group1S */
305 	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
306 #endif
307 }
308 
309 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
310 				uint8_t cpu_mask)
311 {
312 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
313 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
314 	uint32_t target, target_shift;
315 	vaddr_t itargetsr = gd->gicd_base +
316 			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);
317 
318 	assert(gd == &gic_data);
319 
320 	/* Assigned to group0 */
321 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
322 
323 	/* Route it to selected CPUs */
324 	target = io_read32(itargetsr);
325 	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
326 	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
327 	target |= cpu_mask << target_shift;
328 	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
329 	io_write32(itargetsr, target);
330 	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
331 }
332 
333 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
334 {
335 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
336 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
337 
338 	assert(gd == &gic_data);
339 
340 	/* Assigned to group0 */
341 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
342 
343 	/* Set prio it to selected CPUs */
344 	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
345 		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
346 	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
347 }
348 
349 static void gic_it_enable(struct gic_data *gd, size_t it)
350 {
351 	size_t idx = it / NUM_INTS_PER_REG;
352 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
353 	vaddr_t base = gd->gicd_base;
354 
355 	assert(gd == &gic_data);
356 
357 	/* Assigned to group0 */
358 	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));
359 
360 	/* Enable the interrupt */
361 	io_write32(base + GICD_ISENABLER(idx), mask);
362 }
363 
364 static void gic_it_disable(struct gic_data *gd, size_t it)
365 {
366 	size_t idx = it / NUM_INTS_PER_REG;
367 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
368 
369 	assert(gd == &gic_data);
370 
371 	/* Assigned to group0 */
372 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
373 
374 	/* Disable the interrupt */
375 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
376 }
377 
378 static void gic_it_set_pending(struct gic_data *gd, size_t it)
379 {
380 	size_t idx = it / NUM_INTS_PER_REG;
381 	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);
382 
383 	assert(gd == &gic_data);
384 
385 	/* Should be Peripheral Interrupt */
386 	assert(it >= NUM_SGI);
387 
388 	/* Raise the interrupt */
389 	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
390 }
391 
/*
 * Raise software generated interrupt @it on the CPUs in @cpu_mask.
 *
 * @group selects the interrupt group: non-zero for the non-secure group,
 * zero for the secure group (see gic_op_raise_sgi()).
 */
static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
			     uint8_t cpu_mask, uint8_t group)
{
#if defined(CFG_ARM_GICV3)
	/* Only support sending SGI to the cores in the same cluster now */
	uint32_t mask_id = it & 0xf;
	uint32_t mask_cpu = cpu_mask & 0xff;
	uint64_t mpidr = read_mpidr();
	uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >> MPIDR_AFF1_SHIFT;
	uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >> MPIDR_AFF2_SHIFT;
	uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >> MPIDR_AFF3_SHIFT;
	/*
	 * ICC_SGI1R-style layout: target list in [15:0], Aff1 at bit 16,
	 * INTID at bit 24, Aff2 at bit 32, Aff3 at bit 48 — the affinity
	 * fields are taken from this core's MPIDR (same-cluster limitation).
	 */
	uint64_t mask = (mask_cpu |
			SHIFT_U64(mask_aff1, 16) |
			SHIFT_U64(mask_id, 24)   |
			SHIFT_U64(mask_aff2, 32) |
			SHIFT_U64(mask_aff3, 48));

	/*
	 * Raise the interrupt. NOTE(review): the alias register (ASGI1R)
	 * is presumably used so a non-secure-group SGI can be generated
	 * from the secure state — confirm against the GICv3 spec.
	 */
	if (group)
		write_icc_asgi1r(mask);
	else
		write_icc_sgi1r(mask);
#else
	/* GICv2 GICD_SGIR: INTID [3:0], group select bit 15, CPU list [23:16] */
	uint32_t mask_id = it & 0xf;
	uint32_t mask_group = group & 0x1;
	uint32_t mask_cpu = cpu_mask & 0xff;
	uint32_t mask = (mask_id | SHIFT_U32(mask_group, 15) |
		SHIFT_U32(mask_cpu, 16));

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
#endif
}
425 
/*
 * Acknowledge the highest-priority pending interrupt and return the raw
 * IAR value (interrupt ID, plus source CPU bits for GICv2 SGIs).
 */
static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	/* GICv3: group 1 acknowledge via system register */
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}
436 
/*
 * Signal end-of-interrupt. @eoir must be the unmodified value previously
 * returned by gic_read_iar() for the same interrupt.
 */
static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}
447 
448 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
449 {
450 	size_t idx = it / NUM_INTS_PER_REG;
451 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
452 
453 	assert(gd == &gic_data);
454 	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
455 }
456 
457 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
458 {
459 	size_t idx = it / NUM_INTS_PER_REG;
460 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
461 
462 	assert(gd == &gic_data);
463 	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
464 }
465 
466 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
467 {
468 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
469 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
470 				ITARGETSR_FIELD_BITS;
471 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
472 	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));
473 
474 	assert(gd == &gic_data);
475 	return (target & target_mask) >> target_shift;
476 }
477 
/*
 * Debug helper: dump the control registers and every enabled interrupt
 * with its group and CPU-target field to the trace log.
 */
void gic_dump_state(void)
{
	struct gic_data *gd = &gic_data;
	int i = 0;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}
497 
/*
 * Native secure interrupt handler: acknowledge, dispatch to registered
 * handlers, then signal end-of-interrupt.
 *
 * The raw IAR value (not just the masked ID) is written back to EOIR,
 * preserving any extra bits (e.g. GICv2 SGI source CPU).
 */
static void __maybe_unused gic_native_itr_handler(void)
{
	struct gic_data *gd = &gic_data;
	uint32_t iar = 0;
	uint32_t id = 0;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

	/* Out-of-range IDs (e.g. spurious interrupt) are only logged */
	if (id <= gd->max_it)
		interrupt_call_handlers(&gd->chip, id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	gic_write_eoir(gd, iar);
}
514 
#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/*
 * Override the framework's weak interrupt_main_handler() with the GIC
 * implementation (skipped when the non-maskable-FIQ workaround owns it).
 */
void interrupt_main_handler(void)
{
	gic_native_itr_handler();
}
#endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/
522 
523 static void gic_op_add(struct itr_chip *chip, size_t it,
524 		       uint32_t type __unused,
525 		       uint32_t prio __unused)
526 {
527 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
528 
529 	assert(gd == &gic_data);
530 
531 	if (it > gd->max_it)
532 		panic();
533 
534 	gic_it_add(gd, it);
535 	/* Set the CPU mask to deliver interrupts to any online core */
536 	gic_it_set_cpu_mask(gd, it, 0xff);
537 	gic_it_set_prio(gd, it, 0x1);
538 }
539 
540 static void gic_op_enable(struct itr_chip *chip, size_t it)
541 {
542 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
543 
544 	assert(gd == &gic_data);
545 
546 	if (it > gd->max_it)
547 		panic();
548 
549 	gic_it_enable(gd, it);
550 }
551 
552 static void gic_op_disable(struct itr_chip *chip, size_t it)
553 {
554 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
555 
556 	assert(gd == &gic_data);
557 
558 	if (it > gd->max_it)
559 		panic();
560 
561 	gic_it_disable(gd, it);
562 }
563 
564 static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
565 {
566 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
567 
568 	assert(gd == &gic_data);
569 
570 	if (it > gd->max_it)
571 		panic();
572 
573 	gic_it_set_pending(gd, it);
574 }
575 
576 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
577 			uint8_t cpu_mask)
578 {
579 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
580 
581 	assert(gd == &gic_data);
582 
583 	/* Should be Software Generated Interrupt */
584 	assert(it < NUM_SGI);
585 
586 	if (it > gd->max_it)
587 		panic();
588 
589 	if (it < NUM_NS_SGI)
590 		gic_it_raise_sgi(gd, it, cpu_mask, 1);
591 	else
592 		gic_it_raise_sgi(gd, it, cpu_mask, 0);
593 }
594 
595 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
596 			uint8_t cpu_mask)
597 {
598 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
599 
600 	assert(gd == &gic_data);
601 
602 	if (it > gd->max_it)
603 		panic();
604 
605 	gic_it_set_cpu_mask(gd, it, cpu_mask);
606 }
607