/*
 * Copyright (c) 2020-2022, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/gic_common.h>
#include <lib/mmio.h>

#include <mt_cirq.h>
#include <mt_gic_v3.h>

static struct cirq_events cirq_all_events = {
	.spi_start = CIRQ_SPI_START,
};
static uint32_t already_cloned;

/*
 * mt_irq_mask_restore: restore all interrupts
 * @mask: pointer to struct mtk_irq_mask for storing the original mask value.
 * Return 0 for success; return negative values for failure.
 * (This is ONLY used for the idle current measurement by the factory mode.)
 */
int mt_irq_mask_restore(struct mtk_irq_mask *mask)
{
	if (mask == NULL) {
		return -1;
	}
	if (mask->header != IRQ_MASK_HEADER) {
		return -1;
	}
	if (mask->footer != IRQ_MASK_FOOTER) {
		return -1;
	}

	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x4),
		mask->mask1);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x8),
		mask->mask2);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0xc),
		mask->mask3);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x10),
		mask->mask4);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x14),
		mask->mask5);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x18),
		mask->mask6);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x1c),
		mask->mask7);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x20),
		mask->mask8);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x24),
		mask->mask9);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x28),
		mask->mask10);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x2c),
		mask->mask11);
	mmio_write_32((BASE_GICD_BASE + GICD_ISENABLER + 0x30),
		mask->mask12);
	/* make sure dist changes happen */
	dsb();

	return 0;
}
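
/*
 * Illustrative usage only (not part of this driver): a caller such as the
 * factory-mode idle current measurement is expected to pair
 * mt_irq_mask_all() (defined below) with mt_irq_mask_restore() around the
 * measurement window, e.g.
 *
 *	struct mtk_irq_mask mask;
 *
 *	if (mt_irq_mask_all(&mask) == 0) {
 *		... measurement window: all SPIs are disabled here ...
 *		mt_irq_mask_restore(&mask);
 *	}
 *
 * mask1..mask12 mirror GICD_ISENABLER<1..12> (offsets 0x4..0x30), i.e. the
 * enable bits of SPIs with INTIDs 32..415; each 32-bit register covers 32
 * interrupt IDs.
 */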
/*
 * mt_irq_mask_all: disable all interrupts
 * @mask: pointer to struct mtk_irq_mask for storing the original mask value.
 * Return 0 for success; return negative values for failure.
 * (This is ONLY used for the idle current measurement by the factory mode.)
 */
int mt_irq_mask_all(struct mtk_irq_mask *mask)
{
	if (mask != NULL) {
		/* for SPI */
		mask->mask1 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x4));
		mask->mask2 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x8));
		mask->mask3 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0xc));
		mask->mask4 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x10));
		mask->mask5 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x14));
		mask->mask6 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x18));
		mask->mask7 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x1c));
		mask->mask8 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x20));
		mask->mask9 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x24));
		mask->mask10 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x28));
		mask->mask11 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x2c));
		mask->mask12 = mmio_read_32((BASE_GICD_BASE +
			GICD_ISENABLER + 0x30));

		/* for SPI */
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x4),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x8),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0xC),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x10),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x14),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x18),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x1C),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x20),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x24),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x28),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x2c),
			0xFFFFFFFF);
		mmio_write_32((BASE_GICD_BASE + GICD_ICENABLER + 0x30),
			0xFFFFFFFF);
		/* make sure distributor changes happen */
		dsb();

		mask->header = IRQ_MASK_HEADER;
		mask->footer = IRQ_MASK_FOOTER;

		return 0;
	} else {
		return -1;
	}
}

static uint32_t mt_irq_get_pol(uint32_t irq)
{
#ifdef CIRQ_WITH_POLARITY
	uint32_t reg;
	uint32_t base = INT_POL_CTL0;

	if (irq < 32U) {
		return 0;
	}

	reg = ((irq - 32U) / 32U);

	return mmio_read_32(base + reg * 4U);
#else
	return 0;
#endif
}

unsigned int mt_irq_get_sens(unsigned int irq)
{
	unsigned int config;

	/*
	 * 2'b10 edge
	 * 2'b01 level
	 */
	config = mmio_read_32(MT_GIC_BASE + GICD_ICFGR + (irq / 16U) * 4U);
	config = (config >> (irq % 16U) * 2U) & 0x3;

	return config;
}
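
/*
 * Worked example (illustrative only): for irq = 70, mt_irq_get_sens()
 * reads GICD_ICFGR + (70 / 16) * 4 = GICD_ICFGR + 0x10 and extracts the
 * 2-bit field at bit position (70 % 16) * 2 = 12, i.e. bits [13:12] of
 * that register; each GICD_ICFGR<n> word describes the trigger type of
 * 16 interrupt IDs. The result is compared against SENS_EDGE by the
 * callers below.
 */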
static void collect_all_wakeup_events(void)
{
	unsigned int i;
	uint32_t gic_irq;
	uint32_t cirq;
	uint32_t cirq_reg;
	uint32_t cirq_offset;
	uint32_t mask;
	uint32_t pol_mask;
	uint32_t irq_offset;
	uint32_t irq_mask;

	if ((cirq_all_events.wakeup_events == NULL) ||
	    cirq_all_events.num_of_events == 0U) {
		return;
	}

	for (i = 0U; i < cirq_all_events.num_of_events; i++) {
		if (cirq_all_events.wakeup_events[i] > 0U) {
			gic_irq = cirq_all_events.wakeup_events[i];
			cirq = gic_irq - cirq_all_events.spi_start - 32U;
			cirq_reg = cirq / 32U;
			cirq_offset = cirq % 32U;
			mask = 0x1 << cirq_offset;
			irq_offset = gic_irq % 32U;
			irq_mask = 0x1 << irq_offset;
			/*
			 * CIRQ default masks all
			 */
			cirq_all_events.table[cirq_reg].mask |= mask;
			/*
			 * CIRQ default pol is low
			 */
			pol_mask = mt_irq_get_pol(
					cirq_all_events.wakeup_events[i])
					& irq_mask;
			/*
			 * 0 means rising
			 */
			if (pol_mask == 0U) {
				cirq_all_events.table[cirq_reg].pol |= mask;
			}
			/*
			 * CIRQ could monitor edge/level trigger
			 * cirq register (0: edge, 1: level)
			 */
			if (mt_irq_get_sens(cirq_all_events.wakeup_events[i])
					== SENS_EDGE) {
				cirq_all_events.table[cirq_reg].sen |= mask;
			}

			cirq_all_events.table[cirq_reg].used = 1U;
			cirq_all_events.table[cirq_reg].reg_num = cirq_reg;
		}
	}
}

/*
 * mt_cirq_set_pol: Set the polarity for the specified SYS_CIRQ number.
 * @cirq_num: the SYS_CIRQ number to set
 * @pol: polarity to set
 * @return:
 *    0: set pol success
 *   -1: cirq num is out of range
 */
#ifdef CIRQ_WITH_POLARITY
static int mt_cirq_set_pol(uint32_t cirq_num, uint32_t pol)
{
	uint32_t base;
	uint32_t bit = 1U << (cirq_num % 32U);

	if (cirq_num >= CIRQ_IRQ_NUM) {
		return -1;
	}

	if (pol == MT_CIRQ_POL_NEG) {
		base = (cirq_num / 32U) * 4U + CIRQ_POL_CLR_BASE;
	} else if (pol == MT_CIRQ_POL_POS) {
		base = (cirq_num / 32U) * 4U + CIRQ_POL_SET_BASE;
	} else {
		return -1;
	}

	mmio_write_32(base, bit);
	return 0;
}
#endif

/*
 * mt_cirq_mask: Mask the specified SYS_CIRQ.
 * @cirq_num: the SYS_CIRQ number to mask
 * @return:
 *    0: mask success
 *   -1: cirq num is out of range
 */
static int mt_cirq_mask(uint32_t cirq_num)
{
	uint32_t bit = 1U << (cirq_num % 32U);

	if (cirq_num >= CIRQ_IRQ_NUM) {
		return -1;
	}

	mmio_write_32((cirq_num / 32U) * 4U + CIRQ_MASK_SET_BASE, bit);

	return 0;
}
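
/*
 * Numbering note (illustrative only): a GIC SPI with INTID gic_irq maps to
 * SYS_CIRQ input (gic_irq - CIRQ_SPI_START - 32), and CIRQ_TO_IRQ_NUM() is
 * assumed to be the inverse mapping. For example, if CIRQ_SPI_START were 0
 * (the real value is platform-specific), INTID 96 would correspond to CIRQ
 * input 64, i.e. register 2, bit 0, in the per-register tables built by
 * collect_all_wakeup_events().
 */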
/*
 * mt_cirq_unmask: Unmask the specified SYS_CIRQ.
 * @cirq_num: the SYS_CIRQ number to unmask
 * @return:
 *    0: unmask success
 *   -1: cirq num is out of range
 */
static int mt_cirq_unmask(uint32_t cirq_num)
{
	uint32_t bit = 1U << (cirq_num % 32U);

	if (cirq_num >= CIRQ_IRQ_NUM) {
		return -1;
	}

	mmio_write_32((cirq_num / 32U) * 4U + CIRQ_MASK_CLR_BASE, bit);

	return 0;
}

uint32_t mt_irq_get_en(uint32_t irq)
{
	uint32_t addr, st, val;

	addr = BASE_GICD_BASE + GICD_ISENABLER + (irq / 32U) * 4U;
	st = mmio_read_32(addr);

	val = (st >> (irq % 32U)) & 1U;

	return val;
}

static void __cirq_fast_clone(void)
{
	struct cirq_reg *reg;
	unsigned int i;

	for (i = 0U; i < CIRQ_REG_NUM; ++i) {
		uint32_t cirq_bit;

		reg = &cirq_all_events.table[i];

		if (reg->used == 0U) {
			continue;
		}

		mmio_write_32(CIRQ_SENS_CLR_BASE + (reg->reg_num * 4U),
			      reg->sen);

		for (cirq_bit = 0U; cirq_bit < 32U; ++cirq_bit) {
			uint32_t val, cirq_id;
			uint32_t gic_id;
#ifdef CIRQ_WITH_POLARITY
			uint32_t gic_bit, pol;
#endif
			uint32_t en;

			val = ((1U << cirq_bit) & reg->mask);

			if (val == 0U) {
				continue;
			}

			cirq_id = (reg->reg_num << 5U) + cirq_bit;
			gic_id = CIRQ_TO_IRQ_NUM(cirq_id);
#ifdef CIRQ_WITH_POLARITY
			gic_bit = (0x1U << ((gic_id - 32U) % 32U));
			pol = mt_irq_get_pol(gic_id) & gic_bit;
			if (pol != 0U) {
				mt_cirq_set_pol(cirq_id, MT_CIRQ_POL_NEG);
			} else {
				mt_cirq_set_pol(cirq_id, MT_CIRQ_POL_POS);
			}
#endif
			en = mt_irq_get_en(gic_id);
			if (en == 1U) {
				mt_cirq_unmask(cirq_id);
			} else {
				mt_cirq_mask(cirq_id);
			}
		}
	}
}

static void cirq_fast_clone(void)
{
	if (already_cloned == 0U) {
		collect_all_wakeup_events();
		already_cloned = 1U;
	}
	__cirq_fast_clone();
}

void set_wakeup_sources(uint32_t *list, uint32_t num_of_events)
{
	cirq_all_events.num_of_events = num_of_events;
	cirq_all_events.wakeup_events = list;
}

/*
 * mt_cirq_clone_gic: Copy the setting from GIC to SYS_CIRQ
 */
void mt_cirq_clone_gic(void)
{
	cirq_fast_clone();
}

uint32_t mt_irq_get_pending_vec(uint32_t start_irq)
{
	uint32_t base = 0U;
	uint32_t pending_vec = 0U;
	uint32_t reg = start_irq / 32U;
	uint32_t LSB_num, MSB_num;
	uint32_t LSB_vec, MSB_vec;

	base = BASE_GICD_BASE;

	/* if start_irq is not aligned to 32, do some assembling */
	MSB_num = start_irq % 32U;
	if (MSB_num != 0U) {
		LSB_num = 32U - MSB_num;
		LSB_vec = mmio_read_32(base + GICD_ISPENDR +
				       reg * 4U) >> MSB_num;
		MSB_vec = mmio_read_32(base + GICD_ISPENDR +
				       (reg + 1U) * 4U) << LSB_num;
		pending_vec = MSB_vec | LSB_vec;
	} else {
		pending_vec = mmio_read_32(base + GICD_ISPENDR + reg * 4U);
	}

	return pending_vec;
}

static uint32_t mt_cirq_get_mask_vec(unsigned int i)
{
	return mmio_read_32((i * 4U) + CIRQ_MASK_BASE);
}
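
/*
 * Worked example (illustrative only): with start_irq = 50,
 * mt_irq_get_pending_vec() reads GICD_ISPENDR1 (INTIDs 32..63) and
 * GICD_ISPENDR2 (INTIDs 64..95), then returns
 * (ISPENDR1 >> 18) | (ISPENDR2 << 14), so bit n of the result is the
 * pending state of INTID 50 + n. mt_cirq_ack_all() below relies on this
 * to fetch a 32-bit pending view that starts at an arbitrary, not
 * necessarily 32-aligned, INTID.
 */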
/*
 * mt_cirq_ack_all: Ack all the interrupts on SYS_CIRQ
 */
void mt_cirq_ack_all(void)
{
	uint32_t ack_vec, pend_vec, mask_vec;
	unsigned int i;

	for (i = 0; i < CIRQ_CTRL_REG_NUM; i++) {
		/*
		 * if an irq is pending & not masked, don't ack it;
		 * since the cirq start irq might not be 32-aligned with the
		 * gic, an exotic API is needed to get the proper vector of
		 * pending irqs
		 */
		pend_vec = mt_irq_get_pending_vec(CIRQ_SPI_START +
						  (i + 1U) * 32U);
		mask_vec = mt_cirq_get_mask_vec(i);
		/* the ones to be acked are: "not (pending & not masked)" */
		ack_vec = (~pend_vec) | mask_vec;
		mmio_write_32(CIRQ_ACK_BASE + (i * 4U), ack_vec);
	}

	/*
	 * make sure all cirq settings take effect
	 * before doing other things
	 */
	dsb();
}

/*
 * mt_cirq_enable: Enable SYS_CIRQ
 */
void mt_cirq_enable(void)
{
	uint32_t st;

	/* level only */
	mt_cirq_ack_all();

	st = mmio_read_32(CIRQ_CON);
	/*
	 * CIRQ could monitor edge/level trigger
	 */
	st |= (CIRQ_CON_EN << CIRQ_CON_EN_BITS);

	mmio_write_32(CIRQ_CON, (st & CIRQ_CON_BITS_MASK));
}

/*
 * mt_cirq_disable: Disable SYS_CIRQ
 */
void mt_cirq_disable(void)
{
	uint32_t st;

	st = mmio_read_32(CIRQ_CON);
	st &= ~(CIRQ_CON_EN << CIRQ_CON_EN_BITS);
	mmio_write_32(CIRQ_CON, (st & CIRQ_CON_BITS_MASK));
}

void mt_irq_unmask_for_sleep_ex(uint32_t irq)
{
	uint32_t mask;

	mask = 1U << (irq % 32U);

	mmio_write_32(BASE_GICD_BASE + GICD_ISENABLER +
		      ((irq / 32U) * 4U), mask);
}

void mt_cirq_mask_all(void)
{
	unsigned int i;

	for (i = 0U; i < CIRQ_CTRL_REG_NUM; i++) {
		mmio_write_32(CIRQ_MASK_SET_BASE + (i * 4U), 0xFFFFFFFF);
	}
	dsb();
}

static void cirq_fast_sw_flush(void)
{
	struct cirq_reg *reg;
	unsigned int i;

	for (i = 0U; i < CIRQ_REG_NUM; ++i) {
		uint32_t cirq_bit;

		reg = &cirq_all_events.table[i];

		if (reg->used == 0U) {
			continue;
		}

		reg->pending = mmio_read_32(CIRQ_STA_BASE +
					    (reg->reg_num << 2U));
		reg->pending &= reg->mask;

		for (cirq_bit = 0U; cirq_bit < 32U; ++cirq_bit) {
			uint32_t val, cirq_id;

			val = (1U << cirq_bit) & reg->pending;
			if (val == 0U) {
				continue;
			}

			cirq_id = (reg->reg_num << 5U) + cirq_bit;
			mt_irq_set_pending(CIRQ_TO_IRQ_NUM(cirq_id));
			if (CIRQ_TO_IRQ_NUM(cirq_id) == MD_WDT_IRQ_BIT_ID) {
				INFO("Set MD_WDT_IRQ pending in %s\n",
				     __func__);
			}
		}
	}
}

/*
 * mt_cirq_flush: Flush interrupts from SYS_CIRQ to GIC
 */
void mt_cirq_flush(void)
{
	cirq_fast_sw_flush();
	mt_cirq_mask_all();
	mt_cirq_ack_all();
}

void mt_cirq_sw_reset(void)
{
	uint32_t st;

	st = mmio_read_32(CIRQ_CON);
	st |= (CIRQ_SW_RESET << CIRQ_CON_SW_RST_BITS);
	mmio_write_32(CIRQ_CON, st);
}
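
/*
 * Illustrative call flow (not part of this driver): one plausible ordering
 * in which the platform power-management code could drive SYS_CIRQ; the
 * exact call sites, ordering, and the wakeup_list contents shown here are
 * platform-specific assumptions, not taken from this file.
 *
 *	// once, during platform setup
 *	set_wakeup_sources(wakeup_list, num_of_wakeup_events);
 *
 *	// before entering a low-power state where the GIC may lose state
 *	mt_cirq_clone_gic();
 *	mt_cirq_enable();
 *
 *	// after waking up, before handing interrupts back to the GIC
 *	mt_cirq_flush();
 *	mt_cirq_disable();
 */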