Lines Matching +full:0 +full:x42800000 (tokenized form of the literal 0x42800000)

37 #define PPU_PROFILING            0
44 #define SPU_PROFILE_EVENT_ADDR_MASK_A 0x146 /* sub unit set to zero */
45 #define SPU_PROFILE_EVENT_ADDR_MASK_B 0x186 /* sub unit set to zero */
54 #define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */
62 #define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */
68 #define NUM_INTERVAL_CYC 0xFFFFFFFF - 10
107 PASSTHRU_IGNORE = 0,
129 #define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
130 #define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
131 #define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
132 #define GET_POLARITY(x) ((x & 0x00000002) >> 1)
133 #define GET_COUNT_CYCLES(x) (x & 0x00000001)
134 #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
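The six GET_* macros above unpack the bit fields that oprofile packs into a single event specifier (sub unit, debug-bus word, bus type, polarity, count-cycles flag, input control). A standalone sketch applying them to a made-up specifier; the value 0x0000A2B7 is purely hypothetical, chosen so every field comes out non-zero:

    #include <stdio.h>

    #define GET_SUB_UNIT(x)      ((x & 0x0000f000) >> 12)
    #define GET_BUS_WORD(x)      ((x & 0x000000f0) >> 4)
    #define GET_BUS_TYPE(x)      ((x & 0x00000300) >> 8)
    #define GET_POLARITY(x)      ((x & 0x00000002) >> 1)
    #define GET_COUNT_CYCLES(x)  (x & 0x00000001)
    #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)

    int main(void)
    {
        unsigned int event = 0x0000A2B7; /* hypothetical packed event specifier */

        printf("sub_unit      = %u\n", GET_SUB_UNIT(event));      /* 10 */
        printf("bus_word      = %u\n", GET_BUS_WORD(event));      /* 11 */
        printf("bus_type      = %u\n", GET_BUS_TYPE(event));      /* 2 */
        printf("polarity      = %u\n", GET_POLARITY(event));      /* 1 */
        printf("count_cycles  = %u\n", GET_COUNT_CYCLES(event));  /* 1 */
        printf("input_control = %u\n", GET_INPUT_CONTROL(event)); /* 1 */
        return 0;
    }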
157 * 0 - even virtual cpus 0, 2, 4,...
197 passthru, paddr >> 32, paddr & 0xffffffff, length); in rtas_ibm_cbe_perftools()
218 pm_signal_local.sub_unit = 0; in pm_rtas_reset_signals()
219 pm_signal_local.bit = 0; in pm_rtas_reset_signals()
249 i = 0; in pm_rtas_activate_signals()
250 for (j = 0; j < count; j++) { in pm_rtas_activate_signals()
264 if (i != 0) { in pm_rtas_activate_signals()
276 return 0; in pm_rtas_activate_signals()
295 p->sub_unit = 0; in set_pm_event()
296 p->bit = 0; in set_pm_event()
299 pm_regs.pm07_cntrl[ctr] = 0; in set_pm_event()
315 pm_regs.pm07_cntrl[ctr] = 0; in set_pm_event()
329 if (input_control == 0) { in set_pm_event()
332 if (bus_word == 0x3) in set_pm_event()
333 bus_word = 0x2; in set_pm_event()
334 else if (bus_word == 0xc) in set_pm_event()
335 bus_word = 0x8; in set_pm_event()
338 if ((bus_type == 0) && p->signal_group >= 60) in set_pm_event()
341 bus_type = 0; in set_pm_event()
345 pm_regs.pm07_cntrl[ctr] = 0; in set_pm_event()
349 for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) { in set_pm_event()
354 for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) { in set_pm_event()
355 if (input_bus[j] == 0xff) { in set_pm_event()
372 * Oprofile will use 32 bit counters, set bits 7:10 to 0 in write_pm_cntrl()
376 u32 val = 0; in write_pm_cntrl()
383 if (pm_regs.pm_cntrl.trace_mode != 0) in write_pm_cntrl()
469 pm_regs.group_control = 0; in cell_virtual_cntr()
470 pm_regs.debug_bus_control = 0; in cell_virtual_cntr()
472 for (i = 0; i < NUM_INPUT_BUS_WORDS; i++) in cell_virtual_cntr()
473 input_bus[i] = 0xff; in cell_virtual_cntr()
479 for (i = 0; i < num_counters; i++) in cell_virtual_cntr()
498 for (i = 0; i < num_counters; i++) { in cell_virtual_cntr()
503 == 0xFFFFFFFF) in cell_virtual_cntr()
504 /* If the cntr value is 0xffffffff, we must in cell_virtual_cntr()
505 * reset that to 0xfffffff0 when the current in cell_virtual_cntr()
514 cbe_write_ctr(cpu, i, 0xFFFFFFF0); in cell_virtual_cntr()
527 for (i = 0; i < num_counters; i++) { in cell_virtual_cntr()
537 cbe_write_pm07_control(cpu, i, 0); in cell_virtual_cntr()
554 timer_setup(&timer_virt_cntr, cell_virtual_cntr, 0); in start_virt_cntrs()
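start_virt_cntrs() arms the counter-swapping work with the standard kernel timer API. A minimal sketch of that arming pattern follows; the swap body and the HZ/10 period are assumptions for illustration, only the timer_setup() call itself appears in these matches:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list timer_virt_cntr;

    /* timer_setup() callbacks receive the timer itself */
    static void cell_virtual_cntr(struct timer_list *t)
    {
        /* ... swap the active per-thread counter set here ... */

        /* re-arm; the HZ/10 period is an assumption, not taken from the matches */
        mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
    }

    static void start_virt_cntrs(void)
    {
        timer_setup(&timer_virt_cntr, cell_virtual_cntr, 0);
        timer_virt_cntr.expires = jiffies + HZ / 10;
        add_timer(&timer_virt_cntr);
    }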
562 spu_cycle_reset = ctr[0].count; in cell_reg_setup_spu_cycles()
576 return 0; in cell_reg_setup_spu_cycles()
596 /* enable interrupts on cntr 0 */ in spu_evnt_swap()
597 interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0); in spu_evnt_swap()
599 hdw_thread = 0; in spu_evnt_swap()
609 spu_evnt_phys_spu_indx = 0; in spu_evnt_swap()
611 pm_signal[0].sub_unit = spu_evnt_phys_spu_indx; in spu_evnt_swap()
634 = cbe_read_ctr(cpu, 0); in spu_evnt_swap()
638 * counter value is at max (0xFFFFFFFF). in spu_evnt_swap()
640 if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF) in spu_evnt_swap()
641 cbe_write_ctr(cpu, 0, 0xFFFFFFF0); in spu_evnt_swap()
643 cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]); in spu_evnt_swap()
658 cbe_write_pm(cpu, trace_address, 0); in spu_evnt_swap()
660 enable_ctr(cpu, 0, pm_regs.pm07_cntrl); in spu_evnt_swap()
676 timer_setup(&timer_spu_event_swap, spu_evnt_swap, 0); in start_spu_event_swap()
688 spu_evnt_phys_spu_indx = 0; in cell_reg_setup_spu_events()
716 pm_regs.pm_cntrl.spu_addr_trace = 0x1; /* using debug bus in cell_reg_setup_spu_events()
720 * Note, pm_signal[0] will be filled in by set_pm_event() call below. in cell_reg_setup_spu_events()
736 set_pm_event(0, ctr[0].event, ctr[0].unit_mask); in cell_reg_setup_spu_events()
738 reset_value[0] = 0xFFFFFFFF - ctr[0].count; in cell_reg_setup_spu_events()
744 for (i=0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++) in cell_reg_setup_spu_events()
745 spu_pm_cnt[i] = reset_value[0]; in cell_reg_setup_spu_events()
747 return 0; in cell_reg_setup_spu_events()
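The reset_value[0] line above preloads the 32-bit performance counter so that it overflows after the requested number of events: the counter counts up and raises its overflow interrupt on the wrap past 0xFFFFFFFF. A worked example, assuming a hypothetical sampling period of 100000 events:

    reset_value[0] = 0xFFFFFFFF - 100000
                   = 0xFFFFFFFF - 0x000186A0
                   = 0xFFFE795F

Counting up from 0xFFFE795F, the counter reaches 0xFFFFFFFF after 100000 increments, and the overflow that follows delivers the sample; spu_pm_cnt[] is then seeded with the same value for every physical SPU, as the loop above shows.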
768 /* Setup the thread 0 events */ in cell_reg_setup_ppu()
769 for (i = 0; i < num_ctrs; ++i) { in cell_reg_setup_ppu()
771 pmc_cntrl[0][i].evnts = ctr[i].event; in cell_reg_setup_ppu()
772 pmc_cntrl[0][i].masks = ctr[i].unit_mask; in cell_reg_setup_ppu()
773 pmc_cntrl[0][i].enabled = ctr[i].enabled; in cell_reg_setup_ppu()
774 pmc_cntrl[0][i].vcntr = i; in cell_reg_setup_ppu()
777 per_cpu(pmc_values, j)[i] = 0; in cell_reg_setup_ppu()
781 * Setup the thread 1 events, map the thread 0 event to the in cell_reg_setup_ppu()
784 for (i = 0; i < num_ctrs; ++i) { in cell_reg_setup_ppu()
799 for (i = 0; i < NUM_INPUT_BUS_WORDS; i++) in cell_reg_setup_ppu()
800 input_bus[i] = 0xff; in cell_reg_setup_ppu()
809 for (i = 0; i < num_counters; ++i) { in cell_reg_setup_ppu()
810 /* start with virtual counter set 0 */ in cell_reg_setup_ppu()
811 if (pmc_cntrl[0][i].enabled) { in cell_reg_setup_ppu()
813 reset_value[i] = 0xFFFFFFFF - ctr[i].count; in cell_reg_setup_ppu()
815 pmc_cntrl[0][i].evnts, in cell_reg_setup_ppu()
816 pmc_cntrl[0][i].masks); in cell_reg_setup_ppu()
825 for (i = 0; i < num_counters; ++i) { in cell_reg_setup_ppu()
829 return 0; in cell_reg_setup_ppu()
837 int ret=0; in cell_reg_setup()
838 spu_cycle_reset = 0; in cell_reg_setup()
843 pm_regs.group_control = 0; in cell_reg_setup()
844 pm_regs.debug_bus_control = 0; in cell_reg_setup()
846 pm_regs.pm_cntrl.trace_mode = 0; in cell_reg_setup()
848 pm_regs.pm_cntrl.trace_buf_ovflw = 0; in cell_reg_setup()
849 pm_regs.pm_cntrl.spu_addr_trace = 0; in cell_reg_setup()
866 if (ctr[0].event == SPU_CYCLES_EVENT_NUM) { in cell_reg_setup()
869 } else if ((ctr[0].event >= SPU_EVENT_NUM_START) && in cell_reg_setup()
870 (ctr[0].event <= SPU_EVENT_NUM_STOP)) { in cell_reg_setup()
872 spu_cycle_reset = ctr[0].count; in cell_reg_setup()
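Together with the three cell_reg_setup_*() helpers above, these checks pick the profiling mode from the event number of counter 0. A condensed sketch of that dispatch; only the two SPU branches are visible in the matches, and the final PPU fallback and the argument names are assumptions:

    if (ctr[0].event == SPU_CYCLES_EVENT_NUM)
        ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
    else if (ctr[0].event >= SPU_EVENT_NUM_START &&
             ctr[0].event <= SPU_EVENT_NUM_STOP)
        ret = cell_reg_setup_spu_events(ctr, sys, num_ctrs);
    else
        ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);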
895 u32 num_enabled = 0; in cell_cpu_setup()
904 return 0; in cell_cpu_setup()
910 return 0; in cell_cpu_setup()
916 cbe_write_pm(cpu, pm_start_stop, 0); in cell_cpu_setup()
921 for (i = 0; i < num_counters; ++i) { in cell_cpu_setup()
949 #define MAXLFSR 0xFFFFFF
1012 * 0 to 2^16-1 ---- 0
1030 * unsigned int lfsr = 0xFFFFFF;
1034 * newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
1046 #define V2_16 (0x1 << 16)
1047 #define V2_19 (0x1 << 19)
1048 #define V2_22 (0x1 << 22)
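The comment fragment above (seed lfsr = 0xFFFFFF, newlfsr0 = ...) describes how the n-th value of the hardware's 24-bit LFSR sequence can be computed by iterating the feedback step. A standalone sketch of that iteration; the feedback tap positions 0, 1, 6 and 23 are an assumption, since only the first tap survives in the truncated comment:

    #include <stdio.h>

    /* Iterate the 24-bit LFSR n times from the 0xFFFFFF seed.
     * Tap positions (0, 1, 6, 23) are assumed, not confirmed by the matches. */
    static unsigned int lfsr_after_n_steps(unsigned int n)
    {
        const int size = 24;
        unsigned int lfsr = 0xFFFFFF;   /* MAXLFSR, the seed from the comment */
        unsigned int newbit, i;

        for (i = 0; i < n; i++) {
            newbit = ((lfsr >> (size - 1 - 0)) & 1) ^
                     ((lfsr >> (size - 1 - 1)) & 1) ^
                     ((lfsr >> (size - 1 - 6)) & 1) ^
                     ((lfsr >> (size - 1 - 23)) & 1);
            lfsr >>= 1;
            lfsr |= newbit << (size - 1);
        }
        return lfsr & 0xFFFFFF;
    }

    int main(void)
    {
        printf("lfsr after 1000 steps: 0x%06x\n", lfsr_after_n_steps(1000));
        return 0;
    }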
1058 if ((n >> 16) == 0) in calculate_lfsr()
1059 index = 0; in calculate_lfsr()
1060 else if (((n - V2_16) >> 19) == 0) in calculate_lfsr()
1062 else if (((n - V2_16 - V2_19) >> 22) == 0) in calculate_lfsr()
1064 else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0) in calculate_lfsr()
1070 if ((index >= ENTRIES) || (index < 0)) in calculate_lfsr()
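The range checks above classify the requested count n before indexing the precomputed LFSR table (the boundaries V2_16, V2_19 and V2_22 are defined earlier). A worked example with a hypothetical n = 300000:

    n >> 16           = 300000 >> 16 = 4   (non-zero, so not the first range)
    (n - V2_16) >> 19 = 234464 >> 19 = 0   (so n falls in the second range)

calculate_lfsr() would therefore pick an index from the second band of the table; the exact index arithmetic within each band is not shown in these matches.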
1085 for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++) { in pm_rtas_activate_spu_profiling()
1106 return 0; in pm_rtas_activate_spu_profiling()
1113 int ret = 0; in oprof_cpufreq_notify()
1140 oprofile_running = 0; in cell_global_stop_spu_cycles()
1156 lfsr_value = 0x8f100000; in cell_global_stop_spu_cycles()
1162 if (unlikely(rtn_value != 0)) { in cell_global_stop_spu_cycles()
1179 oprofile_running = 0; in cell_global_stop_spu_events()
1191 cbe_write_pm07_control(cpu, 0, 0); in cell_global_stop_spu_events()
1212 oprofile_running = 0; in cell_global_stop_ppu()
1248 unsigned int cpu_khzfreq = 0; in cell_global_start_spu_cycles()
1258 if (ret < 0) in cell_global_start_spu_cycles()
1275 * Set perf_mon_control bit 0 to a zero before in cell_global_start_spu_cycles()
1278 cbe_write_pm(cpu, pm_control, 0); in cell_global_start_spu_cycles()
1287 if (lfsr_value == 0) in cell_global_start_spu_cycles()
1309 if (unlikely(ret != 0)) { in cell_global_start_spu_cycles()
1323 return 0; in cell_global_start_spu_cycles()
1334 u32 interrupt_mask = 0; in cell_global_start_spu_events()
1335 int rtn = 0; in cell_global_start_spu_events()
1337 hdw_thread = 0; in cell_global_start_spu_events()
1359 * Set perf_mon_control bit 0 to a zero before in cell_global_start_spu_events()
1365 cbe_write_ctr(cpu, 0, reset_value[0]); in cell_global_start_spu_events()
1366 enable_ctr(cpu, 0, pm_regs.pm07_cntrl); in cell_global_start_spu_events()
1368 CBE_PM_CTR_OVERFLOW_INTR(0); in cell_global_start_spu_events()
1371 cbe_write_pm07_control(cpu, 0, 0); in cell_global_start_spu_events()
1379 cbe_write_pm(cpu, trace_address, 0); in cell_global_start_spu_events()
1397 u32 interrupt_mask = 0; in cell_global_start_ppu()
1407 interrupt_mask = 0; in cell_global_start_ppu()
1409 for (i = 0; i < num_counters; ++i) { in cell_global_start_ppu()
1416 cbe_write_pm07_control(cpu, i, 0); in cell_global_start_ppu()
1437 return 0; in cell_global_start_ppu()
1454 * The pm_signal[0] holds the one SPU event to be measured. It is routed on
1455 * the debug bus using word 0 or 1. The value of pm_signal[1] and
1503 sample = 0xABCDEF; in cell_handle_interrupt_spu()
1504 trace_entry = 0xfedcba; in cell_handle_interrupt_spu()
1505 last_trace_buffer = 0xdeadbeaf; in cell_handle_interrupt_spu()
1507 if ((oprofile_running == 1) && (interrupt_mask != 0)) { in cell_handle_interrupt_spu()
1509 cbe_write_pm(cpu, pm_interval, 0); in cell_handle_interrupt_spu()
1511 /* only have one perf cntr being used, cntr 0 */ in cell_handle_interrupt_spu()
1512 if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0)) in cell_handle_interrupt_spu()
1513 && ctr[0].enabled) in cell_handle_interrupt_spu()
1518 cbe_write_ctr(cpu, 0, reset_value[0]); in cell_handle_interrupt_spu()
1534 * HDR bits 0:15 in cell_handle_interrupt_spu()
1535 * SPU Addr 0 bits 16:31 in cell_handle_interrupt_spu()
1539 * HDR: bit4 = 1 SPU Address 0 valid in cell_handle_interrupt_spu()
1543 * Note trace_buffer[0] holds bits 0:63 of the HW in cell_handle_interrupt_spu()
1547 trace_entry = trace_buffer[0] in cell_handle_interrupt_spu()
1548 & 0x00000000FFFF0000; in cell_handle_interrupt_spu()
1554 last_trace_buffer = trace_buffer[0]; in cell_handle_interrupt_spu()
1579 cbe_write_pm(cpu, trace_address, 0); in cell_handle_interrupt_spu()
1602 unsigned long flags = 0; in cell_handle_interrupt_ppu()
1629 * 0xffffff0 to cause the interrupt to be regenerated. in cell_handle_interrupt_ppu()
1632 if ((oprofile_running == 1) && (interrupt_mask != 0)) { in cell_handle_interrupt_ppu()
1636 for (i = 0; i < num_counters; ++i) { in cell_handle_interrupt_ppu()
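The loop that closes these matches walks the counters in the PPU interrupt handler. A sketch of the per-counter handling it plausibly performs, reconstructed from the SPU-side excerpts above (cbe_write_ctr(), reset_value[], CBE_PM_CTR_OVERFLOW_INTR()); the oprofile_add_ext_sample() call and the surrounding pc/regs/is_kernel variables are assumptions not shown in the matches:

    for (i = 0; i < num_counters; ++i) {
        if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i)) &&
            ctr[i].enabled) {
            /* record one sample, then rearm the counter so it
             * overflows again after reset_value[i] more events */
            oprofile_add_ext_sample(pc, regs, i, is_kernel);
            cbe_write_ctr(cpu, i, reset_value[i]);
        }
    }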