/*
 * From Coreboot northbridge/intel/sandybridge/northbridge.c
 *
 * Copyright (C) 2007-2009 coresystems GmbH
 * Copyright (C) 2011 The Chromium Authors
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <dm.h>
#include <asm/msr.h>
#include <asm/cpu.h>
#include <asm/intel_regs.h>
#include <asm/io.h>
#include <asm/pci.h>
#include <asm/processor.h>
#include <asm/arch/pch.h>
#include <asm/arch/model_206ax.h>
#include <asm/arch/sandybridge.h>

DECLARE_GLOBAL_DATA_PTR;
int bridge_silicon_revision(struct udevice *dev)
{
	struct cpuid_result result;
	u16 bridge_id;
	u8 stepping;

	result = cpuid(1);
	stepping = result.eax & 0xf;
	dm_pci_read_config16(dev, PCI_DEVICE_ID, &bridge_id);
	bridge_id &= 0xf0;
	return bridge_id | stepping;
}

/*
 * Reserve everything between A segment and 1MB:
 *
 * 0xa0000 - 0xbffff: legacy VGA
 * 0xc0000 - 0xcffff: VGA OPROM (needed by kernel)
 * 0xe0000 - 0xfffff: SeaBIOS, if used, otherwise DMI
 */
static const int legacy_hole_base_k = 0xa0000 / 1024;
static const int legacy_hole_size_k = 384;

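/*
 * Decode the PCIEXBAR (PCI Express configuration space base) register.
 * Per the Sandy Bridge host-bridge layout, bit 0 enables the window and
 * bits 2:1 select its size (0 = 256MB, 1 = 128MB, 2 = 64MB); the remaining
 * high-order bits hold the base address. Returns 1 and fills in base/len
 * when the window is enabled, 0 otherwise.
 */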
static int get_pcie_bar(struct udevice *dev, u32 *base, u32 *len)
{
	u32 pciexbar_reg;

	*base = 0;
	*len = 0;

	dm_pci_read_config32(dev, PCIEXBAR, &pciexbar_reg);

	if (!(pciexbar_reg & (1 << 0)))
		return 0;

	switch ((pciexbar_reg >> 1) & 3) {
	case 0: /* 256MB */
		*base = pciexbar_reg & ((1 << 31) | (1 << 30) | (1 << 29) |
				(1 << 28));
		*len = 256 * 1024 * 1024;
		return 1;
	case 1: /* 128M */
		*base = pciexbar_reg & ((1 << 31) | (1 << 30) | (1 << 29) |
				(1 << 28) | (1 << 27));
		*len = 128 * 1024 * 1024;
		return 1;
	case 2: /* 64M */
		*base = pciexbar_reg & ((1 << 31) | (1 << 30) | (1 << 29) |
				(1 << 28) | (1 << 27) | (1 << 26));
		*len = 64 * 1024 * 1024;
		return 1;
	}

	return 0;
}

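/*
 * Derived from coreboot's add_fixed_resources(); in this U-Boot port the
 * PCIe config window is only probed and logged rather than registered as
 * a resource.
 */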
static void add_fixed_resources(struct udevice *dev, int index)
{
	u32 pcie_config_base, pcie_config_size;

	if (get_pcie_bar(dev, &pcie_config_base, &pcie_config_size)) {
		debug("Adding PCIe config bar base=0x%08x size=0x%x\n",
		      pcie_config_base, pcie_config_size);
	}
}

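/*
 * Basic DMI link setup: clear the error status registers, apply the
 * stepping-specific tunings that must precede ASPM, then enable ASPM on
 * the CPU side of the DMI link (before the PCH side is configured).
 */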
static void northbridge_dmi_init(struct udevice *dev, int rev)
{
	/* Clear error status bits */
	writel(0xffffffff, DMIBAR_REG(0x1c4));
	writel(0xffffffff, DMIBAR_REG(0x1d0));

	/* Steps prior to DMI ASPM */
	if ((rev & BASE_REV_MASK) == BASE_REV_SNB) {
		clrsetbits_le32(DMIBAR_REG(0x250), (1 << 22) | (1 << 20),
				1 << 21);
	}

	setbits_le32(DMIBAR_REG(0x238), 1 << 29);

	if (rev >= SNB_STEP_D0) {
		setbits_le32(DMIBAR_REG(0x1f8), 1 << 16);
	} else if (rev >= SNB_STEP_D1) {
		clrsetbits_le32(DMIBAR_REG(0x1f8), 1 << 26, 1 << 16);
		setbits_le32(DMIBAR_REG(0x1fc), (1 << 12) | (1 << 23));
	}

	/* Enable ASPM on SNB link, should happen before PCH link */
	if ((rev & BASE_REV_MASK) == BASE_REV_SNB)
		setbits_le32(DMIBAR_REG(0xd04), 1 << 4);

	setbits_le32(DMIBAR_REG(0x88), (1 << 1) | (1 << 0));
}

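/*
 * Main northbridge init, run after relocation: set up fixed resources and
 * the DMI link, record the bridge type in MCHBAR, signal BIOS_RESET_CPL so
 * the CPU knows memory and power-management init is complete, then program
 * the turbo power limits and prepare for graphics PM init.
 */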
static void northbridge_init(struct udevice *dev, int rev)
{
	u32 bridge_type;

	add_fixed_resources(dev, 6);
	northbridge_dmi_init(dev, rev);

	bridge_type = readl(MCHBAR_REG(0x5f10));
	bridge_type &= ~0xff;

	if ((rev & BASE_REV_MASK) == BASE_REV_IVB) {
		/* Enable Power Aware Interrupt Routing - fixed priority */
		clrsetbits_8(MCHBAR_REG(0x5418), 0xf, 0x4);

		/* 30h for IvyBridge */
		bridge_type |= 0x30;
	} else {
		/* 20h for Sandybridge */
		bridge_type |= 0x20;
	}
	writel(bridge_type, MCHBAR_REG(0x5f10));

	/*
	 * Set bit 0 of BIOS_RESET_CPL to indicate to the CPU
	 * that BIOS has initialized memory and power management
	 */
	setbits_8(MCHBAR_REG(BIOS_RESET_CPL), 1);
	debug("Set BIOS_RESET_CPL\n");

	/* Configure turbo power limits 1ms after reset complete bit */
	mdelay(1);
	set_power_limits(28);

	/*
	 * CPUs with configurable TDP also need power limits set
	 * in MCHBAR. Use same values from MSR_PKG_POWER_LIMIT.
	 */
	if (cpu_config_tdp_levels()) {
		msr_t msr = msr_read(MSR_PKG_POWER_LIMIT);

		writel(msr.lo, MCHBAR_REG(0x59A0));
		writel(msr.hi, MCHBAR_REG(0x59A4));
	}

	/* Set here before graphics PM init */
	writel(0x00100001, MCHBAR_REG(0x5500));
}

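/*
 * Program the fixed (hardcoded) northbridge BARs - EPBAR, MCHBAR, DMIBAR
 * and a 64MB PCIEXBAR window covering buses 0-63 - and open the legacy
 * PAM regions so that C0000-FFFFF reads and writes go to RAM.
 */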
static void sandybridge_setup_northbridge_bars(struct udevice *dev)
{
	/* Set up all hardcoded northbridge BARs */
	debug("Setting up static registers\n");
	dm_pci_write_config32(dev, EPBAR, DEFAULT_EPBAR | 1);
	dm_pci_write_config32(dev, EPBAR + 4, (0LL + DEFAULT_EPBAR) >> 32);
	dm_pci_write_config32(dev, MCHBAR, MCH_BASE_ADDRESS | 1);
	dm_pci_write_config32(dev, MCHBAR + 4, (0LL + MCH_BASE_ADDRESS) >> 32);
	/* 64MB - busses 0-63 */
	dm_pci_write_config32(dev, PCIEXBAR, DEFAULT_PCIEXBAR | 5);
	dm_pci_write_config32(dev, PCIEXBAR + 4,
			      (0LL + DEFAULT_PCIEXBAR) >> 32);
	dm_pci_write_config32(dev, DMIBAR, DEFAULT_DMIBAR | 1);
	dm_pci_write_config32(dev, DMIBAR + 4, (0LL + DEFAULT_DMIBAR) >> 32);

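	/*
	 * PAM encoding (per Intel host-bridge datasheets): each 2-bit field
	 * selects 01 = read enable, 10 = write enable, 11 = read/write to
	 * DRAM. 0x33 opens both sub-ranges of a segment; PAM0 uses 0x30 as
	 * only its upper field (0xF0000-0xFFFFF) is implemented.
	 */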
	/* Set C0000-FFFFF to access RAM on both reads and writes */
	dm_pci_write_config8(dev, PAM0, 0x30);
	dm_pci_write_config8(dev, PAM1, 0x33);
	dm_pci_write_config8(dev, PAM2, 0x33);
	dm_pci_write_config8(dev, PAM3, 0x33);
	dm_pci_write_config8(dev, PAM4, 0x33);
	dm_pci_write_config8(dev, PAM5, 0x33);
	dm_pci_write_config8(dev, PAM6, 0x33);
}

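/*
 * Early (pre-relocation) init: apply the device ID override (mobile
 * variant) when CAPID0_A bit 10 is set, program the fixed northbridge
 * BARs and enable the host bridge and internal graphics devices.
 */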
static int bd82x6x_northbridge_early_init(struct udevice *dev)
{
	const int chipset_type = SANDYBRIDGE_MOBILE;
	u32 capid0_a;
	u8 reg8;

	/* Device ID Override Enable should be done very early */
	dm_pci_read_config32(dev, 0xe4, &capid0_a);
	if (capid0_a & (1 << 10)) {
		dm_pci_read_config8(dev, 0xf3, &reg8);
		reg8 &= ~7;	/* Clear 2:0 */

		if (chipset_type == SANDYBRIDGE_MOBILE)
			reg8 |= 1;	/* Set bit 0 */

		dm_pci_write_config8(dev, 0xf3, reg8);
	}

	sandybridge_setup_northbridge_bars(dev);

	/* Device Enable */
	dm_pci_write_config32(dev, DEVEN, DEVEN_HOST | DEVEN_IGD);

	return 0;
}

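/*
 * The probe is two-phase: before relocation only the early BAR and device
 * setup runs; after relocation the silicon revision is read and the full
 * northbridge init sequence is performed.
 */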
static int bd82x6x_northbridge_probe(struct udevice *dev)
{
	int rev;

	if (!(gd->flags & GD_FLG_RELOC))
		return bd82x6x_northbridge_early_init(dev);

	rev = bridge_silicon_revision(dev);
	northbridge_init(dev, rev);

	return 0;
}

static const struct udevice_id bd82x6x_northbridge_ids[] = {
	{ .compatible = "intel,bd82x6x-northbridge" },
	{ }
};

U_BOOT_DRIVER(bd82x6x_northbridge_drv) = {
	.name		= "bd82x6x_northbridge",
	.id		= UCLASS_NORTHBRIDGE,
	.of_match	= bd82x6x_northbridge_ids,
	.probe		= bd82x6x_northbridge_probe,
};