/*
 * DesignWare application register space functions for Keystone PCI controller
 *
 * Copyright (C) 2013-2014 Texas Instruments, Ltd.
 *		http://www.ti.com
 *
 * Author: Murali Karicheri <m-karicheri2@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"
#include "pci-keystone.h"

/* Application register defines */
#define LTSSM_EN_VAL			1
#define LTSSM_STATE_MASK		0x1f
#define LTSSM_STATE_L0			0x11
#define DBI_CS2_EN_VAL			0x20
#define OB_XLAT_EN_VAL			2

/* Application registers */
#define CMD_STATUS			0x004
#define CFG_SETUP			0x008
#define OB_SIZE				0x030
#define CFG_PCIM_WIN_SZ_IDX		3
#define CFG_PCIM_WIN_CNT		32
#define SPACE0_REMOTE_CFG_OFFSET	0x1000
#define OB_OFFSET_INDEX(n)		(0x200 + (8 * (n)))
#define OB_OFFSET_HI(n)			(0x204 + (8 * (n)))

/* IRQ register defines */
#define IRQ_EOI				0x050
#define IRQ_STATUS			0x184
#define IRQ_ENABLE_SET			0x188
#define IRQ_ENABLE_CLR			0x18c

#define MSI_IRQ				0x054
#define MSI0_IRQ_STATUS			0x104
#define MSI0_IRQ_ENABLE_SET		0x108
#define MSI0_IRQ_ENABLE_CLR		0x10c
#define MSI_IRQ_OFFSET			4
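
/*
 * The MSI0_IRQ_* registers repeat at a 0x10 stride for banks MSI0-MSI7;
 * the handlers below reach bank n at MSI0_IRQ_* + (n << 4).  When writing
 * IRQ_EOI, MSI_IRQ_OFFSET skips past the four legacy INTx EOI codes to
 * reach the MSI bank codes.
 */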

/* Config space registers */
#define DEBUG0				0x728

#define to_keystone_pcie(x)	container_of(x, struct keystone_pcie, pp)

static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}

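/*
 * The 32 MSI vectors are spread across eight register banks (MSI0-MSI7),
 * four vectors per bank: bank = vector % 8, bit = vector / 8.  For
 * example, vector 17 lands in bank MSI1, status/enable bit 2.
 */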
static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
					     u32 *bit_pos)
{
	*reg_offset = offset % 8;
	*bit_pos = offset >> 3;
}

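/*
 * The address returned here is handed to endpoints as the MSI target:
 * inbound writes to the MSI_IRQ application register raise the MSI
 * interrupts demultiplexed in ks_dw_pcie_handle_msi_irq() below.
 */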
u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	return ks_pcie->app.start + MSI_IRQ;
}

void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct pcie_port *pp = &ks_pcie->pp;
	u32 pending, vector;
	int src, virq;

	pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));

	/*
	 * MSI0 status bits 0-3 correspond to vectors 0, 8, 16, 24; MSI1
	 * status bits 0-3 correspond to vectors 1, 9, 17, 25; and so forth.
	 */
	for (src = 0; src < 4; src++) {
		if (BIT(src) & pending) {
			vector = offset + (src << 3);
			virq = irq_linear_revmap(pp->irq_domain, vector);
			dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
				src, vector, virq);
			generic_handle_irq(virq);
		}
	}
}

static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
{
	u32 offset, reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie;
	unsigned int irq = d->irq;
	struct msi_desc *msi;
	struct pcie_port *pp;

	msi = irq_get_msi_desc(irq);
	pp = sys_to_pcie(msi->dev->bus->sysdata);
	ks_pcie = to_keystone_pcie(pp);
	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);

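	/*
	 * Ack by clearing the vector's latched status bit in its MSI bank
	 * (write one to clear), then EOI the bank so further interrupts
	 * from it are forwarded.
	 */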
	writel(BIT(bit_pos),
	       ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
	writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
}

void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	writel(BIT(bit_pos),
	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
}

void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	writel(BIT(bit_pos),
	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
}

static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
{
	unsigned int irq = d->irq;
	struct msi_desc *msi;
	struct pcie_port *pp;
	u32 offset;

	msi = irq_get_msi_desc(irq);
	pp = sys_to_pcie(msi->dev->bus->sysdata);
	offset = irq - irq_linear_revmap(pp->irq_domain, 0);

	/*
	 * Mask the vector at the endpoint too if it implements per-vector
	 * masking (PVM).
	 */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (msi->msi_attrib.maskbit)
			pci_msi_mask_irq(d);
	}

	ks_dw_pcie_msi_clear_irq(pp, offset);
}

static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq;
	struct msi_desc *msi;
	struct pcie_port *pp;
	u32 offset;

	msi = irq_get_msi_desc(irq);
	pp = sys_to_pcie(msi->dev->bus->sysdata);
	offset = irq - irq_linear_revmap(pp->irq_domain, 0);

	/*
	 * Unmask the vector at the endpoint too if it implements per-vector
	 * masking (PVM).
	 */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (msi->msi_attrib.maskbit)
			pci_msi_unmask_irq(d);
	}

	ks_dw_pcie_msi_set_irq(pp, offset);
}

static struct irq_chip ks_dw_pcie_msi_irq_chip = {
	.name = "Keystone-PCIe-MSI-IRQ",
	.irq_ack = ks_dw_pcie_msi_irq_ack,
	.irq_mask = ks_dw_pcie_msi_irq_mask,
	.irq_unmask = ks_dw_pcie_msi_irq_unmask,
};

static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}

static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
	.map = ks_dw_pcie_msi_map,
};

int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	int i;

	pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
					MAX_MSI_IRQS,
					&ks_dw_pcie_msi_domain_ops,
					chip);
	if (!pp->irq_domain) {
		dev_err(pp->dev, "irq domain init failed\n");
		return -ENXIO;
	}

	for (i = 0; i < MAX_MSI_IRQS; i++)
		irq_create_mapping(pp->irq_domain, i);

	return 0;
}

void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
{
	int i;

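	/*
	 * Each legacy INTx line has its own bank of IRQ registers at a
	 * 0x10 stride; set the enable bit for each of the MAX_LEGACY_IRQS
	 * lines.
	 */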
	for (i = 0; i < MAX_LEGACY_IRQS; i++)
		writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
}

void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct pcie_port *pp = &ks_pcie->pp;
	u32 pending;
	int virq;

	pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));

	if (BIT(0) & pending) {
		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
		dev_dbg(pp->dev, "irq: irq_offset %d, virq %d\n", offset,
			virq);
		generic_handle_irq(virq);
	}

	/* EOI the INTx interrupt */
	writel(offset, ks_pcie->va_app_base + IRQ_EOI);
}

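/*
 * The legacy irq_chip callbacks below are intentionally empty: the only
 * hardware-level acknowledgment needed is the IRQ_EOI write already done
 * in ks_dw_pcie_handle_legacy_irq() above.
 */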
static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
{
}

static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_dw_pcie_ack_legacy_irq,
	.irq_mask = ks_dw_pcie_mask_legacy_irq,
	.irq_unmask = ks_dw_pcie_unmask_legacy_irq,
};

static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, d->host_data);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}

static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
	.map = ks_dw_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};

/**
 * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
 * registers
 * @reg_virt: application register space base
 *
 * Since modification of dbi_cs2 involves a different clock domain, read
 * the status back to ensure the transition is complete.
 */
static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
{
	u32 val;

	writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
	       reg_virt + CMD_STATUS);

	do {
		val = readl(reg_virt + CMD_STATUS);
	} while (!(val & DBI_CS2_EN_VAL));
}

/**
 * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
 * @reg_virt: application register space base
 *
 * Since modification of dbi_cs2 involves a different clock domain, read
 * the status back to ensure the transition is complete.
 */
static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
{
	u32 val;

	writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
	       reg_virt + CMD_STATUS);

	do {
		val = readl(reg_virt + CMD_STATUS);
	} while (val & DBI_CS2_EN_VAL);
}

void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
	struct pcie_port *pp = &ks_pcie->pp;
	u32 start = pp->mem.start, end = pp->mem.end;
	int i, tr_size;

	/* Disable BARs for inbound access */
	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);

	/*
	 * Set the outbound translation window size; the index encodes a
	 * power-of-two number of megabytes per window.
	 */
	writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);

	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
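	/*
	 * With CFG_PCIM_WIN_SZ_IDX = 3 each window spans 8 MB, so the 32
	 * windows programmed below cover 256 MB of PCI memory space.
	 */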

	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
	for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
		/* Low address bits; bit 0 acts as the window enable */
		writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
		writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
		start += tr_size;
	}

	/* Enable OB translation */
	writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
	       ks_pcie->va_app_base + CMD_STATUS);
}

/**
 * ks_pcie_cfg_setup() - Set up configuration space address for a device
 *
 * @ks_pcie: ptr to keystone_pcie structure
 * @bus: bus number the device resides on
 * @devfn: encoded device and function number
 *
 * Forms and returns the address of the configuration space mapped in PCIESS
 * address space 0.  Also configures CFG_SETUP for remote configuration space
 * access.
 *
 * The address space has two regions for configuration access: local and
 * remote.  The local region is used for bus 0 (the RC itself); the remote
 * region is used for everything else, with TYPE 1 accesses when bus > 1 and
 * TYPE 0 accesses for bus 1, which is the RC's (logical) secondary bus.
 * CFG_SETUP is needed only for remote configuration access.
 */
static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
				       unsigned int devfn)
{
	u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
	struct pcie_port *pp = &ks_pcie->pp;
	u32 regval;

	if (bus == 0)
		return pp->dbi_base;

	regval = (bus << 16) | (device << 8) | function;

	/*
	 * Bus 1 is the RC's virtual secondary bus, so it gets TYPE 0
	 * accesses; for buses beyond it, set BIT(24) to request TYPE 1
	 * accesses.
	 */
	if (bus != 1)
		regval |= BIT(24);

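	/*
	 * Latch the target BDF (and TYPE 0/1 selection) in CFG_SETUP;
	 * accesses through the remote window at va_cfg0_base are then
	 * routed to that device.
	 */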
	writel(regval, ks_pcie->va_app_base + CFG_SETUP);
	return pp->va_cfg0_base;
}

int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
			     unsigned int devfn, int where, int size, u32 *val)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	u8 bus_num = bus->number;
	void __iomem *addr;

	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);

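	/*
	 * dw_pcie_cfg_read() reads the dword at the aligned address and
	 * extracts the requested bytes using 'where' and 'size'.
	 */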
	return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val);
}

int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
			     unsigned int devfn, int where, int size, u32 val)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	u8 bus_num = bus->number;
	void __iomem *addr;

	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);

	return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val);
}

/**
 * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
 * @pp: DesignWare PCIe port
 *
 * Sets up BAR0 so that inbound writes to the MSI_IRQ register are accepted.
 */
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	/* Configure and set up BAR0 */
	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);

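	/*
	 * With DBI CS2 set, these BAR0 writes hit the overlaid BAR mask
	 * register: bit 0 enables the BAR and the mask value grants a 4K
	 * aperture.
	 */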
	/* Enable BAR0 */
	writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
	writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);

	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);

	/*
	 * For BAR0, just setting bus address for inbound writes (MSI) should
	 * be sufficient.  Use physical address to avoid any conflicts.
	 */
	writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
}

/**
 * ks_dw_pcie_link_up() - Check whether the PCIe link is up
 * @pp: DesignWare PCIe port
 */
int ks_dw_pcie_link_up(struct pcie_port *pp)
{
	u32 val = readl(pp->dbi_base + DEBUG0);

	return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
}

void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
{
	u32 val;

	/* Disable link training */
	val = readl(ks_pcie->va_app_base + CMD_STATUS);
	val &= ~LTSSM_EN_VAL;
	writel(val, ks_pcie->va_app_base + CMD_STATUS);

	/* Initiate link training by toggling LTSSM_EN back on */
	val = readl(ks_pcie->va_app_base + CMD_STATUS);
	writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
}


/**
 * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
 * @ks_pcie: Keystone PCIe controller state
 * @msi_intc_np: device node of the MSI interrupt controller
 *
 * Ioremap the register resources, initialize the legacy irq domain and call
 * dw_pcie_host_init() to initialize the Keystone PCI host controller.
 */
int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
				struct device_node *msi_intc_np)
{
	struct pcie_port *pp = &ks_pcie->pp;
	struct platform_device *pdev = to_platform_device(pp->dev);
	struct resource *res;

	/* Index 0 is the config reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->dbi_base = devm_ioremap_resource(pp->dev, res);
	if (IS_ERR(pp->dbi_base))
		return PTR_ERR(pp->dbi_base);

	/*
	 * va_cfg0_base and va_cfg1_base point at the same remote config
	 * window; both are used by the rd/wr_other_conf callbacks.
	 */
	pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
	pp->va_cfg1_base = pp->va_cfg0_base;

	/* Index 1 is the application reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

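	/*
	 * Keep the physical app region: its start address doubles as the
	 * MSI target address returned by ks_dw_pcie_get_msi_addr().
	 */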
	ks_pcie->app = *res;

	/* Create legacy IRQ domain */
	ks_pcie->legacy_irq_domain =
			irq_domain_add_linear(ks_pcie->legacy_intc_np,
					MAX_LEGACY_IRQS,
					&ks_dw_pcie_legacy_irq_domain_ops,
					NULL);
	if (!ks_pcie->legacy_irq_domain) {
		dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
		return -EINVAL;
	}

	return dw_pcie_host_init(pp);
}