/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>

#include "coresight-priv.h"

#define TMC_RSZ			0x004
#define TMC_STS			0x00c
#define TMC_RRD			0x010
#define TMC_RRP			0x014
#define TMC_RWP			0x018
#define TMC_TRG			0x01c
#define TMC_CTL			0x020
#define TMC_RWD			0x024
#define TMC_MODE		0x028
#define TMC_LBUFLEVEL		0x02c
#define TMC_CBUFLEVEL		0x030
#define TMC_BUFWM		0x034
#define TMC_RRPHI		0x038
#define TMC_RWPHI		0x03c
#define TMC_AXICTL		0x110
#define TMC_DBALO		0x118
#define TMC_DBAHI		0x11c
#define TMC_FFSR		0x300
#define TMC_FFCR		0x304
#define TMC_PSCR		0x308
#define TMC_ITMISCOP0		0xee0
#define TMC_ITTRFLIN		0xee8
#define TMC_ITATBDATA0		0xeec
#define TMC_ITATBCTR2		0xef0
#define TMC_ITATBCTR1		0xef4
#define TMC_ITATBCTR0		0xef8

/* Register description */
/* TMC_CTL - 0x020 */
#define TMC_CTL_CAPT_EN		BIT(0)
/* TMC_STS - 0x00c */
#define TMC_STS_TRIGGERED	BIT(1)
/* TMC_AXICTL - 0x110 */
#define TMC_AXICTL_PROT_CTL_B0	BIT(0)
#define TMC_AXICTL_PROT_CTL_B1	BIT(1)
#define TMC_AXICTL_SCT_GAT_MODE	BIT(7)
#define TMC_AXICTL_WR_BURST_LEN 0xF00
/* TMC_FFCR - 0x304 */
#define TMC_FFCR_EN_FMT		BIT(0)
#define TMC_FFCR_EN_TI		BIT(1)
#define TMC_FFCR_FON_FLIN	BIT(4)
#define TMC_FFCR_FON_TRIG_EVT	BIT(5)
#define TMC_FFCR_FLUSHMAN	BIT(6)
#define TMC_FFCR_TRIGON_TRIGIN	BIT(8)
#define TMC_FFCR_STOP_ON_FLUSH	BIT(12)

#define TMC_STS_TRIGGERED_BIT	2
#define TMC_FFCR_FLUSHMAN_BIT	6

enum tmc_config_type {
	TMC_CONFIG_TYPE_ETB,
	TMC_CONFIG_TYPE_ETR,
	TMC_CONFIG_TYPE_ETF,
};

enum tmc_mode {
	TMC_MODE_CIRCULAR_BUFFER,
	TMC_MODE_SOFTWARE_FIFO,
	TMC_MODE_HARDWARE_FIFO,
};

enum tmc_mem_intf_width {
	TMC_MEM_INTF_WIDTH_32BITS	= 0x2,
	TMC_MEM_INTF_WIDTH_64BITS	= 0x3,
	TMC_MEM_INTF_WIDTH_128BITS	= 0x4,
	TMC_MEM_INTF_WIDTH_256BITS	= 0x5,
};

/**
 * struct tmc_drvdata - specifics associated to a TMC component
 * @base:	memory mapped base address for this component.
 * @dev:	the device entity associated to this component.
 * @csdev:	component vitals needed by the framework.
 * @miscdev:	specifics to handle "/dev/xyz.tmc" entry.
 * @clk:	the clock this component is associated to.
 * @spinlock:	only one at a time pls.
 * @read_count:	manages preparation of buffer for reading.
 * @reading:	whether the buffer is currently being read by userspace.
 * @buf:	area of memory where trace data gets sent.
 * @paddr:	DMA start location in RAM.
 * @vaddr:	virtual representation of @paddr.
 * @size:	@buf size.
 * @enable:	this TMC is being used.
 * @config_type: TMC variant, must be of type @tmc_config_type.
 * @trigger_cntr: number of words to store after a trigger.
 */
struct tmc_drvdata {
	void __iomem		*base;
	struct device		*dev;
	struct coresight_device	*csdev;
	struct miscdevice	miscdev;
	struct clk		*clk;
	spinlock_t		spinlock;
	int			read_count;
	bool			reading;
	char			*buf;
	dma_addr_t		paddr;
	void __iomem		*vaddr;
	u32			size;
	bool			enable;
	enum tmc_config_type	config_type;
	u32			trigger_cntr;
};

static void tmc_wait_for_ready(struct tmc_drvdata *drvdata)
{
	/* Ensure formatter, unformatter and hardware fifo are empty */
	if (coresight_timeout(drvdata->base,
			      TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) {
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TMC_STS);
	}
}

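/*
 * Manually flush and stop the TMC: set StopOnFl so capture stops once the
 * flush completes, request a manual flush (FlushMan), wait for the FlushMan
 * bit to clear and then for the TMC to report ready.
 */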
static void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
	u32 ffcr;

	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	ffcr |= TMC_FFCR_FLUSHMAN;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* Ensure flush completes */
	if (coresight_timeout(drvdata->base,
			      TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TMC_FFCR);
	}

	tmc_wait_for_ready(drvdata);
}

static void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}

static void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(0x0, drvdata->base + TMC_CTL);
}

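/*
 * Configure the TMC as an ETB sink: circular buffer mode with formatting
 * and trigger insertion enabled, flushing on FlIn and on the trigger event,
 * and a trigger forwarded when TRIGIN asserts. Capture is then turned on
 * via TMC_CTL.
 */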
static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	/* Zero out the memory to help with debug */
	memset(drvdata->buf, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

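/*
 * Configure the TMC as an ETR: trace data is written out to system memory
 * over AXI. TMC_RSZ takes the buffer size in 32-bit words (hence size / 4)
 * and TMC_DBALO/TMC_DBAHI take the DMA address of the buffer. The AXICTL
 * programming below requests the maximum write burst length and, presumably,
 * non-secure accesses via PROT_CTL_B1; the exact AXI attribute encoding
 * should be checked against the TMC TRM.
 */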
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl |= TMC_AXICTL_WR_BURST_LEN;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	axictl = (axictl &
		  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
		  TMC_AXICTL_PROT_CTL_B1;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);

	writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
	writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

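/*
 * Configure the TMC as an ETF link: hardware FIFO mode, so trace simply
 * streams through to the ATB output with formatting enabled and the buffer
 * watermark cleared.
 */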
static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

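/*
 * Turn the component on: enable the clock, then program the hardware
 * according to the TMC flavour. Enabling is refused while userspace is
 * reading the buffer (drvdata->reading), since capture would otherwise
 * trample the data being extracted.
 */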
static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
{
	int ret;
	unsigned long flags;

	ret = clk_prepare_enable(drvdata->clk);
	if (ret)
		return ret;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		clk_disable_unprepare(drvdata->clk);
		return -EBUSY;
	}

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		tmc_etb_enable_hw(drvdata);
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		tmc_etr_enable_hw(drvdata);
	} else {
		if (mode == TMC_MODE_CIRCULAR_BUFFER)
			tmc_etb_enable_hw(drvdata);
		else
			tmc_etf_enable_hw(drvdata);
	}
	drvdata->enable = true;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC enabled\n");
	return 0;
}

static int tmc_enable_sink(struct coresight_device *csdev)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
}

static int tmc_enable_link(struct coresight_device *csdev, int inport,
			   int outport)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO);
}

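/*
 * Drain the internal trace RAM into drvdata->buf via the RAM Read Data
 * register. DEVID[10:8] gives the memory interface width, which determines
 * how many 32-bit reads make up one memory word; a read value of 0xFFFFFFFF
 * from TMC_RRD is treated as the end of the trace data.
 */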
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	enum tmc_mem_intf_width memwidth;
	u8 memwords;
	char *bufp;
	u32 read_data;
	int i;

	memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10);
	if (memwidth == TMC_MEM_INTF_WIDTH_32BITS)
		memwords = 1;
	else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS)
		memwords = 2;
	else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS)
		memwords = 4;
	else
		memwords = 8;

	bufp = drvdata->buf;
	while (1) {
		for (i = 0; i < memwords; i++) {
			read_data = readl_relaxed(drvdata->base + TMC_RRD);
			if (read_data == 0xFFFFFFFF)
				return;
			memcpy(bufp, &read_data, 4);
			bufp += 4;
		}
	}
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

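/*
 * Work out where valid ETR data starts in the DMA buffer. When the status
 * register reports the buffer as full (bit 0, i.e. the write pointer has
 * wrapped), reading must start at the current write pointer; otherwise it
 * starts at the beginning of the buffer.
 */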
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
	u32 rwp, val;

	rwp = readl_relaxed(drvdata->base + TMC_RWP);
	val = readl_relaxed(drvdata->base + TMC_STS);

	/* Has the buffer wrapped around? */
	if (val & BIT(0))
		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
	else
		drvdata->buf = drvdata->vaddr;
}

static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
{
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		tmc_etb_disable_hw(drvdata);
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		tmc_etr_disable_hw(drvdata);
	} else {
		if (mode == TMC_MODE_CIRCULAR_BUFFER)
			tmc_etb_disable_hw(drvdata);
		else
			tmc_etf_disable_hw(drvdata);
	}
out:
	drvdata->enable = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	clk_disable_unprepare(drvdata->clk);

	dev_info(drvdata->dev, "TMC disabled\n");
}

static void tmc_disable_sink(struct coresight_device *csdev)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
}

static void tmc_disable_link(struct coresight_device *csdev, int inport,
			     int outport)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO);
}

static const struct coresight_ops_sink tmc_sink_ops = {
	.enable		= tmc_enable_sink,
	.disable	= tmc_disable_sink,
};

static const struct coresight_ops_link tmc_link_ops = {
	.enable		= tmc_enable_link,
	.disable	= tmc_disable_link,
};

static const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_sink_ops,
};

static const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_sink_ops,
};

static const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_sink_ops,
	.link_ops	= &tmc_link_ops,
};

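/*
 * Stop the capture so that the buffer contents are stable while userspace
 * reads them. For an ETF configured as a hardware FIFO there is no
 * software-visible buffer to read, hence -ENODEV in that case. The hardware
 * is re-armed in tmc_read_unprepare() once the last reader has closed the
 * device.
 */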
static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
	int ret;
	unsigned long flags;
	enum tmc_mode mode;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->enable)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		tmc_etb_disable_hw(drvdata);
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		tmc_etr_disable_hw(drvdata);
	} else {
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode == TMC_MODE_CIRCULAR_BUFFER) {
			tmc_etb_disable_hw(drvdata);
		} else {
			ret = -ENODEV;
			goto err;
		}
	}
out:
	drvdata->reading = true;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC read start\n");
	return 0;
err:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return ret;
}

static void tmc_read_unprepare(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	enum tmc_mode mode;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->enable)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		tmc_etb_enable_hw(drvdata);
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		tmc_etr_enable_hw(drvdata);
	} else {
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode == TMC_MODE_CIRCULAR_BUFFER)
			tmc_etb_enable_hw(drvdata);
	}
out:
	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC read end\n");
}

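/*
 * Character device interface: the trace buffer is exposed through the misc
 * device registered in tmc_probe() (the "/dev/xyz.tmc" entry mentioned in
 * the kernel-doc above, where the actual name comes from the platform data).
 * The captured trace can then be pulled out with an ordinary read, e.g.
 * something along the lines of:
 *
 *	dd if=/dev/xyz.tmc of=trace.bin
 */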
static int tmc_open(struct inode *inode, struct file *file)
{
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);
	int ret = 0;

	if (drvdata->read_count++)
		goto out;

	ret = tmc_read_prepare(drvdata);
	if (ret) {
		/* Undo the increment so a later open can try again */
		drvdata->read_count--;
		return ret;
	}
out:
	nonseekable_open(inode, file);

	dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
	return 0;
}

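/*
 * Copy trace data to userspace. For the ETR the buffer is circular in
 * system memory: the read position may start in the middle (see
 * tmc_etr_dump_hw()), so pointers past the end of the buffer wrap back to
 * the start and a single read never crosses the wrap point.
 */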
static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
			loff_t *ppos)
{
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);
	char *bufp = drvdata->buf + *ppos;

	if (*ppos + len > drvdata->size)
		len = drvdata->size - *ppos;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		if (bufp == (char *)(drvdata->vaddr + drvdata->size))
			bufp = drvdata->vaddr;
		else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
			bufp -= drvdata->size;
		if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
			len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
	}

	if (copy_to_user(data, bufp, len)) {
		dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += len;

	dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
		__func__, len, (int)(drvdata->size - *ppos));
	return len;
}

static int tmc_release(struct inode *inode, struct file *file)
{
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	if (--drvdata->read_count) {
		if (drvdata->read_count < 0) {
			dev_err(drvdata->dev, "mismatched close\n");
			drvdata->read_count = 0;
		}
		goto out;
	}

	tmc_read_unprepare(drvdata);
out:
	dev_dbg(drvdata->dev, "%s: released\n", __func__);
	return 0;
}

static const struct file_operations tmc_fops = {
	.owner		= THIS_MODULE,
	.open		= tmc_open,
	.read		= tmc_read,
	.release	= tmc_release,
	.llseek		= no_llseek,
};

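/*
 * sysfs "status" attribute: briefly enable the clock and release the
 * CoreSight lock to snapshot the main management registers, then print
 * them in one go.
 */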
static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	int ret;
	unsigned long flags;
	u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg;
	u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr;
	u32 devid;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = clk_prepare_enable(drvdata->clk);
	if (ret)
		goto out;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	tmc_rsz = readl_relaxed(drvdata->base + TMC_RSZ);
	tmc_sts = readl_relaxed(drvdata->base + TMC_STS);
	tmc_rrp = readl_relaxed(drvdata->base + TMC_RRP);
	tmc_rwp = readl_relaxed(drvdata->base + TMC_RWP);
	tmc_trg = readl_relaxed(drvdata->base + TMC_TRG);
	tmc_ctl = readl_relaxed(drvdata->base + TMC_CTL);
	tmc_ffsr = readl_relaxed(drvdata->base + TMC_FFSR);
	tmc_ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
	tmc_mode = readl_relaxed(drvdata->base + TMC_MODE);
	tmc_pscr = readl_relaxed(drvdata->base + TMC_PSCR);
	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	clk_disable_unprepare(drvdata->clk);

	return sprintf(buf,
		       "Depth:\t\t0x%x\n"
		       "Status:\t\t0x%x\n"
		       "RAM read ptr:\t0x%x\n"
		       "RAM wrt ptr:\t0x%x\n"
		       "Trigger cnt:\t0x%x\n"
		       "Control:\t0x%x\n"
		       "Flush status:\t0x%x\n"
		       "Flush ctrl:\t0x%x\n"
		       "Mode:\t\t0x%x\n"
		       "PSCR:\t\t0x%x\n"
		       "DEVID:\t\t0x%x\n",
		       tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg,
		       tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid);
out:
	return -EINVAL;
}
static DEVICE_ATTR_RO(status);

static ssize_t trigger_cntr_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val = drvdata->trigger_cntr;

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->trigger_cntr = val;
	return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

static struct attribute *coresight_etb_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_status.attr,
	NULL,
};
ATTRIBUTE_GROUPS(coresight_etb);

static struct attribute *coresight_etr_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_status.attr,
	NULL,
};
ATTRIBUTE_GROUPS(coresight_etr);

static struct attribute *coresight_etf_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_status.attr,
	NULL,
};
ATTRIBUTE_GROUPS(coresight_etf);

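/*
 * The TMC flavour is read back from the hardware: DEVID[7:6] holds the
 * configuration type (ETB, ETR or ETF, matching enum tmc_config_type).
 * An ETR gets a DMA-coherent buffer whose size comes from the optional
 * "arm,buffer-size" DT property (1MB by default); ETB/ETF use the internal
 * RAM, whose depth is read from TMC_RSZ in 32-bit words.
 */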
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;
	u32 devid;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct tmc_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
	}

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->clk = adev->pclk;
	ret = clk_prepare_enable(drvdata->clk);
	if (ret)
		return ret;

	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
	drvdata->config_type = BMVAL(devid, 6, 7);

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		if (np)
			ret = of_property_read_u32(np,
						   "arm,buffer-size",
						   &drvdata->size);
		if (ret)
			drvdata->size = SZ_1M;
	} else {
		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
	}

	clk_disable_unprepare(drvdata->clk);

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
						    &drvdata->paddr, GFP_KERNEL);
		if (!drvdata->vaddr)
			return -ENOMEM;

		memset(drvdata->vaddr, 0, drvdata->size);
		drvdata->buf = drvdata->vaddr;
	} else {
		drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL);
		if (!drvdata->buf)
			return -ENOMEM;
	}

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto err_devm_kzalloc;
	}

	desc->pdata = pdata;
	desc->dev = dev;
	desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		desc->type = CORESIGHT_DEV_TYPE_SINK;
		desc->ops = &tmc_etb_cs_ops;
		desc->groups = coresight_etb_groups;
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		desc->type = CORESIGHT_DEV_TYPE_SINK;
		desc->ops = &tmc_etr_cs_ops;
		desc->groups = coresight_etr_groups;
	} else {
		desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
		desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
		desc->ops = &tmc_etf_cs_ops;
		desc->groups = coresight_etf_groups;
	}

	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_devm_kzalloc;
	}

	drvdata->miscdev.name = pdata->name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &tmc_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		goto err_misc_register;

	dev_info(dev, "TMC initialized\n");
	return 0;

err_misc_register:
	coresight_unregister(drvdata->csdev);
err_devm_kzalloc:
	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
		dma_free_coherent(dev, drvdata->size,
				  drvdata->vaddr, drvdata->paddr);
	return ret;
}

static int tmc_remove(struct amba_device *adev)
{
	struct tmc_drvdata *drvdata = amba_get_drvdata(adev);

	misc_deregister(&drvdata->miscdev);
	coresight_unregister(drvdata->csdev);
	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
		dma_free_coherent(drvdata->dev, drvdata->size,
				  drvdata->vaddr, drvdata->paddr);

	return 0;
}

static struct amba_id tmc_ids[] = {
	{
		.id     = 0x0003b961,
		.mask   = 0x0003ffff,
	},
	{ 0, 0},
};

static struct amba_driver tmc_driver = {
	.drv = {
		.name   = "coresight-tmc",
		.owner  = THIS_MODULE,
	},
	.probe		= tmc_probe,
	.remove		= tmc_remove,
	.id_table	= tmc_ids,
};

module_amba_driver(tmc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver");