/*
 * sca3000_ring.c -- support VTI sca3000 series accelerometers via SPI
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
 *
 */

#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/sysfs.h>
#include <linux/sched.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include "../ring_hw.h"
#include "sca3000.h"

/* RFC / future work
 *
 * The internal ring buffer doesn't actually change what it holds depending
 * on which signals are enabled etc, merely whether you can read them.
 * As such the scan mode selection is somewhat different than for a software
 * ring buffer and changing it actually covers any data already in the buffer.
 * Currently scan elements aren't configured so it doesn't matter.
 */

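/**
 * sca3000_read_data() - multi-byte register read used for ring access
 * @st:			device instance specific state
 * @reg_address_high:	register address to start reading from
 * @rx_p:		returns a pointer to the freshly allocated receive
 *			buffer; on success the caller must kfree() it
 * @len:		number of bytes to read
 *
 * Issues a single read transaction of @len bytes starting at
 * @reg_address_high, allocating the receive buffer on behalf of the caller.
 **/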
static int sca3000_read_data(struct sca3000_state *st,
			    u8 reg_address_high,
			    u8 **rx_p,
			    int len)
{
	int ret;
	struct spi_transfer xfer[2] = {
		{
			.len = 1,
			.tx_buf = st->tx,
		}, {
			.len = len,
		}
	};

	*rx_p = kmalloc(len, GFP_KERNEL);
	if (*rx_p == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	xfer[1].rx_buf = *rx_p;
	st->tx[0] = SCA3000_READ_REG(reg_address_high);
	ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
	if (ret) {
		dev_err(&st->us->dev, "problem reading register\n");
		goto error_free_rx;
	}

	return 0;
error_free_rx:
	kfree(*rx_p);
error_ret:
	return ret;
}

/**
 * sca3000_read_first_n_hw_rb() - main ring access, pulls data from ring
 * @r:			the ring
 * @count:		maximum number of bytes to try and pull
 * @buf:		userspace buffer the samples are copied into
 *
 * Currently does not provide timestamps.  As the hardware doesn't add them
 * they can only be inferred approximately from ring buffer events such as
 * 50% full and knowledge of when the buffer was last emptied.  This is left
 * to userspace.
 **/
static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
				      size_t count, char __user *buf)
{
	struct iio_hw_buffer *hw_ring = iio_to_hw_buf(r);
	struct iio_dev *indio_dev = hw_ring->private;
	struct sca3000_state *st = iio_priv(indio_dev);
	u8 *rx;
	int ret, i, num_available, num_read = 0;
	int bytes_per_sample = 1;

	if (st->bpse == 11)
		bytes_per_sample = 2;

	mutex_lock(&st->lock);
	if (count % bytes_per_sample) {
		ret = -EINVAL;
		goto error_ret;
	}

	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_BUF_COUNT, 1);
	if (ret)
		goto error_ret;
	num_available = st->rx[0];
	/*
	 * num_available is the total number of samples available
	 * i.e. number of time points * number of channels.
	 */
	if (count > num_available * bytes_per_sample)
		num_read = num_available * bytes_per_sample;
	else
		num_read = count;

	ret = sca3000_read_data(st,
				SCA3000_REG_ADDR_RING_OUT,
				&rx, num_read);
	if (ret)
		goto error_ret;

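	/*
	 * Convert the big-endian 16-bit words from the device to CPU byte
	 * order in place before copying them to userspace.
	 */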
	for (i = 0; i < num_read / sizeof(u16); i++)
		*(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i);

	if (copy_to_user(buf, rx, num_read))
		ret = -EFAULT;
	kfree(rx);
	r->stufftoread = 0;
error_ret:
	mutex_unlock(&st->lock);

	return ret ? ret : num_read;
}

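/*
 * The exact fill level of the hardware ring is not tracked here; once the
 * ring status interrupt has flagged data (stufftoread), report the watermark
 * so that the core considers the buffer ready for reading.
 */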
static size_t sca3000_ring_buf_data_available(struct iio_buffer *r)
{
	return r->stufftoread ? r->watermark : 0;
}

/**
 * sca3000_query_ring_int() - show whether the ring status interrupt is enabled
 **/
static ssize_t sca3000_query_ring_int(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, val;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct sca3000_state *st = iio_priv(indio_dev);

	mutex_lock(&st->lock);
	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
	val = st->rx[0];
	mutex_unlock(&st->lock);
	if (ret)
		return ret;

	return sprintf(buf, "%d\n", !!(val & this_attr->address));
}

/**
 * sca3000_set_ring_int() - set the state of the ring status interrupt
 **/
static ssize_t sca3000_set_ring_int(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct sca3000_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 val;
	int ret;

	mutex_lock(&st->lock);
	ret = kstrtou8(buf, 10, &val);
	if (ret)
		goto error_ret;
	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
	if (ret)
		goto error_ret;
	if (val)
		ret = sca3000_write_reg(st,
					SCA3000_REG_ADDR_INT_MASK,
					st->rx[0] | this_attr->address);
	else
		ret = sca3000_write_reg(st,
					SCA3000_REG_ADDR_INT_MASK,
					st->rx[0] & ~this_attr->address);
error_ret:
	mutex_unlock(&st->lock);

	return ret ? ret : len;
}

static IIO_DEVICE_ATTR(50_percent, S_IRUGO | S_IWUSR,
		       sca3000_query_ring_int,
		       sca3000_set_ring_int,
		       SCA3000_INT_MASK_RING_HALF);

static IIO_DEVICE_ATTR(75_percent, S_IRUGO | S_IWUSR,
		       sca3000_query_ring_int,
		       sca3000_set_ring_int,
		       SCA3000_INT_MASK_RING_THREE_QUARTER);

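/*
 * Scale reported for data obtained through the hardware ring buffer.  Note
 * this differs from the scale used for direct register reads (see the
 * comment on the ring buffer attributes below).
 */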
static ssize_t sca3000_show_buffer_scale(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct sca3000_state *st = iio_priv(indio_dev);

	return sprintf(buf, "0.%06d\n", 4 * st->info->scale);
}

static IIO_DEVICE_ATTR(in_accel_scale,
		       S_IRUGO,
		       sca3000_show_buffer_scale,
		       NULL,
		       0);

/*
 * Ring buffer attributes
 * This device is a bit unusual in that the sampling frequency and bpse
 * only apply to the ring buffer.  At all times full rate and accuracy
 * are available via direct reading from registers.
 */
static const struct attribute *sca3000_ring_attributes[] = {
	&iio_dev_attr_50_percent.dev_attr.attr,
	&iio_dev_attr_75_percent.dev_attr.attr,
	&iio_dev_attr_in_accel_scale.dev_attr.attr,
	NULL,
};

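/*
 * Allocate the iio_hw_buffer wrapper used to expose the device's internal
 * ring to the IIO core.  No sample storage is allocated here as the data
 * stays in the hardware until it is read out over SPI.
 */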
static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_buffer *buf;
	struct iio_hw_buffer *ring;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->private = indio_dev;
	buf = &ring->buf;
	buf->stufftoread = 0;
	buf->length = 64;
	buf->attrs = sca3000_ring_attributes;
	iio_buffer_init(buf);

	return buf;
}

static void sca3000_ring_release(struct iio_buffer *r)
{
	kfree(iio_to_hw_buf(r));
}

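/*
 * Only read, data_available and release are implemented; sample storage is
 * handled entirely inside the device, so no insertion callback is needed.
 */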
static const struct iio_buffer_access_funcs sca3000_ring_access_funcs = {
	.read_first_n = &sca3000_read_first_n_hw_rb,
	.data_available = sca3000_ring_buf_data_available,
	.release = sca3000_ring_release,
};

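/**
 * sca3000_configure_ring() - allocate the hardware ring buffer and attach it
 * @indio_dev:	iio device the buffer is attached to
 *
 * The matching teardown is sca3000_unconfigure_ring() below.
 **/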
int sca3000_configure_ring(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;

	buffer = sca3000_rb_allocate(indio_dev);
	if (buffer == NULL)
		return -ENOMEM;
	indio_dev->modes |= INDIO_BUFFER_HARDWARE;

	buffer->access = &sca3000_ring_access_funcs;

	iio_device_attach_buffer(indio_dev, buffer);

	return 0;
}

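/* Release the driver's reference to the buffer allocated in sca3000_configure_ring(). */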
void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
{
	iio_buffer_put(indio_dev->buffer);
}

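/**
 * __sca3000_hw_ring_state_set() - start or stop the hardware ring
 * @indio_dev:	iio device whose ring is being controlled
 * @state:	true to enable the ring buffer, false to disable it
 *
 * Performs a read-modify-write of the mode register under the state lock,
 * toggling only the SCA3000_RING_BUF_ENABLE bit.
 **/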
static inline
int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
{
	struct sca3000_state *st = iio_priv(indio_dev);
	int ret;

	mutex_lock(&st->lock);
	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
	if (ret)
		goto error_ret;
	if (state) {
		dev_info(&indio_dev->dev, "supposedly enabling ring buffer\n");
		ret = sca3000_write_reg(st,
					SCA3000_REG_ADDR_MODE,
					(st->rx[0] | SCA3000_RING_BUF_ENABLE));
	} else {
		ret = sca3000_write_reg(st,
					SCA3000_REG_ADDR_MODE,
					(st->rx[0] & ~SCA3000_RING_BUF_ENABLE));
	}
error_ret:
	mutex_unlock(&st->lock);

	return ret;
}

/**
 * sca3000_hw_ring_preenable() - hw ring buffer preenable function
 *
 * Very simple enable function as the chip allows normal reads
 * during ring buffer operation, so as long as it is indeed running
 * before we notify the core, the precise ordering does not matter.
 **/
static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
{
	return __sca3000_hw_ring_state_set(indio_dev, 1);
}

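/* Turn the hardware ring back off once the core has finished with the buffer. */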
static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
{
	return __sca3000_hw_ring_state_set(indio_dev, 0);
}

static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
	.preenable = &sca3000_hw_ring_preenable,
	.postdisable = &sca3000_hw_ring_postdisable,
};

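/* Install the buffer setup callbacks used when the hardware ring is in use. */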
void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
{
	indio_dev->setup_ops = &sca3000_ring_setup_ops;
}

/**
 * sca3000_ring_int_process() - ring specific interrupt handling.
 *
 * This is only split from the main interrupt handler so as to
 * reduce the amount of code if the ring buffer is not enabled.
 **/
void sca3000_ring_int_process(u8 val, struct iio_buffer *ring)
{
	if (val & (SCA3000_INT_STATUS_THREE_QUARTERS |
		   SCA3000_INT_STATUS_HALF)) {
		ring->stufftoread = true;
		wake_up_interruptible(&ring->pollq);
	}
}