/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && !avail && to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf, to_flush);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for the data being read
 * @n:		Maximum number of bytes to read
 * @f_ps:	File position offset (unused here)
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: number of bytes read, 0 on end of file, or a negative error code.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	size_t datum_size;
	size_t to_wait = 0;
	size_t to_read;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	to_read = min_t(size_t, n / datum_size, rb->watermark);

	if (!(filp->f_flags & O_NONBLOCK))
		to_wait = to_read;

	do {
		ret = wait_event_interruptible(rb->pollq,
			iio_buffer_ready(indio_dev, rb, to_wait, to_read));
		if (ret)
			return ret;

		if (!indio_dev->info)
			return -ENODEV;

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (POLLIN | POLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return POLLIN | POLLRDNORM;
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);
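
/*
 * A minimal sketch of how a buffer implementation is expected to use
 * iio_buffer_init(): the struct iio_buffer must be the first member of the
 * implementation-specific structure (the chrdev read path above relies on
 * that), and iio_buffer_init() is called on it during allocation. The names
 * my_ring and my_ring_allocate are illustrative only, not part of this file.
 *
 *	struct my_ring {
 *		struct iio_buffer buffer;	/\* must be first *\/
 *		u8 *data;
 *	};
 *
 *	static struct iio_buffer *my_ring_allocate(void)
 *	{
 *		struct my_ring *ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 *
 *		if (!ring)
 *			return NULL;
 *		iio_buffer_init(&ring->buffer);
 *		return &ring->buffer;
 *	}
 */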

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.repeat,
		       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
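
/*
 * Example of the scan element "type" strings produced above: a little-endian,
 * signed, 12-bit value stored in 16 bits and shifted right by 4 reads as
 * "le:s12/16>>4"; with a repeat count of 2 the same channel reads as
 * "le:s12/16X2>>4".
 */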

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

/* NULL is used as the error indicator, since a valid match can never be NULL. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
					  unsigned int masklength,
					  const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
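
/*
 * Worked example of the layout computed above: two 16-bit channels followed
 * by a 64-bit timestamp. The two samples occupy bytes 0-1 and 2-3; the
 * timestamp is then aligned to its own 8-byte size, so it lands at bytes
 * 8-15 and the total scan size is 16 bytes, not 12.
 */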

static void iio_buffer_activate(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
			&indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
		buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				return ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				return ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				return ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if ((indio_dev->modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else if (indio_dev->modes & INDIO_BUFFER_SOFTWARE) {
		indio_dev->currentmode = INDIO_BUFFER_SOFTWARE;
	} else { /* Should never be reached */
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			pr_info("Buffer not started: no trigger\n");
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
	return ret;
}

/**
 * iio_update_buffers() - add or remove a buffer from the active list
 * @indio_dev:		device whose buffer list is to be updated
 * @insert_buffer:	buffer to insert, may be NULL
 * @remove_buffer:	buffer to remove, may be NULL
 *
 * Note this will tear down the existing buffering and build it up again.
 */
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
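
/*
 * A minimal sketch of how an in-kernel consumer attaches and detaches a
 * buffer with iio_update_buffers(); indio_dev and buffer here are
 * illustrative placeholders, not names defined in this file.
 *
 *	ret = iio_update_buffers(indio_dev, buffer, NULL);	/\* attach *\/
 *	...
 *	ret = iio_update_buffers(indio_dev, NULL, buffer);	/\* detach *\/
 */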

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev, val);
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
};

int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;
	const struct iio_chan_spec *channels;

	if (!buffer)
		return 0;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
		       sizeof(struct attribute *), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * attrcount);

	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = attr;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* Establish the mask length and add per-channel scan element attributes */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							 &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(indio_dev->buffer->buffer_group.attrs);

	return ret;
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->buffer_group.attrs);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
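
/*
 * A minimal sketch of how a driver uses this helper: hook it up as the
 * validate_scan_mask callback in its iio_buffer_setup_ops so that only one
 * scan element may be enabled at a time. my_setup_ops is an illustrative
 * name, not something defined in this file.
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 */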

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
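
/*
 * Illustrative example of the tables built below: if the device captures
 * channels 0, 1 and 2 (the active scan mask) but this buffer only requested
 * channels 0 and 2, each 2 bytes wide, the resulting demux list holds two
 * entries, { .from = 0, .to = 0, .length = 2 } and
 * { .from = 4, .to = 2, .length = 2 }, which copy the two requested samples
 * from the 6-byte captured scan into a packed 4-byte bounce buffer.
 */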

static const void *iio_demux(struct iio_buffer *buffer,
				 const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for the watermark to decide if we wake the poll
	 * queue because read may request fewer samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
	return 0;
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
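
/*
 * A minimal sketch of the usual producer side, assuming a triggered-buffer
 * driver: a pollfunc bottom half fills a scan-sized array and hands it to
 * iio_push_to_buffers(). The names my_trigger_handler, my_state, my_read_scan
 * and st->scan are illustrative only.
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct my_state *st = iio_priv(indio_dev);
 *
 *		my_read_scan(st, st->scan);		/\* device-specific fill *\/
 *		iio_push_to_buffers(indio_dev, st->scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */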

static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
		(*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			if (ch->scan_type.repeat > 1)
				length = ch->scan_type.storagebits / 8 *
					ch->scan_type.repeat;
			else
				length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
			indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
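
/*
 * A minimal sketch of the reference-counting pattern these helpers support;
 * iio_buffer_activate() above follows the same idiom. The names below are
 * illustrative placeholders.
 *
 *	struct iio_buffer *ref = iio_buffer_get(buffer);	/\* take a reference *\/
 *	...	/\* use ref; the buffer cannot be released underneath us *\/
 *	iio_buffer_put(ref);					/\* drop it again *\/
 */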