#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/iio/buffer.h>
#include <linux/iio/consumer.h>

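/**
 * struct iio_cb_buffer - IIO buffer that pushes data to a consumer callback
 * @buffer:	Embedded IIO buffer through which scan data is routed.
 * @cb:		Consumer-supplied callback invoked for each datum stored.
 * @private:	Opaque pointer handed back to @cb on every invocation.
 * @channels:	Channel array obtained via iio_channel_get_all().
 */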
struct iio_cb_buffer {
	struct iio_buffer buffer;
	int (*cb)(const void *data, void *private);
	void *private;
	struct iio_channel *channels;
};

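/* Recover the wrapping callback buffer from the embedded struct iio_buffer. */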
static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer)
{
	return container_of(buffer, struct iio_cb_buffer, buffer);
}

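/*
 * Called by the IIO core for each scan element pushed into the buffer;
 * the datum is forwarded straight to the consumer's callback.
 */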
static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data)
{
	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);

	return cb_buff->cb(data, cb_buff->private);
}

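/* Last reference dropped: free the scan mask and the wrapper itself. */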
static void iio_buffer_cb_release(struct iio_buffer *buffer)
{
	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);

	kfree(cb_buff->buffer.scan_mask);
	kfree(cb_buff);
}

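/*
 * Only store_to and release are provided; data is consumed in-kernel via
 * the callback rather than read from userspace.
 */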
static const struct iio_buffer_access_funcs iio_cb_access = {
	.store_to = &iio_buffer_cb_store_to,
	.release = &iio_buffer_cb_release,
};

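/**
 * iio_channel_get_all_cb() - get all channels mapped to a consumer device
 *			      and set up a callback buffer for them
 * @dev:	Consumer device the channels are mapped to.
 * @cb:		Callback invoked for each datum pushed to the buffer.
 * @private:	Private data passed to the callback.
 *
 * All channels mapped to @dev must come from the same underlying IIO
 * device; there is no support for muxing data from several devices into
 * one callback buffer, so mixed mappings fail with -EINVAL.
 *
 * Returns a pointer to the callback buffer on success, or an ERR_PTR()
 * on failure.
 */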
struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
					     int (*cb)(const void *data,
						       void *private),
					     void *private)
{
	int ret;
	struct iio_cb_buffer *cb_buff;
	struct iio_dev *indio_dev;
	struct iio_channel *chan;

	cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
	if (cb_buff == NULL)
		return ERR_PTR(-ENOMEM);

	iio_buffer_init(&cb_buff->buffer);

	cb_buff->private = private;
	cb_buff->cb = cb;
	cb_buff->buffer.access = &iio_cb_access;
	INIT_LIST_HEAD(&cb_buff->buffer.demux_list);

	cb_buff->channels = iio_channel_get_all(dev);
	if (IS_ERR(cb_buff->channels)) {
		ret = PTR_ERR(cb_buff->channels);
		goto error_free_cb_buff;
	}

	/* All requested channels must belong to the same IIO device. */
	indio_dev = cb_buff->channels[0].indio_dev;
	cb_buff->buffer.scan_mask
		= kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
			  GFP_KERNEL);
	if (cb_buff->buffer.scan_mask == NULL) {
		ret = -ENOMEM;
		goto error_release_channels;
	}

	/* Enable each requested channel in the buffer's scan mask. */
	chan = &cb_buff->channels[0];
	while (chan->indio_dev) {
		if (chan->indio_dev != indio_dev) {
			ret = -EINVAL;
			goto error_free_scan_mask;
		}
		set_bit(chan->channel->scan_index,
			cb_buff->buffer.scan_mask);
		chan++;
	}

	return cb_buff;

error_free_scan_mask:
	kfree(cb_buff->buffer.scan_mask);
error_release_channels:
	iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
	kfree(cb_buff);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);

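/**
 * iio_channel_start_all_cb() - start the flow of data through the callback
 * @cb_buff:	The callback buffer returned by iio_channel_get_all_cb().
 *
 * Attaches the callback buffer to the producing IIO device so captured
 * data begins arriving at the consumer's callback.
 *
 * Returns 0 on success or a negative error code.
 */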
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
	return iio_update_buffers(cb_buff->channels[0].indio_dev,
				  &cb_buff->buffer,
				  NULL);
}
EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);

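/**
 * iio_channel_stop_all_cb() - stop the flow of data through the callback
 * @cb_buff:	The callback buffer previously started.
 *
 * Detaches the callback buffer from the producing IIO device.
 */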
void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_update_buffers(cb_buff->channels[0].indio_dev,
			   NULL,
			   &cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);

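/**
 * iio_channel_release_all_cb() - release resources held by the callback buffer
 * @cb_buff:	The callback buffer to release.
 *
 * Releases the channels and drops the buffer reference; the buffer's
 * release callback frees the scan mask and the wrapper structure.
 */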
void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_channel_release_all(cb_buff->channels);
	iio_buffer_put(&cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);

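/**
 * iio_channel_cb_get_channels() - get access to the underlying channels
 * @cb_buffer:	The callback buffer of interest.
 *
 * Lets a consumer query the channels (e.g. for scaling information)
 * without going through the callback path.
 */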
struct iio_channel
*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
{
	return cb_buffer->channels;
}
EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);
