/* uislib.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

/* @ALL_INSPECTED */
#define EXPORT_SYMTAB
#include <linux/kernel.h>
#include <linux/highmem.h>
#ifdef CONFIG_MODVERSIONS
#include <config/modversions.h>
#endif
#include <linux/module.h>
#include <linux/debugfs.h>

#include <linux/types.h>
#include <linux/uuid.h>

#include <linux/version.h>
#include "diagnostics/appos_subsystems.h"
#include "uisutils.h"
#include "vbuschannel.h"

#include <linux/proc_fs.h>
#include <linux/uaccess.h>	/* for copy_from_user */
#include <linux/ctype.h>	/* for toupper */
#include <linux/list.h>

#include "sparstop.h"
#include "visorchipset.h"
#include "version.h"
#include "guestlinuxdebug.h"

#define SET_PROC_OWNER(x, y)

#define POLLJIFFIES_NORMAL 1	/* jiffies between request-polling passes */

/* This is shorter than using __FILE__ (full path name) in
 * debug/info/error messages.
 */
#define CURRENT_FILE_PC UISLIB_PC_uislib_c
#define __MYFILE__ "uislib.c"

/* global function pointer that acts as a callback function into virtpcimod */
int (*virt_control_chan_func)(struct guest_msgs *);

static int debug_buf_valid;
static char *debug_buf;	/* Note this MUST be global, because the contents
			 * must persist across multiple calls to
			 * info_debugfs_read()
			 */
static unsigned int chipset_inited;

#define WAIT_ON_CALLBACK(handle)	\
	do {			\
		if (handle)		\
			break;		\
		UIS_THREAD_WAIT;	\
	} while (1)

static struct bus_info *bus_list;
static rwlock_t bus_list_lock;
static int bus_list_count;	/* number of buses in the list */
static int max_bus_count;	/* maximum number of buses expected */
static u64 phys_data_chan;
static int platform_no;

static struct uisthread_info incoming_ti;
static BOOL incoming_started = FALSE;
static LIST_HEAD(poll_dev_chan);
static unsigned long long tot_moved_to_tail_cnt;
static unsigned long long tot_wait_cnt;
static unsigned long long tot_wakeup_cnt;
static unsigned long long tot_schedule_cnt;
static int en_smart_wakeup = 1;	/* choose whether or not to wake up the
				 * request-polling thread after an IO
				 * termination
				 */
static DEFINE_SEMAPHORE(poll_dev_lock);	/* unlocked */
static DECLARE_WAIT_QUEUE_HEAD(poll_dev_wake_q);
static int poll_dev_start;

#define CALLHOME_PROC_ENTRY_FN "callhome"
#define CALLHOME_THROTTLED_PROC_ENTRY_FN "callhome_throttled"

#define DIR_DEBUGFS_ENTRY "uislib"
static struct dentry *dir_debugfs;

#define PLATFORMNUMBER_DEBUGFS_ENTRY_FN "platform"
static struct dentry *platformnumber_debugfs_read;

#define CYCLES_BEFORE_WAIT_DEBUGFS_ENTRY_FN "cycles_before_wait"
static struct dentry *cycles_before_wait_debugfs_read;

#define SMART_WAKEUP_DEBUGFS_ENTRY_FN "smart_wakeup"
static struct dentry *smart_wakeup_debugfs_entry;

#define INFO_DEBUGFS_ENTRY_FN "info"
static struct dentry *info_debugfs_entry;

static unsigned long long cycles_before_wait, wait_cycles;

/*****************************************************/
/* local functions                                   */
/*****************************************************/

static ssize_t info_debugfs_read(struct file *file, char __user *buf,
				 size_t len, loff_t *offset);
static const struct file_operations debugfs_info_fops = {
	.read = info_debugfs_read,
};
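
/* Fill in a CONTROLVM message header: clear the whole message, then set
 * the message id and the response_expected/server flags.
 */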
static void
init_msg_header(struct controlvm_message *msg, u32 id, uint rsp, uint svr)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	msg->hdr.id = id;
	msg->hdr.flags.response_expected = rsp;
	msg->hdr.flags.server = svr;
}
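
/* Map the vbus channel at ch_addr/ch_bytes and verify that it carries a
 * valid client vbus channel signature; returns the mapped address, or
 * NULL on failure.
 */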
static void __iomem *init_vbus_channel(u64 ch_addr, u32 ch_bytes)
{
	void __iomem *ch = uislib_ioremap_cache(ch_addr, ch_bytes);

	if (!ch)
		return NULL;

	if (!SPAR_VBUS_CHANNEL_OK_CLIENT(ch)) {
		uislib_iounmap(ch);
		return NULL;
	}
	return ch;
}
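
/* Handle a CONTROLVM_BUS_CREATE message: allocate and initialize a
 * bus_info entry (plus its device pointer array), map the optional bus
 * channel, notify virtpci for client buses, and add the new bus at the
 * head of bus_list.  Returns a CONTROLVM_RESP_* code.
 */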
static int
create_bus(struct controlvm_message *msg, char *buf)
{
	u32 bus_no, dev_count;
	struct bus_info *tmp, *bus;
	size_t size;

	if (max_bus_count == bus_list_count) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, max_bus_count,
				 POSTCODE_SEVERITY_ERR);
		return CONTROLVM_RESP_ERROR_MAX_BUSES;
	}

	bus_no = msg->cmd.create_bus.bus_no;
	dev_count = msg->cmd.create_bus.dev_count;

	POSTCODE_LINUX_4(BUS_CREATE_ENTRY_PC, bus_no, dev_count,
			 POSTCODE_SEVERITY_INFO);

	size = sizeof(struct bus_info) +
	       (dev_count * sizeof(struct device_info *));
	bus = kzalloc(size, GFP_ATOMIC);
	if (!bus) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
	}

	/* Currently by default, the bus number is the GuestHandle.
	 * A Configure Bus message can override this.
	 */
	if (msg->hdr.flags.test_message) {
		/* This implies we're the IOVM so set guest handle to 0... */
		bus->guest_handle = 0;
		bus->bus_no = bus_no;
		bus->local_vnic = 1;
	} else {
		bus->bus_no = bus_no;
		bus->guest_handle = bus_no;
	}
	sprintf(bus->name, "%d", (int)bus->bus_no);
	bus->device_count = dev_count;
	bus->device =
	    (struct device_info **)((char *)bus + sizeof(struct bus_info));
	bus->bus_inst_uuid = msg->cmd.create_bus.bus_inst_uuid;
	bus->bus_channel_bytes = 0;
	bus->bus_channel = NULL;

	/* add bus to our bus list - but check for duplicates first */
	read_lock(&bus_list_lock);
	for (tmp = bus_list; tmp; tmp = tmp->next) {
		if (tmp->bus_no == bus->bus_no)
			break;
	}
	read_unlock(&bus_list_lock);
	if (tmp) {
		/* found a bus already in the list with same bus_no -
		 * reject add
		 */
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
				 POSTCODE_SEVERITY_ERR);
		kfree(bus);
		return CONTROLVM_RESP_ERROR_ALREADY_DONE;
	}
	if ((msg->cmd.create_bus.channel_addr != 0) &&
	    (msg->cmd.create_bus.channel_bytes != 0)) {
		bus->bus_channel_bytes = msg->cmd.create_bus.channel_bytes;
		bus->bus_channel =
		    init_vbus_channel(msg->cmd.create_bus.channel_addr,
				      msg->cmd.create_bus.channel_bytes);
	}
	/* the msg is bound for virtpci; send guest_msgs struct to callback */
	if (!msg->hdr.flags.server) {
		struct guest_msgs cmd;

		cmd.msgtype = GUEST_ADD_VBUS;
		cmd.add_vbus.bus_no = bus_no;
		cmd.add_vbus.chanptr = bus->bus_channel;
		cmd.add_vbus.dev_count = dev_count;
		cmd.add_vbus.bus_uuid = msg->cmd.create_bus.bus_data_type_uuid;
		cmd.add_vbus.instance_uuid = msg->cmd.create_bus.bus_inst_uuid;
		if (!virt_control_chan_func) {
			POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
					 POSTCODE_SEVERITY_ERR);
			kfree(bus);
			return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
		}
		if (!virt_control_chan_func(&cmd)) {
			POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
					 POSTCODE_SEVERITY_ERR);
			kfree(bus);
			return
			    CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
		}
	}

	/* add bus at the head of our list */
	write_lock(&bus_list_lock);
	bus->next = bus_list;
	bus_list = bus;
	bus_list_count++;
	write_unlock(&bus_list_lock);

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus->bus_no,
			 POSTCODE_SEVERITY_INFO);
	return CONTROLVM_RESP_SUCCESS;
}
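
/* Handle a CONTROLVM_BUS_DESTROY message: refuse if the bus still has
 * devices attached, notify virtpci for client buses, then unlink the
 * bus from bus_list, unmap its channel, and free it.
 */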
static int
destroy_bus(struct controlvm_message *msg, char *buf)
{
	int i;
	struct bus_info *bus, *prev = NULL;
	struct guest_msgs cmd;
	u32 bus_no;

	bus_no = msg->cmd.destroy_bus.bus_no;

	read_lock(&bus_list_lock);

	bus = bus_list;
	while (bus) {
		if (bus->bus_no == bus_no)
			break;
		prev = bus;
		bus = bus->next;
	}

	if (!bus) {
		read_unlock(&bus_list_lock);
		return CONTROLVM_RESP_ERROR_ALREADY_DONE;
	}

	/* verify that this bus has no devices. */
	for (i = 0; i < bus->device_count; i++) {
		if (bus->device[i]) {
			read_unlock(&bus_list_lock);
			return CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED;
		}
	}
	read_unlock(&bus_list_lock);

	if (msg->hdr.flags.server)
		goto remove;

	/* client messages require us to call the virtpci callback
	 * associated with this bus.
	 */
	cmd.msgtype = GUEST_DEL_VBUS;
	cmd.del_vbus.bus_no = bus_no;
	if (!virt_control_chan_func)
		return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;

	if (!virt_control_chan_func(&cmd))
		return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;

	/* finally, remove the bus from the list */
remove:
	write_lock(&bus_list_lock);
	if (prev)	/* not at head */
		prev->next = bus->next;
	else
		bus_list = bus->next;
	bus_list_count--;
	write_unlock(&bus_list_lock);

	if (bus->bus_channel) {
		uislib_iounmap(bus->bus_channel);
		bus->bus_channel = NULL;
	}

	kfree(bus);
	return CONTROLVM_RESP_SUCCESS;
}
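
/* Handle a CONTROLVM_DEVICE_CREATE message: allocate a device_info, map
 * the device channel (unless this is a test message, in which case
 * __va() is used instead of ioremap), validate the channel signature
 * for vHBA/vNIC channels, notify virtpci for client devices, and attach
 * the device to its bus.  Returns a CONTROLVM_RESP_* code.
 */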
static int create_device(struct controlvm_message *msg, char *buf)
{
	struct device_info *dev;
	struct bus_info *bus;
	struct guest_msgs cmd;
	u32 bus_no, dev_no;
	int result = CONTROLVM_RESP_SUCCESS;
	u64 min_size = MIN_IO_CHANNEL_SIZE;
	struct req_handler_info *req_handler;

	bus_no = msg->cmd.create_device.bus_no;
	dev_no = msg->cmd.create_device.dev_no;

	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	dev = kzalloc(sizeof(*dev), GFP_ATOMIC);
	if (!dev) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
	}

	dev->channel_uuid = msg->cmd.create_device.data_type_uuid;
	dev->intr = msg->cmd.create_device.intr;
	dev->channel_addr = msg->cmd.create_device.channel_addr;
	dev->bus_no = bus_no;
	dev->dev_no = dev_no;
	sema_init(&dev->interrupt_callback_lock, 1);	/* unlocked */
	sprintf(dev->devid, "vbus%u:dev%u", (unsigned)bus_no, (unsigned)dev_no);
	/* map the channel memory for the device. */
	if (msg->hdr.flags.test_message) {
		dev->chanptr = (void __iomem *)__va(dev->channel_addr);
	} else {
		req_handler = req_handler_find(dev->channel_uuid);
		if (req_handler)
			/* generic service handler registered for this
			 * channel
			 */
			min_size = req_handler->min_channel_bytes;
		if (min_size > msg->cmd.create_device.channel_bytes) {
			POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
					 bus_no, POSTCODE_SEVERITY_ERR);
			result = CONTROLVM_RESP_ERROR_CHANNEL_SIZE_TOO_SMALL;
			goto cleanup;
		}
		dev->chanptr =
		    uislib_ioremap_cache(dev->channel_addr,
					 msg->cmd.create_device.channel_bytes);
		if (!dev->chanptr) {
			result = CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
			POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
					 bus_no, POSTCODE_SEVERITY_ERR);
			goto cleanup;
		}
	}
	dev->instance_uuid = msg->cmd.create_device.dev_inst_uuid;
	dev->channel_bytes = msg->cmd.create_device.channel_bytes;

	read_lock(&bus_list_lock);
	for (bus = bus_list; bus; bus = bus->next) {
		if (bus->bus_no != bus_no)
			continue;
		/* make sure the device number is valid */
		if (dev_no >= bus->device_count) {
			result = CONTROLVM_RESP_ERROR_MAX_DEVICES;
			POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
					 bus_no, POSTCODE_SEVERITY_ERR);
			read_unlock(&bus_list_lock);
			goto cleanup;
		}
		/* make sure this device is not already set */
		if (bus->device[dev_no]) {
			POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
					 dev_no, bus_no,
					 POSTCODE_SEVERITY_ERR);
			result = CONTROLVM_RESP_ERROR_ALREADY_DONE;
			read_unlock(&bus_list_lock);
			goto cleanup;
		}
		read_unlock(&bus_list_lock);
		/* the msg is bound for virtpci; send
		 * guest_msgs struct to callback
		 */
		if (msg->hdr.flags.server) {
			bus->device[dev_no] = dev;
			POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no,
					 bus_no, POSTCODE_SEVERITY_INFO);
			return CONTROLVM_RESP_SUCCESS;
		}
		if (uuid_le_cmp(dev->channel_uuid,
				spar_vhba_channel_protocol_uuid) == 0) {
			wait_for_valid_guid(&((struct channel_header __iomem *)
					    (dev->chanptr))->chtype);
			if (!SPAR_VHBA_CHANNEL_OK_CLIENT(dev->chanptr)) {
				POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
						 dev_no, bus_no,
						 POSTCODE_SEVERITY_ERR);
				result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
				goto cleanup;
			}
			cmd.msgtype = GUEST_ADD_VHBA;
			cmd.add_vhba.chanptr = dev->chanptr;
			cmd.add_vhba.bus_no = bus_no;
			cmd.add_vhba.device_no = dev_no;
			cmd.add_vhba.instance_uuid = dev->instance_uuid;
			cmd.add_vhba.intr = dev->intr;
		} else if (uuid_le_cmp(dev->channel_uuid,
				       spar_vnic_channel_protocol_uuid) == 0) {
			wait_for_valid_guid(&((struct channel_header __iomem *)
					    (dev->chanptr))->chtype);
			if (!SPAR_VNIC_CHANNEL_OK_CLIENT(dev->chanptr)) {
				POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
						 dev_no, bus_no,
						 POSTCODE_SEVERITY_ERR);
				result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
				goto cleanup;
			}
			cmd.msgtype = GUEST_ADD_VNIC;
			cmd.add_vnic.chanptr = dev->chanptr;
			cmd.add_vnic.bus_no = bus_no;
			cmd.add_vnic.device_no = dev_no;
			cmd.add_vnic.instance_uuid = dev->instance_uuid;
			cmd.add_vnic.intr = dev->intr;
		} else {
			POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
					 bus_no, POSTCODE_SEVERITY_ERR);
			result = CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
			goto cleanup;
		}

		if (!virt_control_chan_func) {
			POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
					 bus_no, POSTCODE_SEVERITY_ERR);
			result = CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
			goto cleanup;
		}

		if (!virt_control_chan_func(&cmd)) {
			POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
					 bus_no, POSTCODE_SEVERITY_ERR);
			result =
			     CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
			goto cleanup;
		}

		bus->device[dev_no] = dev;
		POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no,
				 bus_no, POSTCODE_SEVERITY_INFO);
		return CONTROLVM_RESP_SUCCESS;
	}
	read_unlock(&bus_list_lock);

	POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_ERR);
	result = CONTROLVM_RESP_ERROR_BUS_INVALID;

cleanup:
	if (!msg->hdr.flags.test_message) {
		uislib_iounmap(dev->chanptr);
		dev->chanptr = NULL;
	}

	kfree(dev);
	return result;
}
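
/* Handle a CONTROLVM_DEVICE_CHANGESTATE message that moves a device to
 * standby: look up the device and ask virtpci, via the registered
 * callback, to pause the vHBA or vNIC attached to its channel.
 */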
static int pause_device(struct controlvm_message *msg)
{
	u32 bus_no, dev_no;
	struct bus_info *bus;
	struct device_info *dev;
	struct guest_msgs cmd;
	int retval = CONTROLVM_RESP_SUCCESS;

	bus_no = msg->cmd.device_change_state.bus_no;
	dev_no = msg->cmd.device_change_state.dev_no;

	read_lock(&bus_list_lock);
	for (bus = bus_list; bus; bus = bus->next) {
		if (bus->bus_no == bus_no) {
			/* make sure the device number is valid */
			if (dev_no >= bus->device_count) {
				retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
			} else {
				/* make sure this device exists */
				dev = bus->device[dev_no];
				if (!dev) {
					retval =
					  CONTROLVM_RESP_ERROR_ALREADY_DONE;
				}
			}
			break;
		}
	}
	if (!bus)
		retval = CONTROLVM_RESP_ERROR_BUS_INVALID;

	read_unlock(&bus_list_lock);
	if (retval == CONTROLVM_RESP_SUCCESS) {
		/* the msg is bound for virtpci; send
		 * guest_msgs struct to callback
		 */
		if (uuid_le_cmp(dev->channel_uuid,
				spar_vhba_channel_protocol_uuid) == 0) {
			cmd.msgtype = GUEST_PAUSE_VHBA;
			cmd.pause_vhba.chanptr = dev->chanptr;
		} else if (uuid_le_cmp(dev->channel_uuid,
				       spar_vnic_channel_protocol_uuid) == 0) {
			cmd.msgtype = GUEST_PAUSE_VNIC;
			cmd.pause_vnic.chanptr = dev->chanptr;
		} else {
			return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
		}
		if (!virt_control_chan_func)
			return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
		if (!virt_control_chan_func(&cmd)) {
			return
			  CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
		}
	}
	return retval;
}
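
/* Handle a CONTROLVM_DEVICE_CHANGESTATE message that returns a device
 * to the running state: look up the device and ask virtpci to resume
 * the vHBA or vNIC attached to its channel.
 */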
static int resume_device(struct controlvm_message *msg)
{
	u32 bus_no, dev_no;
	struct bus_info *bus;
	struct device_info *dev;
	struct guest_msgs cmd;
	int retval = CONTROLVM_RESP_SUCCESS;

	bus_no = msg->cmd.device_change_state.bus_no;
	dev_no = msg->cmd.device_change_state.dev_no;

	read_lock(&bus_list_lock);
	for (bus = bus_list; bus; bus = bus->next) {
		if (bus->bus_no == bus_no) {
			/* make sure the device number is valid */
			if (dev_no >= bus->device_count) {
				retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
			} else {
				/* make sure this device exists */
				dev = bus->device[dev_no];
				if (!dev) {
					retval =
					  CONTROLVM_RESP_ERROR_ALREADY_DONE;
				}
			}
			break;
		}
	}

	if (!bus)
		retval = CONTROLVM_RESP_ERROR_BUS_INVALID;

	read_unlock(&bus_list_lock);
	/* the msg is bound for virtpci; send
	 * guest_msgs struct to callback
	 */
	if (retval == CONTROLVM_RESP_SUCCESS) {
		if (uuid_le_cmp(dev->channel_uuid,
				spar_vhba_channel_protocol_uuid) == 0) {
			cmd.msgtype = GUEST_RESUME_VHBA;
			cmd.resume_vhba.chanptr = dev->chanptr;
		} else if (uuid_le_cmp(dev->channel_uuid,
				       spar_vnic_channel_protocol_uuid) == 0) {
			cmd.msgtype = GUEST_RESUME_VNIC;
			cmd.resume_vnic.chanptr = dev->chanptr;
		} else {
			return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
		}
		if (!virt_control_chan_func)
			return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
		if (!virt_control_chan_func(&cmd)) {
			return
			  CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
		}
	}
	return retval;
}
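
/* Handle a CONTROLVM_DEVICE_DESTROY message: look up the device, notify
 * virtpci for client devices, stop channel polling, unmap the channel,
 * and free the device_info.
 */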
static int destroy_device(struct controlvm_message *msg, char *buf)
{
	u32 bus_no, dev_no;
	struct bus_info *bus;
	struct device_info *dev;
	struct guest_msgs cmd;
	int retval = CONTROLVM_RESP_SUCCESS;

	bus_no = msg->cmd.destroy_device.bus_no;
	dev_no = msg->cmd.destroy_device.dev_no;

	read_lock(&bus_list_lock);
	for (bus = bus_list; bus; bus = bus->next) {
		if (bus->bus_no == bus_no) {
			/* make sure the device number is valid */
			if (dev_no >= bus->device_count) {
				retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
			} else {
				/* make sure this device exists */
				dev = bus->device[dev_no];
				if (!dev) {
					retval =
					     CONTROLVM_RESP_ERROR_ALREADY_DONE;
				}
			}
			break;
		}
	}

	if (!bus)
		retval = CONTROLVM_RESP_ERROR_BUS_INVALID;
	read_unlock(&bus_list_lock);
	if (retval == CONTROLVM_RESP_SUCCESS) {
		/* the msg is bound for virtpci; send
		 * guest_msgs struct to callback
		 */
		if (uuid_le_cmp(dev->channel_uuid,
				spar_vhba_channel_protocol_uuid) == 0) {
			cmd.msgtype = GUEST_DEL_VHBA;
			cmd.del_vhba.chanptr = dev->chanptr;
		} else if (uuid_le_cmp(dev->channel_uuid,
				       spar_vnic_channel_protocol_uuid) == 0) {
			cmd.msgtype = GUEST_DEL_VNIC;
			cmd.del_vnic.chanptr = dev->chanptr;
		} else {
			return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
		}
		if (!virt_control_chan_func)
			return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
		if (!virt_control_chan_func(&cmd)) {
			return
			    CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
		}
		/* you must disable channel interrupts BEFORE you unmap
		 * the channel, because if you unmap first, there may
		 * still be some activity going on which accesses the
		 * channel and you will get an "unable to handle kernel
		 * paging request"
		 */
		if (dev->polling)
			uislib_disable_channel_interrupts(bus_no, dev_no);
		/* unmap the channel memory for the device. */
		if (!msg->hdr.flags.test_message)
			uislib_iounmap(dev->chanptr);
		kfree(dev);
		bus->device[dev_no] = NULL;
	}
	return retval;
}
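
/* Handle a CONTROLVM_CHIPSET_INIT message: record the expected bus
 * count and the platform number, and (for non-test messages) wait until
 * virtpci has registered its callback before proceeding.
 */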
static int
init_chipset(struct controlvm_message *msg, char *buf)
{
	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	max_bus_count = msg->cmd.init_chipset.bus_count;
	platform_no = msg->cmd.init_chipset.platform_number;
	phys_data_chan = 0;

	/* We need to make sure we have our functions registered
	 * before processing messages.  If we are a test vehicle the
	 * test_message for init_chipset will be set.  We can ignore the
	 * waits for the callbacks, since this will be manually entered
	 * from a user.  If no test_message is set, we will wait for the
	 * functions.
	 */
	if (!msg->hdr.flags.test_message)
		WAIT_ON_CALLBACK(virt_control_chan_func);

	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	return CONTROLVM_RESP_SUCCESS;
}
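
/* Build a CONTROLVM destroy message locally and feed it to the handler
 * above.  These wrappers return 1 on success and 0 on failure.
 */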
static int delete_bus_glue(u32 bus_no)
{
	struct controlvm_message msg;

	init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
	msg.cmd.destroy_bus.bus_no = bus_no;
	if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
		return 0;
	return 1;
}

static int delete_device_glue(u32 bus_no, u32 dev_no)
{
	struct controlvm_message msg;

	init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0);
	msg.cmd.destroy_device.bus_no = bus_no;
	msg.cmd.destroy_device.dev_no = dev_no;
	if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
		return 0;
	return 1;
}

int
uislib_client_inject_add_bus(u32 bus_no, uuid_le inst_uuid,
			     u64 channel_addr, ulong n_channel_bytes)
{
	struct controlvm_message msg;

	/* step 0: init the chipset */
	POSTCODE_LINUX_3(CHIPSET_INIT_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	if (!chipset_inited) {
		/* step: initialize the chipset */
		init_msg_header(&msg, CONTROLVM_CHIPSET_INIT, 0, 0);
		/* this change is needed so that console will come up
		 * OK even when the bus 0 create comes in late.  If the
		 * bus 0 create is the first create, then the add_vnic
		 * will work fine, but if the bus 0 create arrives
		 * after number 4, then the add_vnic will fail, and the
		 * ultraboot will fail.
		 */
		msg.cmd.init_chipset.bus_count = 23;
		msg.cmd.init_chipset.switch_count = 0;
		if (init_chipset(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
			return 0;
		POSTCODE_LINUX_3(CHIPSET_INIT_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}

	/* step 1: create a bus */
	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_WARNING);
	init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, 0);
	msg.cmd.create_bus.bus_no = bus_no;
	msg.cmd.create_bus.dev_count = 23;	/* devNo+1; */
	msg.cmd.create_bus.channel_addr = channel_addr;
	msg.cmd.create_bus.channel_bytes = n_channel_bytes;
	if (create_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		return 0;
	}
	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

	return 1;
}
EXPORT_SYMBOL_GPL(uislib_client_inject_add_bus);

int
uislib_client_inject_del_bus(u32 bus_no)
{
	return delete_bus_glue(bus_no);
}
EXPORT_SYMBOL_GPL(uislib_client_inject_del_bus);

int
uislib_client_inject_pause_vhba(u32 bus_no, u32 dev_no)
{
	struct controlvm_message msg;
	int rc;

	init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
	msg.cmd.device_change_state.bus_no = bus_no;
	msg.cmd.device_change_state.dev_no = dev_no;
	msg.cmd.device_change_state.state = segment_state_standby;
	rc = pause_device(&msg);
	if (rc != CONTROLVM_RESP_SUCCESS)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vhba);

int
uislib_client_inject_resume_vhba(u32 bus_no, u32 dev_no)
{
	struct controlvm_message msg;
	int rc;

	init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
	msg.cmd.device_change_state.bus_no = bus_no;
	msg.cmd.device_change_state.dev_no = dev_no;
	msg.cmd.device_change_state.state = segment_state_running;
	rc = resume_device(&msg);
	if (rc != CONTROLVM_RESP_SUCCESS)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vhba);

int
uislib_client_inject_add_vhba(u32 bus_no, u32 dev_no,
			      u64 phys_chan_addr, u32 chan_bytes,
			      int is_test_addr, uuid_le inst_uuid,
			      struct irq_info *intr)
{
	struct controlvm_message msg;

	/* chipset init'ed and the bus has been previously created -
	 * verify that it still exists.  step 2: create the VHBA device
	 * on the bus
	 */
	POSTCODE_LINUX_4(VHBA_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
	if (is_test_addr)
		/* signify that the physical channel address does NOT
		 * need to be ioremap()ed
		 */
		msg.hdr.flags.test_message = 1;
	msg.cmd.create_device.bus_no = bus_no;
	msg.cmd.create_device.dev_no = dev_no;
	msg.cmd.create_device.dev_inst_uuid = inst_uuid;
	if (intr)
		msg.cmd.create_device.intr = *intr;
	else
		memset(&msg.cmd.create_device.intr, 0,
		       sizeof(struct irq_info));
	msg.cmd.create_device.channel_addr = phys_chan_addr;
	if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
		POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, chan_bytes,
				 MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
		return 0;
	}
	msg.cmd.create_device.channel_bytes = chan_bytes;
	msg.cmd.create_device.data_type_uuid = spar_vhba_channel_protocol_uuid;
	if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
		POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		return 0;
	}
	POSTCODE_LINUX_4(VHBA_CREATE_SUCCESS_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
	return 1;
}
EXPORT_SYMBOL_GPL(uislib_client_inject_add_vhba);

int
uislib_client_inject_del_vhba(u32 bus_no, u32 dev_no)
{
	return delete_device_glue(bus_no, dev_no);
}
EXPORT_SYMBOL_GPL(uislib_client_inject_del_vhba);

int
uislib_client_inject_add_vnic(u32 bus_no, u32 dev_no,
			      u64 phys_chan_addr, u32 chan_bytes,
			      int is_test_addr, uuid_le inst_uuid,
			      struct irq_info *intr)
{
	struct controlvm_message msg;

	/* chipset init'ed and the bus has been previously created -
	 * verify that it still exists.  step 2: create the VNIC device
	 * on the bus
	 */
	POSTCODE_LINUX_4(VNIC_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
	if (is_test_addr)
		/* signify that the physical channel address does NOT
		 * need to be ioremap()ed
		 */
		msg.hdr.flags.test_message = 1;
	msg.cmd.create_device.bus_no = bus_no;
	msg.cmd.create_device.dev_no = dev_no;
	msg.cmd.create_device.dev_inst_uuid = inst_uuid;
	if (intr)
		msg.cmd.create_device.intr = *intr;
	else
		memset(&msg.cmd.create_device.intr, 0,
		       sizeof(struct irq_info));
	msg.cmd.create_device.channel_addr = phys_chan_addr;
	if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
		POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, chan_bytes,
				 MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
		return 0;
	}
	msg.cmd.create_device.channel_bytes = chan_bytes;
	msg.cmd.create_device.data_type_uuid = spar_vnic_channel_protocol_uuid;
	if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
		POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		return 0;
	}

	POSTCODE_LINUX_4(VNIC_CREATE_SUCCESS_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
	return 1;
}
EXPORT_SYMBOL_GPL(uislib_client_inject_add_vnic);

int
uislib_client_inject_pause_vnic(u32 bus_no, u32 dev_no)
{
	struct controlvm_message msg;
	int rc;

	init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
	msg.cmd.device_change_state.bus_no = bus_no;
	msg.cmd.device_change_state.dev_no = dev_no;
	msg.cmd.device_change_state.state = segment_state_standby;
	rc = pause_device(&msg);
	if (rc != CONTROLVM_RESP_SUCCESS)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vnic);

int
uislib_client_inject_resume_vnic(u32 bus_no, u32 dev_no)
{
	struct controlvm_message msg;
	int rc;

	init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
	msg.cmd.device_change_state.bus_no = bus_no;
	msg.cmd.device_change_state.dev_no = dev_no;
	msg.cmd.device_change_state.state = segment_state_running;
	rc = resume_device(&msg);
	if (rc != CONTROLVM_RESP_SUCCESS)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vnic);

int
uislib_client_inject_del_vnic(u32 bus_no, u32 dev_no)
{
	return delete_device_glue(bus_no, dev_no);
}
EXPORT_SYMBOL_GPL(uislib_client_inject_del_vnic);

void *
uislib_cache_alloc(struct kmem_cache *cur_pool, char *fn, int ln)
{
	/* __GFP_NORETRY means "ok to fail", meaning kmem_cache_alloc()
	 * can return NULL.  If you do NOT specify __GFP_NORETRY, Linux
	 * will go to extreme measures to get memory for you (like,
	 * invoke oom killer), which will probably cripple the system.
	 */
	return kmem_cache_alloc(cur_pool, GFP_ATOMIC | __GFP_NORETRY);
}
EXPORT_SYMBOL_GPL(uislib_cache_alloc);

void
uislib_cache_free(struct kmem_cache *cur_pool, void *p, char *fn, int ln)
{
	if (!p)
		return;
	kmem_cache_free(cur_pool, p);
}
EXPORT_SYMBOL_GPL(uislib_cache_free);

/*****************************************************/
/* proc filesystem callback functions                */
/*****************************************************/

#define PLINE(...) uisutil_add_proc_line_ex(&tot, buff, \
					       buff_len, __VA_ARGS__)
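
/* Format the bus/device/statistics dump into *buff.  Returns the total
 * number of bytes emitted, or -1 if the output buffer filled up.
 */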
static int
info_debugfs_read_helper(char **buff, int *buff_len)
{
	int i, tot = 0;
	struct bus_info *bus;

	if (PLINE("\nBuses:\n") < 0)
		goto err_done;

	read_lock(&bus_list_lock);
	for (bus = bus_list; bus; bus = bus->next) {
		if (PLINE("    bus=0x%p, busNo=%d, deviceCount=%d\n",
			  bus, bus->bus_no, bus->device_count) < 0)
			goto err_done_unlock;

		if (PLINE("        Devices:\n") < 0)
			goto err_done_unlock;

		for (i = 0; i < bus->device_count; i++) {
			if (bus->device[i]) {
				if (PLINE("            busNo %d, device[%i]: 0x%p, chanptr=0x%p, swtch=0x%p\n",
					  bus->bus_no, i, bus->device[i],
					  bus->device[i]->chanptr,
					  bus->device[i]->swtch) < 0)
					goto err_done_unlock;

				if (PLINE("            first_busy_cnt=%llu, moved_to_tail_cnt=%llu, last_on_list_cnt=%llu\n",
					  bus->device[i]->first_busy_cnt,
					  bus->device[i]->moved_to_tail_cnt,
					  bus->device[i]->last_on_list_cnt) < 0)
					goto err_done_unlock;
			}
		}
	}
	read_unlock(&bus_list_lock);

	if (PLINE("UisUtils_Registered_Services: %d\n",
		  atomic_read(&uisutils_registered_services)) < 0)
		goto err_done;
	if (PLINE("cycles_before_wait %llu wait_cycles:%llu\n",
		  cycles_before_wait, wait_cycles) < 0)
		goto err_done;
	if (PLINE("tot_wakeup_cnt %llu:tot_wait_cnt %llu:tot_schedule_cnt %llu\n",
		  tot_wakeup_cnt, tot_wait_cnt, tot_schedule_cnt) < 0)
		goto err_done;
	if (PLINE("en_smart_wakeup %d\n", en_smart_wakeup) < 0)
		goto err_done;
	if (PLINE("tot_moved_to_tail_cnt %llu\n", tot_moved_to_tail_cnt) < 0)
		goto err_done;

	return tot;

err_done_unlock:
	read_unlock(&bus_list_lock);
err_done:
	return -1;
}

static ssize_t info_debugfs_read(struct file *file, char __user *buf,
				 size_t len, loff_t *offset)
{
	char *temp;
	int total_bytes = 0;
	int remaining_bytes = PROC_READ_BUFFER_SIZE;

	if (!debug_buf) {
		debug_buf = vmalloc(PROC_READ_BUFFER_SIZE);

		if (!debug_buf)
			return -ENOMEM;
	}

	temp = debug_buf;

	if ((*offset == 0) || (!debug_buf_valid)) {
		/* if the read fails, then -1 will be returned */
		total_bytes = info_debugfs_read_helper(&temp,
						       &remaining_bytes);
		if (total_bytes < 0)
			return -EIO;
		debug_buf_valid = 1;
	} else {
		total_bytes = strlen(debug_buf);
	}

	return simple_read_from_buffer(buf, len, offset,
				       debug_buf, total_bytes);
}
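
/* Look up a device by bus and device number; returns NULL if either the
 * bus or the device does not exist.
 */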
static struct device_info *find_dev(u32 bus_no, u32 dev_no)
{
	struct bus_info *bus;
	struct device_info *dev = NULL;

	read_lock(&bus_list_lock);
	for (bus = bus_list; bus; bus = bus->next) {
		if (bus->bus_no == bus_no) {
			/* make sure the device number is valid */
			if (dev_no >= bus->device_count)
				break;
			dev = bus->device[dev_no];
			break;
		}
	}
	read_unlock(&bus_list_lock);
	return dev;
}

/*  This thread calls the "interrupt" function for each device that has
 *  enabled polling via uislib_enable_channel_interrupts().  The
 *  "interrupt" function typically reads and processes the device's
 *  channel input queue.  This thread repeatedly does this, until the
 *  thread is told to stop (via uisthread_stop()).  Sleeping rules:
 *  - If we have called the "interrupt" function for all devices, and all of
 *    them have reported "nothing processed" (returned 0), then we will go to
 *    sleep for a maximum of POLLJIFFIES_NORMAL jiffies.
 *  - If anyone calls uislib_force_channel_interrupt(), the above jiffy
 *    sleep will be interrupted, and we will resume calling the "interrupt"
 *    function for all devices.
 *  - The list of devices is dynamically re-ordered in order to
 *    attempt to preserve fairness.  Whenever we spin thru the list of
 *    devices and call the dev->interrupt() function, if we find
 *    devices which report that there is still more work to do, the
 *    first such device we find is moved to the end of the device
 *    list.  This ensures that extremely busy devices don't starve out
 *    less-busy ones.
 */
static int process_incoming(void *v)
{
	unsigned long long cur_cycles, old_cycles, idle_cycles, delta_cycles;
	struct list_head *new_tail = NULL;
	int i;

	UIS_DAEMONIZE("dev_incoming");
	/* Calibrate: measure the worst-case cost of a POLLJIFFIES_NORMAL
	 * wait over 16 iterations, and use that as the initial
	 * cycles_before_wait threshold.
	 */
	for (i = 0; i < 16; i++) {
		old_cycles = get_cycles();
		wait_event_timeout(poll_dev_wake_q,
				   0, POLLJIFFIES_NORMAL);
		cur_cycles = get_cycles();
		if (wait_cycles < (cur_cycles - old_cycles))
			wait_cycles = cur_cycles - old_cycles;
	}
	cycles_before_wait = wait_cycles;
	idle_cycles = 0;
	poll_dev_start = 0;
	while (1) {
		struct list_head *lelt, *tmp;
		struct device_info *dev = NULL;

		/* poll each channel for input */
		down(&poll_dev_lock);
		new_tail = NULL;
		list_for_each_safe(lelt, tmp, &poll_dev_chan) {
			int rc = 0;

			dev = list_entry(lelt, struct device_info,
					 list_polling_device_channels);
			down(&dev->interrupt_callback_lock);
			if (!dev->interrupt) {
				up(&dev->interrupt_callback_lock);
				continue;
			}
			rc = dev->interrupt(dev->interrupt_context);
			up(&dev->interrupt_callback_lock);
			if (rc) {
				/* dev->interrupt returned, but there
				 * is still more work to do.
				 * Reschedule work to occur as soon as
				 * possible.
				 */
				idle_cycles = 0;
				if (!new_tail) {
					dev->first_busy_cnt++;
					if (!list_is_last(lelt,
							  &poll_dev_chan)) {
						new_tail = lelt;
						dev->moved_to_tail_cnt++;
					} else {
						dev->last_on_list_cnt++;
					}
				}
			}
			if (kthread_should_stop())
				break;
		}
		if (new_tail) {
			tot_moved_to_tail_cnt++;
			list_move_tail(new_tail, &poll_dev_chan);
		}
		up(&poll_dev_lock);
		cur_cycles = get_cycles();
		delta_cycles = cur_cycles - old_cycles;
		old_cycles = cur_cycles;

		/* At this point, we have scanned thru all of the
		 * channels, and at least one of the following is true:
		 * - there is no input waiting on any of the channels
		 * - we have received a signal to stop this thread
		 */
		if (kthread_should_stop())
			break;
		if (en_smart_wakeup == 0xFF)
			break;
		/* wait for POLLJIFFIES_NORMAL jiffies, or until
		 * someone wakes up poll_dev_wake_q,
		 * whichever comes first; only do a wait when we have
		 * been idle for cycles_before_wait cycles.
		 */
		if (idle_cycles > cycles_before_wait) {
			poll_dev_start = 0;
			tot_wait_cnt++;
			wait_event_timeout(poll_dev_wake_q,
					   poll_dev_start,
					   POLLJIFFIES_NORMAL);
			poll_dev_start = 1;
		} else {
			tot_schedule_cnt++;
			schedule();
			idle_cycles = idle_cycles + delta_cycles;
		}
	}
	complete_and_exit(&incoming_ti.has_stopped, 0);
}

static BOOL
initialize_incoming_thread(void)
{
	if (incoming_started)
		return TRUE;
	if (!uisthread_start(&incoming_ti,
			     &process_incoming, NULL, "dev_incoming")) {
		return FALSE;
	}
	incoming_started = TRUE;
	return TRUE;
}

/*  Add a new device/channel to the list being processed by
 *  process_incoming().
 *  <interrupt> - indicates the function to call periodically.
 *  <interrupt_context> - indicates the data to pass to the <interrupt>
 *                        function.
 */
void
uislib_enable_channel_interrupts(u32 bus_no, u32 dev_no,
				 int (*interrupt)(void *),
				 void *interrupt_context)
{
	struct device_info *dev;

	dev = find_dev(bus_no, dev_no);
	if (!dev)
		return;

	down(&poll_dev_lock);
	initialize_incoming_thread();
	dev->interrupt = interrupt;
	dev->interrupt_context = interrupt_context;
	dev->polling = TRUE;
	list_add_tail(&dev->list_polling_device_channels,
		      &poll_dev_chan);
	up(&poll_dev_lock);
}
EXPORT_SYMBOL_GPL(uislib_enable_channel_interrupts);
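
/* A minimal usage sketch (the names below are hypothetical, not part of
 * this driver).  Per process_incoming(), the callback should return
 * nonzero when it processed work and more may be pending (so it gets
 * rescheduled promptly), and 0 when its channel was idle:
 *
 *	static int my_chan_poll(void *ctx)
 *	{
 *		struct my_dev *md = ctx;
 *
 *		return my_drain_channel(md);
 *	}
 *
 *	uislib_enable_channel_interrupts(bus_no, dev_no, my_chan_poll, md);
 */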

/*  Remove a device/channel from the list being processed by
 *  process_incoming().
 */
void
uislib_disable_channel_interrupts(u32 bus_no, u32 dev_no)
{
	struct device_info *dev;

	dev = find_dev(bus_no, dev_no);
	if (!dev)
		return;
	down(&poll_dev_lock);
	list_del(&dev->list_polling_device_channels);
	dev->polling = FALSE;
	dev->interrupt = NULL;
	up(&poll_dev_lock);
}
EXPORT_SYMBOL_GPL(uislib_disable_channel_interrupts);

static void
do_wakeup_polling_device_channels(struct work_struct *dummy)
{
	if (!poll_dev_start) {
		poll_dev_start = 1;
		wake_up(&poll_dev_wake_q);
	}
}

static DECLARE_WORK(work_wakeup_polling_device_channels,
		    do_wakeup_polling_device_channels);

/*  Call this function when you want to send a hint to process_incoming() that
 *  your device might have more requests.
 */
void
uislib_force_channel_interrupt(u32 bus_no, u32 dev_no)
{
	if (en_smart_wakeup == 0)
		return;
	if (poll_dev_start)
		return;
	/* The point of using schedule_work() instead of just doing
	 * the work inline is to force a slight delay before waking up
	 * the process_incoming() thread.
	 */
	tot_wakeup_cnt++;
	schedule_work(&work_wakeup_polling_device_channels);
}
EXPORT_SYMBOL_GPL(uislib_force_channel_interrupt);

/*****************************************************/
/* Module Init & Exit functions                      */
/*****************************************************/

static int __init
uislib_mod_init(void)
{
	if (!unisys_spar_platform)
		return -ENODEV;

	/* initialize global pointers to NULL */
	bus_list = NULL;
	bus_list_count = 0;
	max_bus_count = 0;
	rwlock_init(&bus_list_lock);
	virt_control_chan_func = NULL;

	/* Issue VMCALL_GET_CONTROLVM_ADDR to get CtrlChanPhysAddr and
	 * then map this physical address to a virtual address.
	 */
	POSTCODE_LINUX_2(DRIVER_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	dir_debugfs = debugfs_create_dir(DIR_DEBUGFS_ENTRY, NULL);
	if (dir_debugfs) {
		info_debugfs_entry = debugfs_create_file(
			INFO_DEBUGFS_ENTRY_FN, 0444, dir_debugfs, NULL,
			&debugfs_info_fops);

		platformnumber_debugfs_read = debugfs_create_u32(
			PLATFORMNUMBER_DEBUGFS_ENTRY_FN, 0444, dir_debugfs,
			&platform_no);

		cycles_before_wait_debugfs_read = debugfs_create_u64(
			CYCLES_BEFORE_WAIT_DEBUGFS_ENTRY_FN, 0666, dir_debugfs,
			&cycles_before_wait);

		smart_wakeup_debugfs_entry = debugfs_create_bool(
			SMART_WAKEUP_DEBUGFS_ENTRY_FN, 0666, dir_debugfs,
			&en_smart_wakeup);
	}

	POSTCODE_LINUX_3(DRIVER_EXIT_PC, 0, POSTCODE_SEVERITY_INFO);
	return 0;
}

static void __exit
uislib_mod_exit(void)
{
	if (debug_buf) {
		vfree(debug_buf);
		debug_buf = NULL;
	}

	debugfs_remove(info_debugfs_entry);
	debugfs_remove(smart_wakeup_debugfs_entry);
	debugfs_remove(cycles_before_wait_debugfs_read);
	debugfs_remove(platformnumber_debugfs_read);
	debugfs_remove(dir_debugfs);
}

module_init(uislib_mod_init);
module_exit(uislib_mod_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Usha Srinivasan");
MODULE_ALIAS("uislib");
/* this is extracted during depmod and kept in modules.dep */