/******************************************************************************
 *
 * Module Name: evgpeutil - GPE utilities
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2015, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeutil")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_walk_gpe_list
 *
 * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
 *              context             - Value passed to callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Walk the GPE lists.
 *
 ******************************************************************************/
acpi_status
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
{
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
	acpi_status status = AE_OK;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_walk_gpe_list);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Walk the interrupt level descriptor list */

	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_info) {

		/* Walk all Gpe Blocks attached to this interrupt level */

		gpe_block = gpe_xrupt_info->gpe_block_list_head;
		while (gpe_block) {

			/* One callback per GPE block */

			status =
			    gpe_walk_callback(gpe_xrupt_info, gpe_block,
					      context);
			if (ACPI_FAILURE(status)) {
				if (status == AE_CTRL_END) {	/* Callback abort */
					status = AE_OK;
				}
				goto unlock_and_exit;
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_info = gpe_xrupt_info->next;
	}

unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}

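/*
 * Usage sketch (illustrative only, not part of this module): a minimal
 * gpe_walk_callback that counts the installed GPE blocks, and how it might
 * be driven through acpi_ev_walk_gpe_list(). The callback name and the
 * counter passed via Context are hypothetical. Returning AE_CTRL_END from
 * the callback would stop the walk early.
 *
 *  static acpi_status
 *  acpi_ev_count_gpe_blocks(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 *			     struct acpi_gpe_block_info *gpe_block,
 *			     void *context)
 *  {
 *	u32 *block_count = context;
 *
 *	(*block_count)++;
 *	return (AE_OK);
 *  }
 *
 *  From a caller:
 *
 *  u32 count = 0;
 *  (void)acpi_ev_walk_gpe_list(acpi_ev_count_gpe_blocks, &count);
 */
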
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_device
 *
 * PARAMETERS:  GPE_WALK_CALLBACK
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
 *              block device. NULL if the GPE is one of the FADT-defined GPEs.
 *
 ******************************************************************************/

acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
		       struct acpi_gpe_block_info *gpe_block, void *context)
{
	struct acpi_gpe_device_info *info = context;

	/* Increment Index by the number of GPEs in this block */

	info->next_block_base_index += gpe_block->gpe_count;

	if (info->index < info->next_block_base_index) {
		/*
		 * The GPE index is within this block, get the node. Leave the node
		 * NULL for the FADT-defined GPEs
		 */
		if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
			info->gpe_device = gpe_block->node;
		}

		info->status = AE_OK;
		return (AE_CTRL_END);
	}

	return (AE_OK);
}

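/*
 * Usage sketch (illustrative only): a GPE index is resolved to its block
 * device by seeding a struct acpi_gpe_device_info and letting
 * acpi_ev_walk_gpe_list() run this callback over every block, roughly as
 * the public acpi_get_gpe_device() interface does. Simplified; error
 * handling is omitted.
 *
 *  struct acpi_gpe_device_info info;
 *
 *  info.index = index;
 *  info.status = AE_NOT_EXIST;
 *  info.gpe_device = NULL;
 *  info.next_block_base_index = 0;
 *
 *  (void)acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
 *
 * On return, info.gpe_device is the GPE block device node (or NULL for a
 * FADT-defined GPE), and info.status is AE_OK if the index was matched.
 */
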
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
 *
 * PARAMETERS:  interrupt_number            - Interrupt for a GPE block
 *              gpe_xrupt_block             - Where the block is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
 *              block per unique interrupt level used for GPEs. Should be
 *              called only when the GPE lists are semaphore locked and not
 *              subject to change.
 *
 ******************************************************************************/

acpi_status
acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
			    struct acpi_gpe_xrupt_info **gpe_xrupt_block)
{
	struct acpi_gpe_xrupt_info *next_gpe_xrupt;
	struct acpi_gpe_xrupt_info *gpe_xrupt;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);

	/* No need for lock since we are not changing any list elements here */

	next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
	while (next_gpe_xrupt) {
		if (next_gpe_xrupt->interrupt_number == interrupt_number) {
			*gpe_xrupt_block = next_gpe_xrupt;
			return_ACPI_STATUS(AE_OK);
		}

		next_gpe_xrupt = next_gpe_xrupt->next;
	}

	/* Not found, must allocate a new xrupt descriptor */

	gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
	if (!gpe_xrupt) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	gpe_xrupt->interrupt_number = interrupt_number;

	/* Install new interrupt descriptor with spin lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (acpi_gbl_gpe_xrupt_list_head) {
		next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
		while (next_gpe_xrupt->next) {
			next_gpe_xrupt = next_gpe_xrupt->next;
		}

		next_gpe_xrupt->next = gpe_xrupt;
		gpe_xrupt->previous = next_gpe_xrupt;
	} else {
		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Install new interrupt handler if not SCI_INT */

	if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
		status = acpi_os_install_interrupt_handler(interrupt_number,
							   acpi_ev_gpe_xrupt_handler,
							   gpe_xrupt);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not install GPE interrupt handler at level 0x%X",
					interrupt_number));
			return_ACPI_STATUS(status);
		}
	}

	*gpe_xrupt_block = gpe_xrupt;
	return_ACPI_STATUS(AE_OK);
}

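/*
 * Usage sketch (illustrative only): a caller installing a new GPE block
 * first obtains (or creates) the interrupt descriptor for the block's
 * interrupt level, then links the block onto that descriptor's
 * gpe_block_list_head under acpi_gbl_gpe_lock. Error handling is omitted
 * and the local names are hypothetical.
 *
 *  struct acpi_gpe_xrupt_info *gpe_xrupt_block;
 *  acpi_status status;
 *
 *  status = acpi_ev_get_gpe_xrupt_block(interrupt_number, &gpe_xrupt_block);
 *  if (ACPI_FAILURE(status)) {
 *	return_ACPI_STATUS(status);
 *  }
 *
 * The new GPE block is then appended to
 * gpe_xrupt_block->gpe_block_list_head while holding acpi_gbl_gpe_lock.
 */
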
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_xrupt
 *
 * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
 *              interrupt handler if not the SCI interrupt.
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);

	/* We never want to remove the SCI interrupt handler */

	if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
		gpe_xrupt->gpe_block_list_head = NULL;
		return_ACPI_STATUS(AE_OK);
	}

	/* Disable this interrupt */

	status =
	    acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
					     acpi_ev_gpe_xrupt_handler);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Unlink the interrupt block with lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (gpe_xrupt->previous) {
		gpe_xrupt->previous->next = gpe_xrupt->next;
	} else {
		/* No previous, update list head */

		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
	}

	if (gpe_xrupt->next) {
		gpe_xrupt->next->previous = gpe_xrupt->previous;
	}
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Free the block */

	ACPI_FREE(gpe_xrupt);
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_handlers
 *
 * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
 *              gpe_block           - Gpe Block info
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
 *              Used only prior to termination.
 *
 ******************************************************************************/

acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			    struct acpi_gpe_block_info *gpe_block,
			    void *context)
{
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_gpe_notify_info *notify;
	struct acpi_gpe_notify_info *next;
	u32 i;
	u32 j;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);

	/* Examine each GPE Register within the block */

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Now look at the individual GPEs in this byte register */

		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			gpe_event_info = &gpe_block->event_info[((acpi_size) i *
								 ACPI_GPE_REGISTER_WIDTH)
								+ j];

			if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
			     ACPI_GPE_DISPATCH_HANDLER) ||
			    (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
			     ACPI_GPE_DISPATCH_RAW_HANDLER)) {

				/* Delete an installed handler block */

				ACPI_FREE(gpe_event_info->dispatch.handler);
				gpe_event_info->dispatch.handler = NULL;
				gpe_event_info->flags &=
				    ~ACPI_GPE_DISPATCH_MASK;
			} else if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)
				   == ACPI_GPE_DISPATCH_NOTIFY) {

				/* Delete the implicit notification device list */

				notify = gpe_event_info->dispatch.notify_list;
				while (notify) {
					next = notify->next;
					ACPI_FREE(notify);
					notify = next;
				}
				gpe_event_info->dispatch.notify_list = NULL;
				gpe_event_info->flags &=
				    ~ACPI_GPE_DISPATCH_MASK;
			}
		}
	}

	return_ACPI_STATUS(AE_OK);
}

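/*
 * Usage sketch (illustrative only): this routine is itself a
 * gpe_walk_callback, so it is applied to every installed GPE block via the
 * list walker, roughly as the event-manager shutdown path does prior to
 * termination:
 *
 *  (void)acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
 */
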
#endif				/* !ACPI_REDUCED_HARDWARE */