ws                332 arch/mips/include/asm/asmmacro.h 	.macro	copy_s_w	ws, n
ws                341 arch/mips/include/asm/asmmacro.h 	.macro	copy_s_d	ws, n
ws                472 arch/mips/include/asm/asmmacro.h 	.macro	copy_s_w	ws, n
ws                481 arch/mips/include/asm/asmmacro.h 	.macro	copy_s_d	ws, n
ws                538 arch/mips/include/asm/r4kcache.h 	unsigned long ws, addr;						\
ws                540 arch/mips/include/asm/r4kcache.h 	for (ws = 0; ws < ws_end; ws += ws_inc)				\
ws                542 arch/mips/include/asm/r4kcache.h 			cache##lsize##_unroll32(addr|ws, indexop);	\
ws                564 arch/mips/include/asm/r4kcache.h 	unsigned long ws, addr;						\
ws                566 arch/mips/include/asm/r4kcache.h 	for (ws = 0; ws < ws_end; ws += ws_inc)				\
ws                568 arch/mips/include/asm/r4kcache.h 			cache##lsize##_unroll32(addr|ws, indexop);	\
ws                687 arch/mips/include/asm/r4kcache.h 	unsigned long ws, addr;						\
ws                689 arch/mips/include/asm/r4kcache.h 	for (ws = 0; ws < ws_end; ws += ws_inc)				\
ws                691 arch/mips/include/asm/r4kcache.h 			cache##lsize##_unroll32(addr|ws, indexop);	\
ws                268 arch/mips/mm/c-r4k.c 	unsigned long ws, addr;
ws                272 arch/mips/mm/c-r4k.c 	for (ws = 0; ws < ws_end; ws += ws_inc)
ws                274 arch/mips/mm/c-r4k.c 			cache32_unroll32(addr|ws, Index_Invalidate_I);
ws                277 arch/mips/mm/c-r4k.c 	for (ws = 0; ws < ws_end; ws += ws_inc)
ws                279 arch/mips/mm/c-r4k.c 			cache32_unroll32(addr|ws, Index_Invalidate_I);
ws                299 arch/mips/mm/c-r4k.c 	unsigned long ws, addr;
ws                303 arch/mips/mm/c-r4k.c 	for (ws = 0; ws < ws_end; ws += ws_inc)
ws                305 arch/mips/mm/c-r4k.c 			cache32_unroll32(addr|ws, Index_Invalidate_I);
ws                308 arch/mips/mm/c-r4k.c 	for (ws = 0; ws < ws_end; ws += ws_inc)
ws                310 arch/mips/mm/c-r4k.c 			cache32_unroll32(addr|ws, Index_Invalidate_I);
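
In the MIPS hits above, ws is the way stride: the indexed cache-op helpers walk every cache index (addr) and every way offset (ws), OR-ing the two into the address given to the cache instruction. A minimal sketch of that double loop, assuming the macro shape shown above; cache_op_indexed() is a hypothetical stand-in for the real cache##lsize##_unroll32() assembler macro, and start/end/ws_inc/ws_end come from the cache geometry:

	/* Hedged sketch of the blast_*cache() index walk from r4kcache.h. */
	static void blast_icache_sketch(unsigned long start, unsigned long end,
					unsigned long ws_inc, unsigned long ws_end,
					unsigned long lsize)
	{
		unsigned long ws, addr;

		for (ws = 0; ws < ws_end; ws += ws_inc)		/* one pass per way */
			for (addr = start; addr < end; addr += lsize * 32)
				cache_op_indexed(addr | ws, Index_Invalidate_I);
	}
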
ws                 26 arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c static void fsl_free_resource(struct work_struct *ws)
ws                 29 arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c 		container_of(ws, struct fsl_mpic_timer_wakeup, free_work);
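
The fsl_free_resource() hit above is the first of many handlers in this listing that take a struct work_struct *ws and recover the enclosing object with container_of(); the same shape recurs below in drbd, mISDN, dm-cache, md, the CAN SPI drivers and others. A minimal kernel-style sketch of the idiom, with made-up struct and function names:

	#include <linux/workqueue.h>

	struct my_ctx {
		int pending;
		struct work_struct free_work;	/* embedded, queued with schedule_work() */
	};

	static void my_free_resource(struct work_struct *ws)
	{
		/* map the work_struct back to its container */
		struct my_ctx *ctx = container_of(ws, struct my_ctx, free_work);

		ctx->pending = 0;
	}

	/* setup: INIT_WORK(&ctx->free_work, my_free_resource);
	 *        schedule_work(&ctx->free_work); */
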
ws                333 arch/s390/crypto/prng.c 	struct prno_ws_s ws;
ws                335 arch/s390/crypto/prng.c 	memset(&ws, 0, sizeof(ws));
ws                339 arch/s390/crypto/prng.c 		   &ws, NULL, 0, seed, sizeof(seed));
ws                342 arch/s390/crypto/prng.c 	if (memcmp(ws.V, V0, sizeof(V0)) != 0
ws                343 arch/s390/crypto/prng.c 	    || memcmp(ws.C, C0, sizeof(C0)) != 0) {
ws                352 arch/s390/crypto/prng.c 		   &ws, buf, sizeof(buf), NULL, 0);
ws                354 arch/s390/crypto/prng.c 		   &ws, buf, sizeof(buf), NULL, 0);
ws                113 arch/xtensa/include/asm/processor.h #define MAKE_RA_FOR_CALL(ra,ws)   (((ra) & 0x3fffffff) | (ws) << 30)
ws                305 arch/xtensa/kernel/ptrace.c 			unsigned long ws = regs->windowstart;
ws                306 arch/xtensa/kernel/ptrace.c 			tmp = ((ws >> wb) | (ws << (WSBITS - wb))) &
ws                 56 arch/xtensa/kernel/signal.c 	const unsigned long ws = regs->windowstart;
ws                 70 arch/xtensa/kernel/signal.c 	wm = (ws >> wb) | (ws << (XCHAL_NUM_AREGS / 4 - wb));
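
In the Xtensa hits, ws holds the WINDOWSTART register image and the expression (ws >> wb) | (ws << (WSBITS - wb)) rotates it right by the current WINDOWBASE so the live frames line up at bit 0. A small userspace illustration of that rotation; WSBITS is fixed at 16 here purely for the example:

	#include <stdint.h>

	#define WSBITS 16	/* assumed window count for this sketch */

	/* rotate the windowstart image right by the current windowbase */
	static uint32_t rotate_windowstart(uint32_t ws, unsigned int wb)
	{
		return ((ws >> wb) | (ws << (WSBITS - wb))) & ((1u << WSBITS) - 1);
	}
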
ws                114 block/blk-mq-tag.c 	struct sbq_wait_state *ws;
ws                138 block/blk-mq-tag.c 	ws = bt_wait_ptr(bt, data->hctx);
ws                157 block/blk-mq-tag.c 		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
ws                166 block/blk-mq-tag.c 		sbitmap_finish_wait(bt, ws, &wait);
ws                185 block/blk-mq-tag.c 		ws = bt_wait_ptr(bt, data->hctx);
ws                188 block/blk-mq-tag.c 	sbitmap_finish_wait(bt, ws, &wait);
ws                 43 block/blk-mq-tag.h 		return &bt->ws[0];
ws                709 block/kyber-iosched.c 	struct sbq_wait_state *ws;
ws                720 block/kyber-iosched.c 		ws = sbq_wait_ptr(domain_tokens,
ws                722 block/kyber-iosched.c 		khd->domain_ws[sched_domain] = ws;
ws                723 block/kyber-iosched.c 		sbitmap_add_wait_queue(domain_tokens, ws, wait);
ws                740 block/kyber-iosched.c 		ws = khd->domain_ws[sched_domain];
ws                741 block/kyber-iosched.c 		spin_lock_irq(&ws->wait.lock);
ws                743 block/kyber-iosched.c 		spin_unlock_irq(&ws->wait.lock);
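
The blk-mq-tag.c and kyber hits above use the sbitmap wait-queue helpers around tag allocation: pick a wait state, prepare to wait, retry the bitmap, sleep, and finish the wait once a bit is found. A hedged sketch of that loop using the same helpers from <linux/sbitmap.h>, with hctx migration and error handling omitted:

	/* Simplified version of the wait loop seen in blk-mq-tag.c. */
	static int get_tag_sketch(struct sbitmap_queue *bt)
	{
		struct sbq_wait_state *ws = &bt->ws[0];
		DEFINE_SBQ_WAIT(wait);
		unsigned int cpu;
		int tag;

		for (;;) {
			sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
			tag = sbitmap_queue_get(bt, &cpu);
			if (tag >= 0)
				break;
			schedule();
		}
		sbitmap_finish_wait(bt, ws, &wait);
		return tag;
	}
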
ws                460 drivers/acpi/device_pm.c 		pm_wakeup_ws_event(adev->wakeup.ws, 0, acpi_s2idle_wakeup());
ws                504 drivers/acpi/device_pm.c 	adev->wakeup.ws = wakeup_source_register(&adev->dev,
ws                538 drivers/acpi/device_pm.c 	wakeup_source_unregister(adev->wakeup.ws);
ws                157 drivers/base/power/power.h 				   struct wakeup_source *ws);
ws                158 drivers/base/power/power.h extern void wakeup_source_sysfs_remove(struct wakeup_source *ws);
ws                 83 drivers/base/power/wakeup.c 	struct wakeup_source *ws;
ws                 87 drivers/base/power/wakeup.c 	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
ws                 88 drivers/base/power/wakeup.c 	if (!ws)
ws                 94 drivers/base/power/wakeup.c 	ws->name = ws_name;
ws                 99 drivers/base/power/wakeup.c 	ws->id = id;
ws                101 drivers/base/power/wakeup.c 	return ws;
ws                104 drivers/base/power/wakeup.c 	kfree_const(ws->name);
ws                106 drivers/base/power/wakeup.c 	kfree(ws);
ws                115 drivers/base/power/wakeup.c static void wakeup_source_record(struct wakeup_source *ws)
ws                121 drivers/base/power/wakeup.c 	if (ws->event_count) {
ws                123 drivers/base/power/wakeup.c 			ktime_add(deleted_ws.total_time, ws->total_time);
ws                126 drivers/base/power/wakeup.c 				  ws->prevent_sleep_time);
ws                128 drivers/base/power/wakeup.c 			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
ws                129 drivers/base/power/wakeup.c 				deleted_ws.max_time : ws->max_time;
ws                130 drivers/base/power/wakeup.c 		deleted_ws.event_count += ws->event_count;
ws                131 drivers/base/power/wakeup.c 		deleted_ws.active_count += ws->active_count;
ws                132 drivers/base/power/wakeup.c 		deleted_ws.relax_count += ws->relax_count;
ws                133 drivers/base/power/wakeup.c 		deleted_ws.expire_count += ws->expire_count;
ws                134 drivers/base/power/wakeup.c 		deleted_ws.wakeup_count += ws->wakeup_count;
ws                140 drivers/base/power/wakeup.c static void wakeup_source_free(struct wakeup_source *ws)
ws                142 drivers/base/power/wakeup.c 	ida_free(&wakeup_ida, ws->id);
ws                143 drivers/base/power/wakeup.c 	kfree_const(ws->name);
ws                144 drivers/base/power/wakeup.c 	kfree(ws);
ws                153 drivers/base/power/wakeup.c void wakeup_source_destroy(struct wakeup_source *ws)
ws                155 drivers/base/power/wakeup.c 	if (!ws)
ws                158 drivers/base/power/wakeup.c 	__pm_relax(ws);
ws                159 drivers/base/power/wakeup.c 	wakeup_source_record(ws);
ws                160 drivers/base/power/wakeup.c 	wakeup_source_free(ws);
ws                168 drivers/base/power/wakeup.c void wakeup_source_add(struct wakeup_source *ws)
ws                172 drivers/base/power/wakeup.c 	if (WARN_ON(!ws))
ws                175 drivers/base/power/wakeup.c 	spin_lock_init(&ws->lock);
ws                176 drivers/base/power/wakeup.c 	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
ws                177 drivers/base/power/wakeup.c 	ws->active = false;
ws                180 drivers/base/power/wakeup.c 	list_add_rcu(&ws->entry, &wakeup_sources);
ws                189 drivers/base/power/wakeup.c void wakeup_source_remove(struct wakeup_source *ws)
ws                193 drivers/base/power/wakeup.c 	if (WARN_ON(!ws))
ws                197 drivers/base/power/wakeup.c 	list_del_rcu(&ws->entry);
ws                201 drivers/base/power/wakeup.c 	del_timer_sync(&ws->timer);
ws                206 drivers/base/power/wakeup.c 	ws->timer.function = NULL;
ws                218 drivers/base/power/wakeup.c 	struct wakeup_source *ws;
ws                221 drivers/base/power/wakeup.c 	ws = wakeup_source_create(name);
ws                222 drivers/base/power/wakeup.c 	if (ws) {
ws                224 drivers/base/power/wakeup.c 			ret = wakeup_source_sysfs_add(dev, ws);
ws                226 drivers/base/power/wakeup.c 				wakeup_source_free(ws);
ws                230 drivers/base/power/wakeup.c 		wakeup_source_add(ws);
ws                232 drivers/base/power/wakeup.c 	return ws;
ws                240 drivers/base/power/wakeup.c void wakeup_source_unregister(struct wakeup_source *ws)
ws                242 drivers/base/power/wakeup.c 	if (ws) {
ws                243 drivers/base/power/wakeup.c 		wakeup_source_remove(ws);
ws                244 drivers/base/power/wakeup.c 		if (ws->dev)
ws                245 drivers/base/power/wakeup.c 			wakeup_source_sysfs_remove(ws);
ws                247 drivers/base/power/wakeup.c 		wakeup_source_destroy(ws);
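
The drivers/base/power/wakeup.c hits above are the wakeup-source API itself. Typical driver-side usage registers a source, holds it across a critical section with __pm_stay_awake()/__pm_relax(), and unregisters it on teardown; a brief hedged sketch (device and name are examples only):

	#include <linux/pm_wakeup.h>

	static struct wakeup_source *my_ws;

	static int my_probe_wakeup(struct device *dev)
	{
		my_ws = wakeup_source_register(dev, "my-irq-ws");
		if (!my_ws)
			return -ENOMEM;
		return 0;
	}

	static void my_irq_path(void)
	{
		__pm_stay_awake(my_ws);	/* block suspend while the event is handled */
		/* ... process the wake event ... */
		__pm_relax(my_ws);	/* allow suspend again */
	}

	static void my_remove_wakeup(void)
	{
		wakeup_source_unregister(my_ws);
	}
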
ws                259 drivers/base/power/wakeup.c static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
ws                266 drivers/base/power/wakeup.c 	dev->power.wakeup = ws;
ws                281 drivers/base/power/wakeup.c 	struct wakeup_source *ws;
ws                290 drivers/base/power/wakeup.c 	ws = wakeup_source_register(dev, dev_name(dev));
ws                291 drivers/base/power/wakeup.c 	if (!ws)
ws                294 drivers/base/power/wakeup.c 	ret = device_wakeup_attach(dev, ws);
ws                296 drivers/base/power/wakeup.c 		wakeup_source_unregister(ws);
ws                316 drivers/base/power/wakeup.c 	struct wakeup_source *ws;
ws                318 drivers/base/power/wakeup.c 	ws = dev->power.wakeup;
ws                319 drivers/base/power/wakeup.c 	if (!ws)
ws                322 drivers/base/power/wakeup.c 	if (ws->wakeirq)
ws                325 drivers/base/power/wakeup.c 	ws->wakeirq = wakeirq;
ws                338 drivers/base/power/wakeup.c 	struct wakeup_source *ws;
ws                340 drivers/base/power/wakeup.c 	ws = dev->power.wakeup;
ws                341 drivers/base/power/wakeup.c 	if (ws)
ws                342 drivers/base/power/wakeup.c 		ws->wakeirq = NULL;
ws                352 drivers/base/power/wakeup.c 	struct wakeup_source *ws;
ws                356 drivers/base/power/wakeup.c 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
ws                357 drivers/base/power/wakeup.c 		dev_pm_arm_wake_irq(ws->wakeirq);
ws                368 drivers/base/power/wakeup.c 	struct wakeup_source *ws;
ws                372 drivers/base/power/wakeup.c 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
ws                373 drivers/base/power/wakeup.c 		dev_pm_disarm_wake_irq(ws->wakeirq);
ws                385 drivers/base/power/wakeup.c 	struct wakeup_source *ws;
ws                388 drivers/base/power/wakeup.c 	ws = dev->power.wakeup;
ws                391 drivers/base/power/wakeup.c 	return ws;
ws                403 drivers/base/power/wakeup.c 	struct wakeup_source *ws;
ws                408 drivers/base/power/wakeup.c 	ws = device_wakeup_detach(dev);
ws                409 drivers/base/power/wakeup.c 	wakeup_source_unregister(ws);
ws                489 drivers/base/power/wakeup.c static bool wakeup_source_not_registered(struct wakeup_source *ws)
ws                495 drivers/base/power/wakeup.c 	return ws->timer.function != pm_wakeup_timer_fn;
ws                534 drivers/base/power/wakeup.c static void wakeup_source_activate(struct wakeup_source *ws)
ws                538 drivers/base/power/wakeup.c 	if (WARN_ONCE(wakeup_source_not_registered(ws),
ws                542 drivers/base/power/wakeup.c 	ws->active = true;
ws                543 drivers/base/power/wakeup.c 	ws->active_count++;
ws                544 drivers/base/power/wakeup.c 	ws->last_time = ktime_get();
ws                545 drivers/base/power/wakeup.c 	if (ws->autosleep_enabled)
ws                546 drivers/base/power/wakeup.c 		ws->start_prevent_time = ws->last_time;
ws                551 drivers/base/power/wakeup.c 	trace_wakeup_source_activate(ws->name, cec);
ws                559 drivers/base/power/wakeup.c static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
ws                561 drivers/base/power/wakeup.c 	ws->event_count++;
ws                564 drivers/base/power/wakeup.c 		ws->wakeup_count++;
ws                566 drivers/base/power/wakeup.c 	if (!ws->active)
ws                567 drivers/base/power/wakeup.c 		wakeup_source_activate(ws);
ws                579 drivers/base/power/wakeup.c void __pm_stay_awake(struct wakeup_source *ws)
ws                583 drivers/base/power/wakeup.c 	if (!ws)
ws                586 drivers/base/power/wakeup.c 	spin_lock_irqsave(&ws->lock, flags);
ws                588 drivers/base/power/wakeup.c 	wakeup_source_report_event(ws, false);
ws                589 drivers/base/power/wakeup.c 	del_timer(&ws->timer);
ws                590 drivers/base/power/wakeup.c 	ws->timer_expires = 0;
ws                592 drivers/base/power/wakeup.c 	spin_unlock_irqrestore(&ws->lock, flags);
ws                621 drivers/base/power/wakeup.c static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
ws                623 drivers/base/power/wakeup.c 	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
ws                624 drivers/base/power/wakeup.c 	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
ws                627 drivers/base/power/wakeup.c static inline void update_prevent_sleep_time(struct wakeup_source *ws,
ws                639 drivers/base/power/wakeup.c static void wakeup_source_deactivate(struct wakeup_source *ws)
ws                645 drivers/base/power/wakeup.c 	ws->relax_count++;
ws                655 drivers/base/power/wakeup.c 	if (ws->relax_count != ws->active_count) {
ws                656 drivers/base/power/wakeup.c 		ws->relax_count--;
ws                660 drivers/base/power/wakeup.c 	ws->active = false;
ws                663 drivers/base/power/wakeup.c 	duration = ktime_sub(now, ws->last_time);
ws                664 drivers/base/power/wakeup.c 	ws->total_time = ktime_add(ws->total_time, duration);
ws                665 drivers/base/power/wakeup.c 	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
ws                666 drivers/base/power/wakeup.c 		ws->max_time = duration;
ws                668 drivers/base/power/wakeup.c 	ws->last_time = now;
ws                669 drivers/base/power/wakeup.c 	del_timer(&ws->timer);
ws                670 drivers/base/power/wakeup.c 	ws->timer_expires = 0;
ws                672 drivers/base/power/wakeup.c 	if (ws->autosleep_enabled)
ws                673 drivers/base/power/wakeup.c 		update_prevent_sleep_time(ws, now);
ws                680 drivers/base/power/wakeup.c 	trace_wakeup_source_deactivate(ws->name, cec);
ws                696 drivers/base/power/wakeup.c void __pm_relax(struct wakeup_source *ws)
ws                700 drivers/base/power/wakeup.c 	if (!ws)
ws                703 drivers/base/power/wakeup.c 	spin_lock_irqsave(&ws->lock, flags);
ws                704 drivers/base/power/wakeup.c 	if (ws->active)
ws                705 drivers/base/power/wakeup.c 		wakeup_source_deactivate(ws);
ws                706 drivers/base/power/wakeup.c 	spin_unlock_irqrestore(&ws->lock, flags);
ws                739 drivers/base/power/wakeup.c 	struct wakeup_source *ws = from_timer(ws, t, timer);
ws                742 drivers/base/power/wakeup.c 	spin_lock_irqsave(&ws->lock, flags);
ws                744 drivers/base/power/wakeup.c 	if (ws->active && ws->timer_expires
ws                745 drivers/base/power/wakeup.c 	    && time_after_eq(jiffies, ws->timer_expires)) {
ws                746 drivers/base/power/wakeup.c 		wakeup_source_deactivate(ws);
ws                747 drivers/base/power/wakeup.c 		ws->expire_count++;
ws                750 drivers/base/power/wakeup.c 	spin_unlock_irqrestore(&ws->lock, flags);
ws                766 drivers/base/power/wakeup.c void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
ws                771 drivers/base/power/wakeup.c 	if (!ws)
ws                774 drivers/base/power/wakeup.c 	spin_lock_irqsave(&ws->lock, flags);
ws                776 drivers/base/power/wakeup.c 	wakeup_source_report_event(ws, hard);
ws                779 drivers/base/power/wakeup.c 		wakeup_source_deactivate(ws);
ws                787 drivers/base/power/wakeup.c 	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
ws                788 drivers/base/power/wakeup.c 		mod_timer(&ws->timer, expires);
ws                789 drivers/base/power/wakeup.c 		ws->timer_expires = expires;
ws                793 drivers/base/power/wakeup.c 	spin_unlock_irqrestore(&ws->lock, flags);
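
pm_wakeup_ws_event() above is the timed variant: it reports an event and, when msec is non-zero, arms ws->timer so the source deactivates itself after the timeout instead of needing a matching __pm_relax(). For example (100 ms is an arbitrary value for illustration):

	/* keep the system awake for ~100 ms after an event */
	pm_wakeup_ws_event(my_ws, 100, false);
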
ws                820 drivers/base/power/wakeup.c 	struct wakeup_source *ws;
ws                825 drivers/base/power/wakeup.c 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
ws                826 drivers/base/power/wakeup.c 		if (ws->active) {
ws                827 drivers/base/power/wakeup.c 			pm_pr_dbg("active wakeup source: %s\n", ws->name);
ws                831 drivers/base/power/wakeup.c 			    ktime_to_ns(ws->last_time) >
ws                833 drivers/base/power/wakeup.c 			last_activity_ws = ws;
ws                971 drivers/base/power/wakeup.c 	struct wakeup_source *ws;
ws                976 drivers/base/power/wakeup.c 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
ws                977 drivers/base/power/wakeup.c 		spin_lock_irq(&ws->lock);
ws                978 drivers/base/power/wakeup.c 		if (ws->autosleep_enabled != set) {
ws                979 drivers/base/power/wakeup.c 			ws->autosleep_enabled = set;
ws                980 drivers/base/power/wakeup.c 			if (ws->active) {
ws                982 drivers/base/power/wakeup.c 					ws->start_prevent_time = now;
ws                984 drivers/base/power/wakeup.c 					update_prevent_sleep_time(ws, now);
ws                987 drivers/base/power/wakeup.c 		spin_unlock_irq(&ws->lock);
ws                999 drivers/base/power/wakeup.c 				     struct wakeup_source *ws)
ws               1008 drivers/base/power/wakeup.c 	spin_lock_irqsave(&ws->lock, flags);
ws               1010 drivers/base/power/wakeup.c 	total_time = ws->total_time;
ws               1011 drivers/base/power/wakeup.c 	max_time = ws->max_time;
ws               1012 drivers/base/power/wakeup.c 	prevent_sleep_time = ws->prevent_sleep_time;
ws               1013 drivers/base/power/wakeup.c 	active_count = ws->active_count;
ws               1014 drivers/base/power/wakeup.c 	if (ws->active) {
ws               1017 drivers/base/power/wakeup.c 		active_time = ktime_sub(now, ws->last_time);
ws               1022 drivers/base/power/wakeup.c 		if (ws->autosleep_enabled)
ws               1024 drivers/base/power/wakeup.c 				ktime_sub(now, ws->start_prevent_time));
ws               1030 drivers/base/power/wakeup.c 		   ws->name, active_count, ws->event_count,
ws               1031 drivers/base/power/wakeup.c 		   ws->wakeup_count, ws->expire_count,
ws               1033 drivers/base/power/wakeup.c 		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
ws               1036 drivers/base/power/wakeup.c 	spin_unlock_irqrestore(&ws->lock, flags);
ws               1044 drivers/base/power/wakeup.c 	struct wakeup_source *ws;
ws               1055 drivers/base/power/wakeup.c 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
ws               1057 drivers/base/power/wakeup.c 			return ws;
ws               1066 drivers/base/power/wakeup.c 	struct wakeup_source *ws = v;
ws               1071 drivers/base/power/wakeup.c 	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
ws               1072 drivers/base/power/wakeup.c 		next_ws = ws;
ws               1093 drivers/base/power/wakeup.c 	struct wakeup_source *ws = v;
ws               1095 drivers/base/power/wakeup.c 	print_wakeup_source_stats(m, ws);
ws                 27 drivers/base/power/wakeup_stats.c 	struct wakeup_source *ws = dev_get_drvdata(dev);		\
ws                 29 drivers/base/power/wakeup_stats.c 	return sprintf(buf, "%lu\n", ws->_name);			\
ws                 41 drivers/base/power/wakeup_stats.c 	struct wakeup_source *ws = dev_get_drvdata(dev);
ws                 43 drivers/base/power/wakeup_stats.c 		ws->active ? ktime_sub(ktime_get(), ws->last_time) : 0;
ws                 52 drivers/base/power/wakeup_stats.c 	struct wakeup_source *ws = dev_get_drvdata(dev);
ws                 54 drivers/base/power/wakeup_stats.c 	ktime_t total_time = ws->total_time;
ws                 56 drivers/base/power/wakeup_stats.c 	if (ws->active) {
ws                 57 drivers/base/power/wakeup_stats.c 		active_time = ktime_sub(ktime_get(), ws->last_time);
ws                 67 drivers/base/power/wakeup_stats.c 	struct wakeup_source *ws = dev_get_drvdata(dev);
ws                 69 drivers/base/power/wakeup_stats.c 	ktime_t max_time = ws->max_time;
ws                 71 drivers/base/power/wakeup_stats.c 	if (ws->active) {
ws                 72 drivers/base/power/wakeup_stats.c 		active_time = ktime_sub(ktime_get(), ws->last_time);
ws                 83 drivers/base/power/wakeup_stats.c 	struct wakeup_source *ws = dev_get_drvdata(dev);
ws                 85 drivers/base/power/wakeup_stats.c 	return sprintf(buf, "%lld\n", ktime_to_ms(ws->last_time));
ws                 92 drivers/base/power/wakeup_stats.c 	struct wakeup_source *ws = dev_get_drvdata(dev);
ws                 94 drivers/base/power/wakeup_stats.c 	return sprintf(buf, "%s\n", ws->name);
ws                102 drivers/base/power/wakeup_stats.c 	struct wakeup_source *ws = dev_get_drvdata(dev);
ws                103 drivers/base/power/wakeup_stats.c 	ktime_t prevent_sleep_time = ws->prevent_sleep_time;
ws                105 drivers/base/power/wakeup_stats.c 	if (ws->active && ws->autosleep_enabled) {
ws                107 drivers/base/power/wakeup_stats.c 			ktime_sub(ktime_get(), ws->start_prevent_time));
ws                134 drivers/base/power/wakeup_stats.c 						  struct wakeup_source *ws)
ws                151 drivers/base/power/wakeup_stats.c 	dev_set_drvdata(dev, ws);
ws                154 drivers/base/power/wakeup_stats.c 	retval = kobject_set_name(&dev->kobj, "wakeup%d", ws->id);
ws                174 drivers/base/power/wakeup_stats.c int wakeup_source_sysfs_add(struct device *parent, struct wakeup_source *ws)
ws                178 drivers/base/power/wakeup_stats.c 	dev = wakeup_source_device_create(parent, ws);
ws                181 drivers/base/power/wakeup_stats.c 	ws->dev = dev;
ws                203 drivers/base/power/wakeup_stats.c void wakeup_source_sysfs_remove(struct wakeup_source *ws)
ws                205 drivers/base/power/wakeup_stats.c 	device_unregister(ws->dev);
ws               1452 drivers/block/drbd/drbd_int.h extern void do_submit(struct work_struct *ws);
ws               1551 drivers/block/drbd/drbd_int.h extern void drbd_send_ping_wf(struct work_struct *ws);
ws               1552 drivers/block/drbd/drbd_int.h extern void drbd_send_acks_wf(struct work_struct *ws);
ws               2282 drivers/block/drbd/drbd_main.c static void do_retry(struct work_struct *ws)
ws               2284 drivers/block/drbd/drbd_main.c 	struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
ws               6149 drivers/block/drbd/drbd_receiver.c void drbd_send_acks_wf(struct work_struct *ws)
ws               6152 drivers/block/drbd/drbd_receiver.c 		container_of(ws, struct drbd_peer_device, send_acks_work);
ws               1515 drivers/block/drbd/drbd_req.c void do_submit(struct work_struct *ws)
ws               1517 drivers/block/drbd/drbd_req.c 	struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
ws                 79 drivers/char/virtio_console.c 	struct winsize ws;
ws               1180 drivers/char/virtio_console.c 		hvc_resize(port->cons.hvc, port->cons.ws);
ws               1324 drivers/char/virtio_console.c 	port->cons.ws.ws_row = rows;
ws               1325 drivers/char/virtio_console.c 	port->cons.ws.ws_col = cols;
ws               1382 drivers/char/virtio_console.c 	port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
ws                242 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
ws                 59 drivers/gpu/drm/amd/amdgpu/atom.c 	uint32_t *ps, *ws;
ws                259 drivers/gpu/drm/amd/amdgpu/atom.c 			val = ctx->ws[idx];
ws                526 drivers/gpu/drm/amd/amdgpu/atom.c 			ctx->ws[idx] = val;
ws               1204 drivers/gpu/drm/amd/amdgpu/atom.c 	int len, ws, ps, ptr;
ws               1213 drivers/gpu/drm/amd/amdgpu/atom.c 	ws = CU8(base + ATOM_CT_WS_PTR);
ws               1217 drivers/gpu/drm/amd/amdgpu/atom.c 	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
ws               1225 drivers/gpu/drm/amd/amdgpu/atom.c 	if (ws)
ws               1226 drivers/gpu/drm/amd/amdgpu/atom.c 		ectx.ws = kcalloc(4, ws, GFP_KERNEL);
ws               1228 drivers/gpu/drm/amd/amdgpu/atom.c 		ectx.ws = NULL;
ws               1239 drivers/gpu/drm/amd/amdgpu/atom.c 				base, len, ws, ps, ptr - 1);
ws               1257 drivers/gpu/drm/amd/amdgpu/atom.c 	if (ws)
ws               1258 drivers/gpu/drm/amd/amdgpu/atom.c 		kfree(ectx.ws);
ws                 61 drivers/gpu/drm/radeon/atom.c 	uint32_t *ps, *ws;
ws                265 drivers/gpu/drm/radeon/atom.c 			val = ctx->ws[idx];
ws                532 drivers/gpu/drm/radeon/atom.c 			ctx->ws[idx] = val;
ws               1161 drivers/gpu/drm/radeon/atom.c 	int len, ws, ps, ptr;
ws               1170 drivers/gpu/drm/radeon/atom.c 	ws = CU8(base + ATOM_CT_WS_PTR);
ws               1174 drivers/gpu/drm/radeon/atom.c 	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
ws               1182 drivers/gpu/drm/radeon/atom.c 	if (ws)
ws               1183 drivers/gpu/drm/radeon/atom.c 		ectx.ws = kcalloc(4, ws, GFP_KERNEL);
ws               1185 drivers/gpu/drm/radeon/atom.c 		ectx.ws = NULL;
ws               1196 drivers/gpu/drm/radeon/atom.c 				base, len, ws, ps, ptr - 1);
ws               1214 drivers/gpu/drm/radeon/atom.c 	if (ws)
ws               1215 drivers/gpu/drm/radeon/atom.c 		kfree(ectx.ws);
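
In the AtomBIOS interpreter (amdgpu and radeon atom.c above), ws is the command table's workspace size in 32-bit slots, read from the table header; the interpreter allocates 4 * ws bytes and the opcode handlers index it as ctx->ws[idx]. A hedged sketch of the allocation step in atom_execute_table_locked(); CU8() and ATOM_CT_WS_PTR are the interpreter's own accessors and constants:

	int ws = CU8(base + ATOM_CT_WS_PTR);	/* workspace size, in 32-bit words */

	if (ws)
		ectx.ws = kcalloc(4, ws, GFP_KERNEL);	/* 4 bytes per slot, zeroed */
	else
		ectx.ws = NULL;

	/* ... interpret the table; MOVE ops read and write ectx.ws[idx] ... */

	if (ws)
		kfree(ectx.ws);
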
ws                395 drivers/gpu/drm/vc4/vc4_validate_shaders.c 	bool ws = inst & QPU_WS;
ws                396 drivers/gpu/drm/vc4/vc4_validate_shaders.c 	bool is_b = is_mul ^ ws;
ws                491 drivers/gpu/drm/vc4/vc4_validate_shaders.c 	bool ws = inst & QPU_WS;
ws                503 drivers/gpu/drm/vc4/vc4_validate_shaders.c 	lri_add = waddr_to_live_reg_index(waddr_add, ws);
ws                504 drivers/gpu/drm/vc4/vc4_validate_shaders.c 	lri_mul = waddr_to_live_reg_index(waddr_mul, !ws);
ws                 52 drivers/hwmon/gpio-fan.c static void fan_alarm_notify(struct work_struct *ws)
ws                 55 drivers/hwmon/gpio-fan.c 		container_of(ws, struct gpio_fan_data, alarm_work);
ws                 14 drivers/isdn/mISDN/hwchannel.c dchannel_bh(struct work_struct *ws)
ws                 16 drivers/isdn/mISDN/hwchannel.c 	struct dchannel	*dch  = container_of(ws, struct dchannel, workq);
ws                 37 drivers/isdn/mISDN/hwchannel.c bchannel_bh(struct work_struct *ws)
ws                 39 drivers/isdn/mISDN/hwchannel.c 	struct bchannel	*bch  = container_of(ws, struct bchannel, workq);
ws                117 drivers/leds/led-core.c static void set_brightness_delayed(struct work_struct *ws)
ws                120 drivers/leds/led-core.c 		container_of(ws, struct led_classdev, set_brightness_work);
ws                 28 drivers/lightnvm/pblk-core.c 									ws);
ws               1655 drivers/lightnvm/pblk-core.c 						struct pblk_line_ws, ws);
ws               1685 drivers/lightnvm/pblk-core.c 	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
ws               1686 drivers/lightnvm/pblk-core.c 	queue_work(pblk->r_end_wq, &line_put_ws->ws);
ws               1850 drivers/lightnvm/pblk-core.c 									ws);
ws               1877 drivers/lightnvm/pblk-core.c 	INIT_WORK(&line_ws->ws, work);
ws               1878 drivers/lightnvm/pblk-core.c 	queue_work(wq, &line_ws->ws);
ws                 89 drivers/lightnvm/pblk-gc.c 						struct pblk_line_ws, ws);
ws                179 drivers/lightnvm/pblk-gc.c 									ws);
ws                264 drivers/lightnvm/pblk-gc.c 	INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
ws                265 drivers/lightnvm/pblk-gc.c 	queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);
ws                317 drivers/lightnvm/pblk-gc.c 	INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
ws                318 drivers/lightnvm/pblk-gc.c 	queue_work(gc->gc_reader_wq, &line_ws->ws);
ws                 31 drivers/lightnvm/pblk-init.c 	struct kmem_cache	*ws;
ws                302 drivers/lightnvm/pblk-init.c 	pblk_caches.ws = kmem_cache_create("pblk_blk_ws",
ws                304 drivers/lightnvm/pblk-init.c 	if (!pblk_caches.ws)
ws                329 drivers/lightnvm/pblk-init.c 	kmem_cache_destroy(pblk_caches.ws);
ws                358 drivers/lightnvm/pblk-init.c 	kmem_cache_destroy(c->ws);
ws                441 drivers/lightnvm/pblk-init.c 				     pblk_caches.ws);
ws                705 drivers/lightnvm/pblk.h 	struct work_struct ws;
ws                121 drivers/md/dm-cache-target.c 	struct work_struct ws;
ws                128 drivers/md/dm-cache-target.c 	INIT_WORK(&k->ws, fn);
ws                135 drivers/md/dm-cache-target.c 	queue_work(wq, &k->ws);
ws                177 drivers/md/dm-cache-target.c 	struct work_struct *ws, *tmp;
ws                198 drivers/md/dm-cache-target.c 	list_for_each_entry_safe(ws, tmp, &work_items, entry) {
ws                199 drivers/md/dm-cache-target.c 		k = container_of(ws, struct continuation, ws);
ws                201 drivers/md/dm-cache-target.c 		INIT_LIST_HEAD(&ws->entry); /* to avoid a WARN_ON */
ws                202 drivers/md/dm-cache-target.c 		queue_work(b->wq, ws);
ws                246 drivers/md/dm-cache-target.c 	list_add_tail(&k->ws.entry, &b->work_items);
ws               1167 drivers/md/dm-cache-target.c 	dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws);
ws               1170 drivers/md/dm-cache-target.c static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
ws               1172 drivers/md/dm-cache-target.c 	struct continuation *k = container_of(ws, struct continuation, ws);
ws               1321 drivers/md/dm-cache-target.c static void mg_success(struct work_struct *ws)
ws               1323 drivers/md/dm-cache-target.c 	struct dm_cache_migration *mg = ws_to_mg(ws);
ws               1327 drivers/md/dm-cache-target.c static void mg_update_metadata(struct work_struct *ws)
ws               1330 drivers/md/dm-cache-target.c 	struct dm_cache_migration *mg = ws_to_mg(ws);
ws               1389 drivers/md/dm-cache-target.c static void mg_update_metadata_after_copy(struct work_struct *ws)
ws               1391 drivers/md/dm-cache-target.c 	struct dm_cache_migration *mg = ws_to_mg(ws);
ws               1399 drivers/md/dm-cache-target.c 		mg_update_metadata(ws);
ws               1402 drivers/md/dm-cache-target.c static void mg_upgrade_lock(struct work_struct *ws)
ws               1405 drivers/md/dm-cache-target.c 	struct dm_cache_migration *mg = ws_to_mg(ws);
ws               1426 drivers/md/dm-cache-target.c 			mg_update_metadata(ws);
ws               1430 drivers/md/dm-cache-target.c static void mg_full_copy(struct work_struct *ws)
ws               1432 drivers/md/dm-cache-target.c 	struct dm_cache_migration *mg = ws_to_mg(ws);
ws               1439 drivers/md/dm-cache-target.c 		mg_upgrade_lock(ws);
ws               1447 drivers/md/dm-cache-target.c static void mg_copy(struct work_struct *ws)
ws               1449 drivers/md/dm-cache-target.c 	struct dm_cache_migration *mg = ws_to_mg(ws);
ws               1465 drivers/md/dm-cache-target.c 			mg_full_copy(ws);
ws               1479 drivers/md/dm-cache-target.c 		mg_full_copy(ws);
ws               1510 drivers/md/dm-cache-target.c 		mg_copy(&mg->k.ws);
ws               1559 drivers/md/dm-cache-target.c static void invalidate_completed(struct work_struct *ws)
ws               1561 drivers/md/dm-cache-target.c 	struct dm_cache_migration *mg = ws_to_mg(ws);
ws               1588 drivers/md/dm-cache-target.c static void invalidate_remove(struct work_struct *ws)
ws               1591 drivers/md/dm-cache-target.c 	struct dm_cache_migration *mg = ws_to_mg(ws);
ws               1637 drivers/md/dm-cache-target.c 		queue_work(cache->wq, &mg->k.ws);
ws               1888 drivers/md/dm-cache-target.c static void process_deferred_bios(struct work_struct *ws)
ws               1890 drivers/md/dm-cache-target.c 	struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);
ws               1942 drivers/md/dm-cache-target.c static void do_waker(struct work_struct *ws)
ws               1944 drivers/md/dm-cache-target.c 	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
ws               1952 drivers/md/dm-cache-target.c static void check_migrations(struct work_struct *ws)
ws               1956 drivers/md/dm-cache-target.c 	struct cache *cache = container_of(ws, struct cache, migration_worker);
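
dm-cache-target.c wraps migrations in a "continuation": the work_struct ws is embedded in struct continuation, which is itself embedded in struct dm_cache_migration, so ws_to_mg() is two container_of() steps. A minimal sketch of that double embedding, with the field layout reduced to what the listing shows:

	struct continuation {
		struct work_struct ws;
		/* completion status, etc. */
	};

	struct dm_cache_migration {
		struct continuation k;
		/* ... migration state ... */
	};

	static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
	{
		struct continuation *k = container_of(ws, struct continuation, ws);

		return container_of(k, struct dm_cache_migration, k);
	}
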
ws                 47 drivers/md/dm-era-target.c static void writeset_free(struct writeset *ws)
ws                 49 drivers/md/dm-era-target.c 	vfree(ws->bits);
ws                 72 drivers/md/dm-era-target.c static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
ws                 74 drivers/md/dm-era-target.c 	ws->md.nr_bits = nr_blocks;
ws                 75 drivers/md/dm-era-target.c 	ws->md.root = INVALID_WRITESET_ROOT;
ws                 76 drivers/md/dm-era-target.c 	ws->bits = vzalloc(bitset_size(nr_blocks));
ws                 77 drivers/md/dm-era-target.c 	if (!ws->bits) {
ws                 88 drivers/md/dm-era-target.c static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws)
ws                 92 drivers/md/dm-era-target.c 	memset(ws->bits, 0, bitset_size(ws->md.nr_bits));
ws                 94 drivers/md/dm-era-target.c 	r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
ws                103 drivers/md/dm-era-target.c static bool writeset_marked(struct writeset *ws, dm_block_t block)
ws                105 drivers/md/dm-era-target.c 	return test_bit(block, ws->bits);
ws                133 drivers/md/dm-era-target.c 				 struct writeset *ws, uint32_t block)
ws                137 drivers/md/dm-era-target.c 	if (!test_and_set_bit(block, ws->bits)) {
ws                138 drivers/md/dm-era-target.c 		r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
ws                939 drivers/md/dm-era-target.c 	struct writeset *ws;
ws                942 drivers/md/dm-era-target.c 	ws = rcu_dereference(md->current_writeset);
ws                943 drivers/md/dm-era-target.c 	r = writeset_marked(ws, block);
ws               1307 drivers/md/dm-era-target.c static void do_work(struct work_struct *ws)
ws               1309 drivers/md/dm-era-target.c 	struct era *era = container_of(ws, struct era, worker);
ws               1875 drivers/md/dm-integrity.c 			unsigned ws, we, range_sectors;
ws               1897 drivers/md/dm-integrity.c 			ws = journal_section;
ws               1908 drivers/md/dm-integrity.c 				je = access_journal_entry(ic, ws, we);
ws               1914 drivers/md/dm-integrity.c 					ws++;
ws               1915 drivers/md/dm-integrity.c 					wraparound_section(ic, &ws);
ws               1710 drivers/md/dm-raid.c static void do_table_event(struct work_struct *ws)
ws               1712 drivers/md/dm-raid.c 	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
ws               2409 drivers/md/dm-thin.c static void do_worker(struct work_struct *ws)
ws               2411 drivers/md/dm-thin.c 	struct pool *pool = container_of(ws, struct pool, worker);
ws               2430 drivers/md/dm-thin.c static void do_waker(struct work_struct *ws)
ws               2432 drivers/md/dm-thin.c 	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
ws               2442 drivers/md/dm-thin.c static void do_no_space_timeout(struct work_struct *ws)
ws               2444 drivers/md/dm-thin.c 	struct pool *pool = container_of(to_delayed_work(ws), struct pool,
ws               2461 drivers/md/dm-thin.c static struct pool_work *to_pool_work(struct work_struct *ws)
ws               2463 drivers/md/dm-thin.c 	return container_of(ws, struct pool_work, worker);
ws               2487 drivers/md/dm-thin.c static struct noflush_work *to_noflush(struct work_struct *ws)
ws               2489 drivers/md/dm-thin.c 	return container_of(to_pool_work(ws), struct noflush_work, pw);
ws               2492 drivers/md/dm-thin.c static void do_noflush_start(struct work_struct *ws)
ws               2494 drivers/md/dm-thin.c 	struct noflush_work *w = to_noflush(ws);
ws               2500 drivers/md/dm-thin.c static void do_noflush_stop(struct work_struct *ws)
ws               2502 drivers/md/dm-thin.c 	struct noflush_work *w = to_noflush(ws);
ws                492 drivers/md/md.c static void md_submit_flush_data(struct work_struct *ws);
ws                494 drivers/md/md.c static void submit_flushes(struct work_struct *ws)
ws                496 drivers/md/md.c 	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
ws                529 drivers/md/md.c static void md_submit_flush_data(struct work_struct *ws)
ws                531 drivers/md/md.c 	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
ws                597 drivers/md/md.c static void mddev_delayed_delete(struct work_struct *ws);
ws               2364 drivers/md/md.c static void md_delayed_delete(struct work_struct *ws)
ws               2366 drivers/md/md.c 	struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
ws               5400 drivers/md/md.c static void mddev_delayed_delete(struct work_struct *ws)
ws               5402 drivers/md/md.c 	struct mddev *mddev = container_of(ws, struct mddev, del_work);
ws               8915 drivers/md/md.c static void md_start_sync(struct work_struct *ws)
ws               8917 drivers/md/md.c 	struct mddev *mddev = container_of(ws, struct mddev, del_work);
ws                121 drivers/media/platform/mtk-vpu/mtk_vpu.c 	struct work_struct ws;
ws                360 drivers/media/platform/mtk-vpu/mtk_vpu.c static void vpu_wdt_reset_func(struct work_struct *ws)
ws                362 drivers/media/platform/mtk-vpu/mtk_vpu.c 	struct vpu_wdt *wdt = container_of(ws, struct vpu_wdt, ws);
ws                755 drivers/media/platform/mtk-vpu/mtk_vpu.c 		queue_work(vpu->wdt.wq, &vpu->wdt.ws);
ws                814 drivers/media/platform/mtk-vpu/mtk_vpu.c 	INIT_WORK(&vpu->wdt.ws, vpu_wdt_reset_func);
ws                267 drivers/mtd/nand/raw/marvell_nand.c #define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb)	\
ws                269 drivers/mtd/nand/raw/marvell_nand.c 		.writesize = ws,					\
ws               1531 drivers/net/can/m_can/m_can.c static void m_can_tx_work_queue(struct work_struct *ws)
ws               1533 drivers/net/can/m_can/m_can.c 	struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
ws                573 drivers/net/can/spi/hi311x.c static void hi3110_tx_work_handler(struct work_struct *ws)
ws                575 drivers/net/can/spi/hi311x.c 	struct hi3110_priv *priv = container_of(ws, struct hi3110_priv,
ws                596 drivers/net/can/spi/hi311x.c static void hi3110_restart_work_handler(struct work_struct *ws)
ws                598 drivers/net/can/spi/hi311x.c 	struct hi3110_priv *priv = container_of(ws, struct hi3110_priv,
ws                683 drivers/net/can/spi/mcp251x.c static void mcp251x_tx_work_handler(struct work_struct *ws)
ws                685 drivers/net/can/spi/mcp251x.c 	struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
ws                709 drivers/net/can/spi/mcp251x.c static void mcp251x_restart_work_handler(struct work_struct *ws)
ws                711 drivers/net/can/spi/mcp251x.c 	struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
ws                802 drivers/net/ethernet/microchip/encx24j600.c static void encx24j600_setrx_proc(struct kthread_work *ws)
ws                805 drivers/net/ethernet/microchip/encx24j600.c 			container_of(ws, struct encx24j600_priv, setrx_work);
ws                867 drivers/net/ethernet/microchip/encx24j600.c static void encx24j600_tx_proc(struct kthread_work *ws)
ws                870 drivers/net/ethernet/microchip/encx24j600.c 			container_of(ws, struct encx24j600_priv, tx_work);
ws               1260 drivers/net/ethernet/pensando/ionic/ionic_lif.c static void ionic_tx_timeout_work(struct work_struct *ws)
ws               1262 drivers/net/ethernet/pensando/ionic/ionic_lif.c 	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
ws               2074 drivers/net/ethernet/pensando/ionic/ionic_lif.c static void ionic_lif_notify_work(struct work_struct *ws)
ws                582 drivers/net/wimax/i2400m/driver.c void __i2400m_dev_reset_handle(struct work_struct *ws)
ws                584 drivers/net/wimax/i2400m/driver.c 	struct i2400m *i2400m = container_of(ws, struct i2400m, reset_ws);
ws                590 drivers/net/wimax/i2400m/driver.c 	d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
ws                664 drivers/net/wimax/i2400m/driver.c 		ws, i2400m, reason);
ws                694 drivers/net/wimax/i2400m/driver.c void __i2400m_error_recovery(struct work_struct *ws)
ws                696 drivers/net/wimax/i2400m/driver.c 	struct i2400m *i2400m = container_of(ws, struct i2400m, recovery_ws);
ws                138 drivers/net/wimax/i2400m/netdev.c void i2400m_wake_tx_work(struct work_struct *ws)
ws                141 drivers/net/wimax/i2400m/netdev.c 	struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
ws                152 drivers/net/wimax/i2400m/netdev.c 	d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
ws                192 drivers/net/wimax/i2400m/netdev.c 		ws, i2400m, skb, result);
ws                183 drivers/net/wimax/i2400m/rx.c void i2400m_report_hook_work(struct work_struct *ws)
ws                185 drivers/net/wimax/i2400m/rx.c 	struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws);
ws                490 drivers/net/wimax/i2400m/rx.c 	unsigned ws;
ws                499 drivers/net/wimax/i2400m/rx.c 	roq->ws = 0;
ws                525 drivers/net/wimax/i2400m/rx.c 	r =  ((int) sn - (int) roq->ws) % 2048;
ws                545 drivers/net/wimax/i2400m/rx.c 		unsigned ws, count, sn, nsn, new_ws;
ws                563 drivers/net/wimax/i2400m/rx.c 			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
ws                567 drivers/net/wimax/i2400m/rx.c 			index, e->ws, e->count, e->sn, e->nsn);
ws                572 drivers/net/wimax/i2400m/rx.c 			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
ws                577 drivers/net/wimax/i2400m/rx.c 			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
ws                590 drivers/net/wimax/i2400m/rx.c 			unsigned ws, unsigned count, unsigned sn,
ws                604 drivers/net/wimax/i2400m/rx.c 	e->ws = ws;
ws                672 drivers/net/wimax/i2400m/rx.c 		 roq, roq->ws, nsn, roq_data->sn);
ws                714 drivers/net/wimax/i2400m/rx.c 		roq, roq->ws, skb, nsn, roq_data->sn);
ws                771 drivers/net/wimax/i2400m/rx.c 	roq->ws = sn;
ws                794 drivers/net/wimax/i2400m/rx.c 			     roq->ws, skb_queue_len(&roq->queue),
ws                803 drivers/net/wimax/i2400m/rx.c 	roq->ws = 0;
ws                833 drivers/net/wimax/i2400m/rx.c 			nsn, lbn, roq->ws);
ws                839 drivers/net/wimax/i2400m/rx.c 				     roq->ws, len, lbn, nsn, ~0);
ws                862 drivers/net/wimax/i2400m/rx.c 	old_ws = roq->ws;
ws                866 drivers/net/wimax/i2400m/rx.c 			     old_ws, len, sn, nsn, roq->ws);
ws                900 drivers/net/wimax/i2400m/rx.c 	old_ws = roq->ws;
ws                913 drivers/net/wimax/i2400m/rx.c 			   old_ws, len, sn, nsn, roq->ws);
ws               1046 drivers/net/wimax/i2400m/rx.c 			 ro_type, ro_cin, roq->ws, ro_sn,
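
In the i2400m RX path, roq->ws is the reorder-queue window start, and the expression at rx.c:525 above measures how far a sequence number sits from it in the 2048-entry sequence space. A userspace illustration of that distance; the fold of negative results back into [0, 2047] is an assumption added here for clarity, not copied from the driver:

	#include <stdio.h>

	/* distance of sequence number sn from window start ws in a 2048-wide space */
	static int roq_nsn_sketch(unsigned int ws, unsigned int sn)
	{
		int r = ((int)sn - (int)ws) % 2048;

		if (r < 0)		/* assumption: normalize into [0, 2047] */
			r += 2048;
		return r;
	}

	int main(void)
	{
		printf("%d\n", roq_nsn_sketch(2040, 5));	/* prints 13: wrapped around */
		return 0;
	}
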
ws               2472 drivers/net/wireless/intersil/prism54/isl_ioctl.c 		container_of(work, struct islpci_mgmtframe, ws);
ws                369 drivers/net/wireless/intersil/prism54/islpci_mgt.c 			INIT_WORK(&frame->ws, prism54_process_trap);
ws                370 drivers/net/wireless/intersil/prism54/islpci_mgt.c 			schedule_work(&frame->ws);
ws                101 drivers/net/wireless/intersil/prism54/islpci_mgt.h         struct work_struct ws;	      /* argument for schedule_work() */
ws               1611 drivers/pinctrl/sh-pfc/pfc-r8a7778.c #define SSI_PFC_CTRL(name, sck, ws)		SH_PFC_MUX2(name, sck, ws)
ws                997 drivers/power/supply/power_supply_core.c 				   bool ws)
ws               1058 drivers/power/supply/power_supply_core.c 	rc = device_init_wakeup(dev, ws);
ws                852 drivers/s390/char/tty3270.c 	struct winsize ws;
ws                880 drivers/s390/char/tty3270.c 	ws.ws_row = tp->view.rows - 2;
ws                881 drivers/s390/char/tty3270.c 	ws.ws_col = tp->view.cols;
ws                882 drivers/s390/char/tty3270.c 	tty_do_resize(tty, &ws);
ws                785 drivers/scsi/cxlflash/superpipe.c 	u8 *ws = NULL;
ws                790 drivers/scsi/cxlflash/superpipe.c 	ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
ws                791 drivers/scsi/cxlflash/superpipe.c 	if (unlikely(!ctxi || !lli || !ws)) {
ws                803 drivers/scsi/cxlflash/superpipe.c 	ctxi->rht_needs_ws = ws;
ws                809 drivers/scsi/cxlflash/superpipe.c 	kfree(ws);
ws                 77 drivers/spi/spi-sh.c 	struct work_struct ws;
ws                276 drivers/spi/spi-sh.c 	struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws);
ws                374 drivers/spi/spi-sh.c 	schedule_work(&ss->ws);
ws                419 drivers/spi/spi-sh.c 	flush_work(&ss->ws);
ws                474 drivers/spi/spi-sh.c 	INIT_WORK(&ss->ws, spi_sh_work);
ws                 26 drivers/staging/kpc2000/kpc_dma/dma.c void  ndd_irq_worker(struct work_struct *ws)
ws                 29 drivers/staging/kpc2000/kpc_dma/dma.c 	struct kpc_dma_device *eng = container_of(ws, struct kpc_dma_device, irq_work);
ws                 77 drivers/staging/most/usb/usb.c 	struct work_struct ws;
ws                 83 drivers/staging/most/usb/usb.c #define to_clear_hold_work(w) container_of(w, struct clear_hold_work, ws)
ws                248 drivers/staging/most/usb/usb.c 	cancel_work_sync(&mdev->clear_work[channel].ws);
ws                361 drivers/staging/most/usb/usb.c 			schedule_work(&mdev->clear_work[channel].ws);
ws                515 drivers/staging/most/usb/usb.c 			schedule_work(&mdev->clear_work[channel].ws);
ws                673 drivers/staging/most/usb/usb.c 	INIT_WORK(&mdev->clear_work[channel].ws, wq_clear_halt);
ws                 84 drivers/staging/wilc1000/wilc_hif.c 	void (*fn)(struct work_struct *ws);
ws                435 drivers/staging/wusbcore/devconnect.c static void wusbhc_keep_alive_run(struct work_struct *ws)
ws                437 drivers/staging/wusbcore/devconnect.c 	struct delayed_work *dw = to_delayed_work(ws);
ws                 79 drivers/staging/wusbcore/wa-hc.h extern void wa_urb_enqueue_run(struct work_struct *ws);
ws                 80 drivers/staging/wusbcore/wa-hc.h extern void wa_process_errored_transfers_run(struct work_struct *ws);
ws                 78 drivers/staging/wusbcore/wa-nep.c static void wa_notif_dispatch(struct work_struct *ws)
ws                 82 drivers/staging/wusbcore/wa-nep.c 	struct wa_notif_work *nw = container_of(ws, struct wa_notif_work,
ws               1737 drivers/staging/wusbcore/wa-xfer.c void wa_urb_enqueue_run(struct work_struct *ws)
ws               1739 drivers/staging/wusbcore/wa-xfer.c 	struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
ws               1768 drivers/staging/wusbcore/wa-xfer.c void wa_process_errored_transfers_run(struct work_struct *ws)
ws               1770 drivers/staging/wusbcore/wa-xfer.c 	struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
ws                146 drivers/target/iscsi/iscsi_target_util.c 	struct sbq_wait_state *ws;
ws                153 drivers/target/iscsi/iscsi_target_util.c 	ws = &sbq->ws[0];
ws                155 drivers/target/iscsi/iscsi_target_util.c 		sbitmap_prepare_to_wait(sbq, ws, &wait, state);
ws                164 drivers/target/iscsi/iscsi_target_util.c 	sbitmap_finish_wait(sbq, ws, &wait);
ws                568 drivers/tty/hvc/hvc_console.c 	struct winsize ws;
ws                577 drivers/tty/hvc/hvc_console.c 	ws = hp->ws;
ws                580 drivers/tty/hvc/hvc_console.c 	tty_do_resize(tty, &ws);
ws                778 drivers/tty/hvc/hvc_console.c void __hvc_resize(struct hvc_struct *hp, struct winsize ws)
ws                780 drivers/tty/hvc/hvc_console.c 	hp->ws = ws;
ws                 47 drivers/tty/hvc/hvc_console.h 	struct winsize ws;
ws                 87 drivers/tty/hvc/hvc_console.h extern void __hvc_resize(struct hvc_struct *hp, struct winsize ws);
ws                 89 drivers/tty/hvc/hvc_console.h static inline void hvc_resize(struct hvc_struct *hp, struct winsize ws)
ws                 94 drivers/tty/hvc/hvc_console.h 	__hvc_resize(hp, ws);
ws                303 drivers/tty/pty.c static int pty_resize(struct tty_struct *tty,  struct winsize *ws)
ws                310 drivers/tty/pty.c 	if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
ws                325 drivers/tty/pty.c 	tty->winsize = *ws;
ws                326 drivers/tty/pty.c 	pty->winsize = *ws;	/* Never used so will go away soon */
ws                851 drivers/tty/serial/max310x.c static void max310x_tx_proc(struct work_struct *ws)
ws                853 drivers/tty/serial/max310x.c 	struct max310x_one *one = container_of(ws, struct max310x_one, tx_work);
ws                873 drivers/tty/serial/max310x.c static void max310x_md_proc(struct work_struct *ws)
ws                875 drivers/tty/serial/max310x.c 	struct max310x_one *one = container_of(ws, struct max310x_one, md_work);
ws               1007 drivers/tty/serial/max310x.c static void max310x_rs_proc(struct work_struct *ws)
ws               1009 drivers/tty/serial/max310x.c 	struct max310x_one *one = container_of(ws, struct max310x_one, rs_work);
ws                713 drivers/tty/serial/sc16is7xx.c static void sc16is7xx_ist(struct kthread_work *ws)
ws                715 drivers/tty/serial/sc16is7xx.c 	struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
ws                741 drivers/tty/serial/sc16is7xx.c static void sc16is7xx_tx_proc(struct kthread_work *ws)
ws                743 drivers/tty/serial/sc16is7xx.c 	struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
ws                772 drivers/tty/serial/sc16is7xx.c static void sc16is7xx_reg_proc(struct kthread_work *ws)
ws                774 drivers/tty/serial/sc16is7xx.c 	struct sc16is7xx_one *one = to_sc16is7xx_one(ws, reg_work);
ws               2234 drivers/tty/tty_io.c int tty_do_resize(struct tty_struct *tty, struct winsize *ws)
ws               2240 drivers/tty/tty_io.c 	if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
ws               2249 drivers/tty/tty_io.c 	tty->winsize = *ws;
ws               1305 drivers/tty/vt/vt.c 		struct winsize ws;
ws               1306 drivers/tty/vt/vt.c 		memset(&ws, 0, sizeof(ws));
ws               1307 drivers/tty/vt/vt.c 		ws.ws_row = vc->vc_rows;
ws               1308 drivers/tty/vt/vt.c 		ws.ws_col = vc->vc_cols;
ws               1309 drivers/tty/vt/vt.c 		ws.ws_ypixel = vc->vc_scan_lines;
ws               1310 drivers/tty/vt/vt.c 		tty_do_resize(tty, &ws);
ws               1349 drivers/tty/vt/vt.c static int vt_resize(struct tty_struct *tty, struct winsize *ws)
ws               1355 drivers/tty/vt/vt.c 	ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row);
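
The tty hits show the in-kernel resize path: a driver fills a struct winsize and hands it to tty_do_resize() (vt.c above), which copies it into tty->winsize. A condensed sketch of that pattern, with rows and cols taken from the driver's own state:

	static void notify_resize_sketch(struct tty_struct *tty,
					 unsigned short rows, unsigned short cols)
	{
		struct winsize ws;

		memset(&ws, 0, sizeof(ws));
		ws.ws_row = rows;
		ws.ws_col = cols;
		tty_do_resize(tty, &ws);
	}
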
ws               1013 drivers/usb/core/hub.c static void hub_init_func2(struct work_struct *ws);
ws               1014 drivers/usb/core/hub.c static void hub_init_func3(struct work_struct *ws);
ws               1286 drivers/usb/core/hub.c static void hub_init_func2(struct work_struct *ws)
ws               1288 drivers/usb/core/hub.c 	struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
ws               1293 drivers/usb/core/hub.c static void hub_init_func3(struct work_struct *ws)
ws               1295 drivers/usb/core/hub.c 	struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
ws               1751 drivers/usb/core/message.c static void __usb_queue_reset_device(struct work_struct *ws)
ws               1755 drivers/usb/core/message.c 		container_of(ws, struct usb_interface, reset_ws);
ws                838 fs/afs/cell.c  	struct afs_cell *ws;
ws                843 fs/afs/cell.c  	ws = rcu_access_pointer(net->ws_cell);
ws                846 fs/afs/cell.c  	afs_put_cell(net, ws);
ws                782 fs/btrfs/compression.c static void heuristic_put_workspace(struct list_head *ws)
ws                784 fs/btrfs/compression.c 	btrfs_put_workspace(&heuristic_wsm, ws);
ws                787 fs/btrfs/compression.c static void free_heuristic_ws(struct list_head *ws)
ws                791 fs/btrfs/compression.c 	workspace = list_entry(ws, struct heuristic_ws, list);
ws                801 fs/btrfs/compression.c 	struct heuristic_ws *ws;
ws                803 fs/btrfs/compression.c 	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
ws                804 fs/btrfs/compression.c 	if (!ws)
ws                807 fs/btrfs/compression.c 	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
ws                808 fs/btrfs/compression.c 	if (!ws->sample)
ws                811 fs/btrfs/compression.c 	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
ws                812 fs/btrfs/compression.c 	if (!ws->bucket)
ws                815 fs/btrfs/compression.c 	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
ws                816 fs/btrfs/compression.c 	if (!ws->bucket_b)
ws                819 fs/btrfs/compression.c 	INIT_LIST_HEAD(&ws->list);
ws                820 fs/btrfs/compression.c 	return &ws->list;
ws                822 fs/btrfs/compression.c 	free_heuristic_ws(&ws->list);
ws                872 fs/btrfs/compression.c 	struct list_head *ws;
ws                875 fs/btrfs/compression.c 		ws = wsman->idle_ws.next;
ws                876 fs/btrfs/compression.c 		list_del(ws);
ws                877 fs/btrfs/compression.c 		wsman->ops->free_workspace(ws);
ws                975 fs/btrfs/compression.c void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
ws                991 fs/btrfs/compression.c 		list_add(ws, idle_ws);
ws                998 fs/btrfs/compression.c 	wsm->ops->free_workspace(ws);
ws               1004 fs/btrfs/compression.c static void put_workspace(int type, struct list_head *ws)
ws               1006 fs/btrfs/compression.c 	return btrfs_compress_op[type]->put_workspace(ws);
ws               1247 fs/btrfs/compression.c static u32 shannon_entropy(struct heuristic_ws *ws)
ws               1254 fs/btrfs/compression.c 	sz_base = ilog2_w(ws->sample_size);
ws               1255 fs/btrfs/compression.c 	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
ws               1256 fs/btrfs/compression.c 		p = ws->bucket[i].count;
ws               1261 fs/btrfs/compression.c 	entropy_sum /= ws->sample_size;
ws               1383 fs/btrfs/compression.c static int byte_core_set_size(struct heuristic_ws *ws)
ws               1387 fs/btrfs/compression.c 	const u32 core_set_threshold = ws->sample_size * 90 / 100;
ws               1388 fs/btrfs/compression.c 	struct bucket_item *bucket = ws->bucket;
ws               1391 fs/btrfs/compression.c 	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
ws               1421 fs/btrfs/compression.c static u32 byte_set_size(const struct heuristic_ws *ws)
ws               1427 fs/btrfs/compression.c 		if (ws->bucket[i].count > 0)
ws               1437 fs/btrfs/compression.c 		if (ws->bucket[i].count > 0) {
ws               1447 fs/btrfs/compression.c static bool sample_repeated_patterns(struct heuristic_ws *ws)
ws               1449 fs/btrfs/compression.c 	const u32 half_of_sample = ws->sample_size / 2;
ws               1450 fs/btrfs/compression.c 	const u8 *data = ws->sample;
ws               1456 fs/btrfs/compression.c 				     struct heuristic_ws *ws)
ws               1492 fs/btrfs/compression.c 			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
ws               1504 fs/btrfs/compression.c 	ws->sample_size = curr_sample_pos;
ws               1525 fs/btrfs/compression.c 	struct heuristic_ws *ws;
ws               1530 fs/btrfs/compression.c 	ws = list_entry(ws_list, struct heuristic_ws, list);
ws               1532 fs/btrfs/compression.c 	heuristic_collect_sample(inode, start, end, ws);
ws               1534 fs/btrfs/compression.c 	if (sample_repeated_patterns(ws)) {
ws               1539 fs/btrfs/compression.c 	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
ws               1541 fs/btrfs/compression.c 	for (i = 0; i < ws->sample_size; i++) {
ws               1542 fs/btrfs/compression.c 		byte = ws->sample[i];
ws               1543 fs/btrfs/compression.c 		ws->bucket[byte].count++;
ws               1546 fs/btrfs/compression.c 	i = byte_set_size(ws);
ws               1552 fs/btrfs/compression.c 	i = byte_core_set_size(ws);
ws               1563 fs/btrfs/compression.c 	i = shannon_entropy(ws);
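
The bucket/entropy lines above come from the btrfs compressibility heuristic: a sample of the input is tallied into a per-byte histogram and its Shannon entropy decides whether compression is worth attempting. Below is a minimal userspace sketch of that idea only; it uses floating point, whereas the kernel relies on the fixed-point ilog2_w() approximation, so the numbers are illustrative rather than identical.

	#include <math.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Shannon entropy of a byte sample, scaled to 0..100
	 * (8 bits per byte == maximally random == 100). */
	static unsigned int sample_entropy_percent(const uint8_t *sample, size_t len)
	{
		uint32_t bucket[256] = { 0 };
		double entropy = 0.0;
		size_t i;

		for (i = 0; i < len; i++)
			bucket[sample[i]]++;

		for (i = 0; i < 256; i++) {
			double p;

			if (!bucket[i])
				continue;
			p = (double)bucket[i] / (double)len;
			entropy -= p * log2(p);
		}
		return (unsigned int)(entropy * 100.0 / 8.0);
	}
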
ws                126 fs/btrfs/compression.h void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws);
ws                136 fs/btrfs/compression.h 	void (*put_workspace)(struct list_head *ws);
ws                 81 fs/btrfs/lzo.c static void lzo_put_workspace(struct list_head *ws)
ws                 83 fs/btrfs/lzo.c 	btrfs_put_workspace(&wsm, ws);
ws                 86 fs/btrfs/lzo.c static void lzo_free_workspace(struct list_head *ws)
ws                 88 fs/btrfs/lzo.c 	struct workspace *workspace = list_entry(ws, struct workspace, list);
ws                134 fs/btrfs/lzo.c static int lzo_compress_pages(struct list_head *ws,
ws                142 fs/btrfs/lzo.c 	struct workspace *workspace = list_entry(ws, struct workspace, list);
ws                306 fs/btrfs/lzo.c static int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
ws                308 fs/btrfs/lzo.c 	struct workspace *workspace = list_entry(ws, struct workspace, list);
ws                447 fs/btrfs/lzo.c static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
ws                452 fs/btrfs/lzo.c 	struct workspace *workspace = list_entry(ws, struct workspace, list);
ws                 44 fs/btrfs/zlib.c 	struct list_head *ws = btrfs_get_workspace(&wsm, level);
ws                 45 fs/btrfs/zlib.c 	struct workspace *workspace = list_entry(ws, struct workspace, list);
ws                 49 fs/btrfs/zlib.c 	return ws;
ws                 52 fs/btrfs/zlib.c static void zlib_put_workspace(struct list_head *ws)
ws                 54 fs/btrfs/zlib.c 	btrfs_put_workspace(&wsm, ws);
ws                 57 fs/btrfs/zlib.c static void zlib_free_workspace(struct list_head *ws)
ws                 59 fs/btrfs/zlib.c 	struct workspace *workspace = list_entry(ws, struct workspace, list);
ws                 91 fs/btrfs/zlib.c static int zlib_compress_pages(struct list_head *ws,
ws                 99 fs/btrfs/zlib.c 	struct workspace *workspace = list_entry(ws, struct workspace, list);
ws                231 fs/btrfs/zlib.c static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
ws                233 fs/btrfs/zlib.c 	struct workspace *workspace = list_entry(ws, struct workspace, list);
ws                322 fs/btrfs/zlib.c static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
ws                327 fs/btrfs/zlib.c 	struct workspace *workspace = list_entry(ws, struct workspace, list);
ws                 94 fs/btrfs/zstd.c static void zstd_free_workspace(struct list_head *ws);
ws                173 fs/btrfs/zstd.c 	struct list_head *ws;
ws                187 fs/btrfs/zstd.c 	ws = zstd_alloc_workspace(ZSTD_BTRFS_MAX_LEVEL);
ws                188 fs/btrfs/zstd.c 	if (IS_ERR(ws)) {
ws                193 fs/btrfs/zstd.c 		list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
ws                230 fs/btrfs/zstd.c 	struct list_head *ws;
ws                237 fs/btrfs/zstd.c 			ws = wsm.idle_ws[i].next;
ws                238 fs/btrfs/zstd.c 			workspace = list_to_workspace(ws);
ws                239 fs/btrfs/zstd.c 			list_del_init(ws);
ws                247 fs/btrfs/zstd.c 			return ws;
ws                266 fs/btrfs/zstd.c 	struct list_head *ws;
ws                274 fs/btrfs/zstd.c 	ws = zstd_find_workspace(level);
ws                275 fs/btrfs/zstd.c 	if (ws)
ws                276 fs/btrfs/zstd.c 		return ws;
ws                279 fs/btrfs/zstd.c 	ws = zstd_alloc_workspace(level);
ws                282 fs/btrfs/zstd.c 	if (IS_ERR(ws)) {
ws                292 fs/btrfs/zstd.c 	return ws;
ws                305 fs/btrfs/zstd.c static void zstd_put_workspace(struct list_head *ws)
ws                307 fs/btrfs/zstd.c 	struct workspace *workspace = list_to_workspace(ws);
ws                335 fs/btrfs/zstd.c static void zstd_free_workspace(struct list_head *ws)
ws                337 fs/btrfs/zstd.c 	struct workspace *workspace = list_entry(ws, struct workspace, list);
ws                370 fs/btrfs/zstd.c static int zstd_compress_pages(struct list_head *ws,
ws                378 fs/btrfs/zstd.c 	struct workspace *workspace = list_entry(ws, struct workspace, list);
ws                551 fs/btrfs/zstd.c static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
ws                553 fs/btrfs/zstd.c 	struct workspace *workspace = list_entry(ws, struct workspace, list);
ws                629 fs/btrfs/zstd.c static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
ws                634 fs/btrfs/zstd.c 	struct workspace *workspace = list_entry(ws, struct workspace, list);
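
The lzo, zlib and zstd callbacks above all pass a bare struct list_head * across the generic compression API and recover the real workspace with list_entry(). A standalone sketch of that pattern follows; my_workspace and the simplified list_entry macro are stand-ins for the kernel definitions, kept self-contained so the example builds outside the kernel.

	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	/* list_entry() is container_of(): recover the enclosing structure
	 * from a pointer to its embedded list_head member. */
	#define list_entry(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Hypothetical workspace with an embedded list node, mirroring the
	 * btrfs pattern where only the list_head crosses the generic API. */
	struct my_workspace {
		void *buf;
		struct list_head list;
	};

	static struct my_workspace *to_workspace(struct list_head *ws)
	{
		return list_entry(ws, struct my_workspace, list);
	}
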
ws                170 fs/eventpoll.c 	struct wakeup_source __rcu *ws;
ws                213 fs/eventpoll.c 	struct wakeup_source *ws;
ws                626 fs/eventpoll.c 	return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
ws                632 fs/eventpoll.c 	struct wakeup_source *ws = ep_wakeup_source(epi);
ws                634 fs/eventpoll.c 	if (ws)
ws                635 fs/eventpoll.c 		__pm_stay_awake(ws);
ws                640 fs/eventpoll.c 	return rcu_access_pointer(epi->ws) ? true : false;
ws                646 fs/eventpoll.c 	struct wakeup_source *ws;
ws                649 fs/eventpoll.c 	ws = rcu_dereference(epi->ws);
ws                650 fs/eventpoll.c 	if (ws)
ws                651 fs/eventpoll.c 		__pm_stay_awake(ws);
ws                740 fs/eventpoll.c 	__pm_relax(ep->ws);
ws                860 fs/eventpoll.c 	wakeup_source_unregister(ep->ws);
ws               1459 fs/eventpoll.c 	struct wakeup_source *ws;
ws               1461 fs/eventpoll.c 	if (!epi->ep->ws) {
ws               1462 fs/eventpoll.c 		epi->ep->ws = wakeup_source_register(NULL, "eventpoll");
ws               1463 fs/eventpoll.c 		if (!epi->ep->ws)
ws               1468 fs/eventpoll.c 	ws = wakeup_source_register(NULL, name);
ws               1470 fs/eventpoll.c 	if (!ws)
ws               1472 fs/eventpoll.c 	rcu_assign_pointer(epi->ws, ws);
ws               1480 fs/eventpoll.c 	struct wakeup_source *ws = ep_wakeup_source(epi);
ws               1482 fs/eventpoll.c 	RCU_INIT_POINTER(epi->ws, NULL);
ws               1490 fs/eventpoll.c 	wakeup_source_unregister(ws);
ws               1527 fs/eventpoll.c 		RCU_INIT_POINTER(epi->ws, NULL);
ws               1708 fs/eventpoll.c 	struct wakeup_source *ws;
ws               1734 fs/eventpoll.c 		ws = ep_wakeup_source(epi);
ws               1735 fs/eventpoll.c 		if (ws) {
ws               1736 fs/eventpoll.c 			if (ws->active)
ws               1737 fs/eventpoll.c 				__pm_stay_awake(ep->ws);
ws               1738 fs/eventpoll.c 			__pm_relax(ws);
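
The eventpoll lines show two lookup flavours for the per-epitem wakeup source: an ep->mtx-protected accessor and an RCU-protected fast path that pins the source awake while an event is queued. A condensed sketch of the RCU flavour, written as if inside fs/eventpoll.c (it mirrors the listed ep_pm_stay_awake_rcu() pattern, not the verbatim kernel function):

	/* Sketch only: pin the epitem's wakeup source, if any, under RCU. */
	static void epi_stay_awake_rcu(struct epitem *epi)
	{
		struct wakeup_source *ws;

		rcu_read_lock();
		ws = rcu_dereference(epi->ws);
		if (ws)
			__pm_stay_awake(ws);
		rcu_read_unlock();
	}
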
ws                321 include/acpi/acpi_bus.h 	struct wakeup_source *ws;
ws                 89 include/linux/pm_wakeup.h extern void wakeup_source_destroy(struct wakeup_source *ws);
ws                 90 include/linux/pm_wakeup.h extern void wakeup_source_add(struct wakeup_source *ws);
ws                 91 include/linux/pm_wakeup.h extern void wakeup_source_remove(struct wakeup_source *ws);
ws                 94 include/linux/pm_wakeup.h extern void wakeup_source_unregister(struct wakeup_source *ws);
ws                100 include/linux/pm_wakeup.h extern void __pm_stay_awake(struct wakeup_source *ws);
ws                102 include/linux/pm_wakeup.h extern void __pm_relax(struct wakeup_source *ws);
ws                104 include/linux/pm_wakeup.h extern void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard);
ws                124 include/linux/pm_wakeup.h static inline void wakeup_source_destroy(struct wakeup_source *ws) {}
ws                126 include/linux/pm_wakeup.h static inline void wakeup_source_add(struct wakeup_source *ws) {}
ws                128 include/linux/pm_wakeup.h static inline void wakeup_source_remove(struct wakeup_source *ws) {}
ws                136 include/linux/pm_wakeup.h static inline void wakeup_source_unregister(struct wakeup_source *ws) {}
ws                170 include/linux/pm_wakeup.h static inline void __pm_stay_awake(struct wakeup_source *ws) {}
ws                174 include/linux/pm_wakeup.h static inline void __pm_relax(struct wakeup_source *ws) {}
ws                178 include/linux/pm_wakeup.h static inline void pm_wakeup_ws_event(struct wakeup_source *ws,
ws                186 include/linux/pm_wakeup.h static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
ws                188 include/linux/pm_wakeup.h 	return pm_wakeup_ws_event(ws, msec, false);
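
Taken together, the pm_wakeup.h prototypes above form the usual lifecycle: register a named source, mark it active while work is pending (or arm a timed event), then relax and unregister. A hedged kernel-style sketch of that sequence; the "my-driver" name and the surrounding driver hooks are illustrative, not taken from any listed file.

	#include <linux/errno.h>
	#include <linux/pm_wakeup.h>

	static struct wakeup_source *my_ws;

	static int my_driver_init(void)
	{
		my_ws = wakeup_source_register(NULL, "my-driver");
		if (!my_ws)
			return -ENOMEM;
		return 0;
	}

	static void my_driver_event(void)
	{
		/* Keep the system awake while the event is handled... */
		__pm_stay_awake(my_ws);
		/* ...or, for fire-and-forget events, arm a timed wakeup (2 s). */
		__pm_wakeup_event(my_ws, 2000);
	}

	static void my_driver_done(void)
	{
		__pm_relax(my_ws);
	}

	static void my_driver_exit(void)
	{
		wakeup_source_unregister(my_ws);
	}
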
ws                125 include/linux/sbitmap.h 	struct sbq_wait_state *ws;
ws                385 include/linux/sbitmap.h 	kfree(sbq->ws);
ws                520 include/linux/sbitmap.h 	struct sbq_wait_state *ws;
ws                522 include/linux/sbitmap.h 	ws = &sbq->ws[atomic_read(wait_index)];
ws                524 include/linux/sbitmap.h 	return ws;
ws                571 include/linux/sbitmap.h 				struct sbq_wait_state *ws,
ws                577 include/linux/sbitmap.h void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
ws                584 include/linux/sbitmap.h 			    struct sbq_wait_state *ws,
ws                498 include/linux/tty.h extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
ws                287 include/linux/tty_driver.h 	int (*resize)(struct tty_struct *tty, struct winsize *ws);
ws                268 kernel/bpf/core.c 	u32 ws[SHA_WORKSPACE_WORDS];
ws                281 kernel/bpf/core.c 	memset(ws, 0, sizeof(ws));
ws                323 kernel/bpf/core.c 		sha_transform(digest, todo, ws);
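
The kernel/bpf/core.c lines feed the program image through the generic sha_transform() helper in 64-byte blocks, with a SHA_WORKSPACE_WORDS scratch array. A simplified sketch of that loop over an already padded buffer, assuming the pre-5.8 cryptohash.h helpers shown here; the real bpf_prog_calc_tag() also appends the SHA-1 0x80/length padding, which is omitted.

	#include <linux/cryptohash.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Hash a buffer whose length is already a multiple of
	 * SHA_MESSAGE_BYTES (64). */
	static void hash_blocks(u32 *digest, const char *data, size_t len)
	{
		u32 ws[SHA_WORKSPACE_WORDS];
		size_t i;

		sha_init(digest);
		memset(ws, 0, sizeof(ws));
		for (i = 0; i < len; i += SHA_MESSAGE_BYTES)
			sha_transform(digest, data + i, ws);
	}
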
ws                 30 kernel/power/wakelock.c 	struct wakeup_source	*ws;
ws                 49 kernel/power/wakelock.c 		if (wl->ws->active == show_active)
ws                115 kernel/power/wakelock.c 		spin_lock_irq(&wl->ws->lock);
ws                116 kernel/power/wakelock.c 		idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws->last_time));
ws                117 kernel/power/wakelock.c 		active = wl->ws->active;
ws                118 kernel/power/wakelock.c 		spin_unlock_irq(&wl->ws->lock);
ws                124 kernel/power/wakelock.c 			wakeup_source_unregister(wl->ws);
ws                191 kernel/power/wakelock.c 	wl->ws = wakeup_source_register(NULL, wl->name);
ws                192 kernel/power/wakelock.c 	if (!wl->ws) {
ws                197 kernel/power/wakelock.c 	wl->ws->last_time = ktime_get();
ws                242 kernel/power/wakelock.c 		__pm_wakeup_event(wl->ws, timeout_ms);
ws                244 kernel/power/wakelock.c 		__pm_stay_awake(wl->ws);
ws                280 kernel/power/wakelock.c 	__pm_relax(wl->ws);
ws               7623 kernel/sched/core.c 		u64 ws = 0;
ws               7627 kernel/sched/core.c 			ws += schedstat_val(tg->se[i]->statistics.wait_sum);
ws               7629 kernel/sched/core.c 		seq_printf(sf, "wait_sum %llu\n", ws);
ws                 58 kernel/time/alarmtimer.c static struct wakeup_source *ws;
ws                116 kernel/time/alarmtimer.c 		ws = __ws;
ws                293 kernel/time/alarmtimer.c 		__pm_wakeup_event(ws, 2 * MSEC_PER_SEC);
ws                308 kernel/time/alarmtimer.c 		__pm_wakeup_event(ws, MSEC_PER_SEC);
ws                 98 lib/reed_solomon/test_rslib.c static void free_ws(struct wspace *ws)
ws                100 lib/reed_solomon/test_rslib.c 	if (!ws)
ws                103 lib/reed_solomon/test_rslib.c 	kfree(ws->errlocs);
ws                104 lib/reed_solomon/test_rslib.c 	kfree(ws->c);
ws                105 lib/reed_solomon/test_rslib.c 	kfree(ws);
ws                111 lib/reed_solomon/test_rslib.c 	struct wspace *ws;
ws                114 lib/reed_solomon/test_rslib.c 	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
ws                115 lib/reed_solomon/test_rslib.c 	if (!ws)
ws                118 lib/reed_solomon/test_rslib.c 	ws->c = kmalloc_array(2 * (nn + nroots),
ws                120 lib/reed_solomon/test_rslib.c 	if (!ws->c)
ws                123 lib/reed_solomon/test_rslib.c 	ws->r = ws->c + nn;
ws                124 lib/reed_solomon/test_rslib.c 	ws->s = ws->r + nn;
ws                125 lib/reed_solomon/test_rslib.c 	ws->corr = ws->s + nroots;
ws                127 lib/reed_solomon/test_rslib.c 	ws->errlocs = kmalloc_array(nn + nroots, sizeof(int), GFP_KERNEL);
ws                128 lib/reed_solomon/test_rslib.c 	if (!ws->errlocs)
ws                131 lib/reed_solomon/test_rslib.c 	ws->derrlocs = ws->errlocs + nn;
ws                132 lib/reed_solomon/test_rslib.c 	return ws;
ws                135 lib/reed_solomon/test_rslib.c 	free_ws(ws);
ws                151 lib/reed_solomon/test_rslib.c static int get_rcw_we(struct rs_control *rs, struct wspace *ws,
ws                155 lib/reed_solomon/test_rslib.c 	int *derrlocs = ws->derrlocs;
ws                156 lib/reed_solomon/test_rslib.c 	int *errlocs = ws->errlocs;
ws                159 lib/reed_solomon/test_rslib.c 	uint16_t *c = ws->c;
ws                160 lib/reed_solomon/test_rslib.c 	uint16_t *r = ws->r;
ws                262 lib/reed_solomon/test_rslib.c 		struct wspace *ws, int method)
ws                265 lib/reed_solomon/test_rslib.c 	int *derrlocs = ws->derrlocs;
ws                266 lib/reed_solomon/test_rslib.c 	int *errlocs = ws->errlocs;
ws                267 lib/reed_solomon/test_rslib.c 	uint16_t *corr = ws->corr;
ws                268 lib/reed_solomon/test_rslib.c 	uint16_t *c = ws->c;
ws                269 lib/reed_solomon/test_rslib.c 	uint16_t *r = ws->r;
ws                270 lib/reed_solomon/test_rslib.c 	uint16_t *s = ws->s;
ws                275 lib/reed_solomon/test_rslib.c 		nerrs = get_rcw_we(rs, ws, len, errs, eras);
ws                313 lib/reed_solomon/test_rslib.c static int ex_rs_helper(struct rs_control *rs, struct wspace *ws,
ws                331 lib/reed_solomon/test_rslib.c 			test_uc(rs, len, errs, eras, trials, &stat, ws, method);
ws                349 lib/reed_solomon/test_rslib.c static int exercise_rs(struct rs_control *rs, struct wspace *ws,
ws                360 lib/reed_solomon/test_rslib.c 		retval |= ex_rs_helper(rs, ws, len, trials, i);
ws                368 lib/reed_solomon/test_rslib.c 		struct wspace *ws)
ws                372 lib/reed_solomon/test_rslib.c 	int *derrlocs = ws->derrlocs;
ws                373 lib/reed_solomon/test_rslib.c 	uint16_t *corr = ws->corr;
ws                374 lib/reed_solomon/test_rslib.c 	uint16_t *r = ws->r;
ws                378 lib/reed_solomon/test_rslib.c 		get_rcw_we(rs, ws, len, errs, eras);
ws                407 lib/reed_solomon/test_rslib.c static int exercise_rs_bc(struct rs_control *rs, struct wspace *ws,
ws                424 lib/reed_solomon/test_rslib.c 			test_bc(rs, len, errs, eras, trials, &stat, ws);
ws                450 lib/reed_solomon/test_rslib.c 	struct wspace *ws;
ws                457 lib/reed_solomon/test_rslib.c 	ws = alloc_ws(rsc->codec);
ws                458 lib/reed_solomon/test_rslib.c 	if (!ws)
ws                475 lib/reed_solomon/test_rslib.c 		retval |= exercise_rs(rsc, ws, len, e->ntrials);
ws                477 lib/reed_solomon/test_rslib.c 			retval |= exercise_rs_bc(rsc, ws, len, e->ntrials);
ws                480 lib/reed_solomon/test_rslib.c 	free_ws(ws);
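
alloc_ws() above uses a common trick: one allocation of 2 * (nn + nroots) symbols is carved into the c/r/s/corr buffers by pointer arithmetic, so a single kfree() of ws->c releases all four regions. A small userspace sketch of the same carving (the struct layout mirrors only what the listed lines imply; the separate errlocs/derrlocs block is left out):

	#include <stdint.h>
	#include <stdlib.h>

	struct wspace_demo {
		uint16_t *c;	/* encoded codeword, nn symbols */
		uint16_t *r;	/* received/corrupted copy, nn  */
		uint16_t *s;	/* syndromes, nroots            */
		uint16_t *corr;	/* correction values, nroots    */
	};

	static struct wspace_demo *alloc_ws_demo(int nn, int nroots)
	{
		struct wspace_demo *ws = calloc(1, sizeof(*ws));

		if (!ws)
			return NULL;
		/* One block, sliced as nn + nn + nroots + nroots. */
		ws->c = calloc(2 * (nn + nroots), sizeof(*ws->c));
		if (!ws->c) {
			free(ws);
			return NULL;
		}
		ws->r = ws->c + nn;
		ws->s = ws->r + nn;
		ws->corr = ws->s + nroots;
		return ws;
	}

	static void free_ws_demo(struct wspace_demo *ws)
	{
		if (!ws)
			return;
		free(ws->c);	/* releases c, r, s and corr together */
		free(ws);
	}
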
ws                395 lib/sbitmap.c  	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
ws                396 lib/sbitmap.c  	if (!sbq->ws) {
ws                403 lib/sbitmap.c  		init_waitqueue_head(&sbq->ws[i].wait);
ws                404 lib/sbitmap.c  		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
ws                427 lib/sbitmap.c  			atomic_set(&sbq->ws[i].wait_cnt, 1);
ws                514 lib/sbitmap.c  		struct sbq_wait_state *ws = &sbq->ws[wake_index];
ws                516 lib/sbitmap.c  		if (waitqueue_active(&ws->wait)) {
ws                519 lib/sbitmap.c  			return ws;
ws                530 lib/sbitmap.c  	struct sbq_wait_state *ws;
ws                534 lib/sbitmap.c  	ws = sbq_wake_ptr(sbq);
ws                535 lib/sbitmap.c  	if (!ws)
ws                538 lib/sbitmap.c  	wait_cnt = atomic_dec_return(&ws->wait_cnt);
ws                556 lib/sbitmap.c  		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
ws                559 lib/sbitmap.c  			wake_up_nr(&ws->wait, wake_batch);
ws                617 lib/sbitmap.c  		struct sbq_wait_state *ws = &sbq->ws[wake_index];
ws                619 lib/sbitmap.c  		if (waitqueue_active(&ws->wait))
ws                620 lib/sbitmap.c  			wake_up(&ws->wait);
ws                650 lib/sbitmap.c  		struct sbq_wait_state *ws = &sbq->ws[i];
ws                653 lib/sbitmap.c  			   atomic_read(&ws->wait_cnt),
ws                654 lib/sbitmap.c  			   waitqueue_active(&ws->wait) ? "active" : "inactive");
ws                664 lib/sbitmap.c  			    struct sbq_wait_state *ws,
ws                670 lib/sbitmap.c  		add_wait_queue(&ws->wait, &sbq_wait->wait);
ws                686 lib/sbitmap.c  			     struct sbq_wait_state *ws,
ws                693 lib/sbitmap.c  	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
ws                697 lib/sbitmap.c  void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
ws                700 lib/sbitmap.c  	finish_wait(&ws->wait, &sbq_wait->wait);
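
The sbitmap_queue lines above pair a per-queue array of wait states with prepare/finish helpers. A hedged sketch of a blocking bit allocation built on those helpers follows; the wait_index argument, the retry loop and the availability of DEFINE_SBQ_WAIT alongside the listed prepare/finish functions are assumptions modelled on the listing, not copied from any one caller.

	#include <linux/sbitmap.h>
	#include <linux/sched.h>

	/* Sketch: sleep on one of the queue's wait states until a bit frees up. */
	static int get_bit_blocking(struct sbitmap_queue *sbq, atomic_t *wait_index)
	{
		struct sbq_wait_state *ws = sbq_wait_ptr(sbq, wait_index);
		DEFINE_SBQ_WAIT(wait);
		int nr;

		for (;;) {
			nr = __sbitmap_queue_get(sbq);
			if (nr >= 0)
				break;

			sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);

			/* Recheck after queueing to avoid a lost wakeup. */
			nr = __sbitmap_queue_get(sbq);
			if (nr >= 0)
				break;

			io_schedule();
			sbitmap_finish_wait(sbq, ws, &wait);
			ws = sbq_wait_ptr(sbq, wait_index);
		}
		sbitmap_finish_wait(sbq, ws, &wait);
		return nr;
	}
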
ws                436 net/ipv4/tcp_output.c 	u8 ws;			/* window scale, 0 to disable */
ws                504 net/ipv4/tcp_output.c 			       opts->ws);
ws                629 net/ipv4/tcp_output.c 		opts->ws = tp->rx_opt.rcv_wscale;
ws                689 net/ipv4/tcp_output.c 		opts->ws = ireq->rcv_wscale;
ws                 15 sound/core/pcm_iec958.c 	unsigned int fs, ws;
ws                 49 sound/core/pcm_iec958.c 			ws = IEC958_AES4_CON_WORDLEN_20_16;
ws                 52 sound/core/pcm_iec958.c 			ws = IEC958_AES4_CON_WORDLEN_22_18;
ws                 55 sound/core/pcm_iec958.c 			ws = IEC958_AES4_CON_WORDLEN_20_16 |
ws                 60 sound/core/pcm_iec958.c 			ws = IEC958_AES4_CON_WORDLEN_24_20 |
ws                 77 sound/core/pcm_iec958.c 		cs[4] = ws;
ws                 85 sound/soc/sh/rcar/adg.c 	int ws = id;
ws                 92 sound/soc/sh/rcar/adg.c 			ws = 0;
ws                 95 sound/soc/sh/rcar/adg.c 			ws = 3;
ws                 98 sound/soc/sh/rcar/adg.c 			ws = 7;
ws                103 sound/soc/sh/rcar/adg.c 	return (0x6 + ws) << 8;
ws                173 tools/bpf/bpftool/main.c 	static const char ws[] = " \t\r\n";
ws                179 tools/bpf/bpftool/main.c 		cp += strspn(cp, ws);
ws                205 tools/bpf/bpftool/main.c 			cp += strcspn(cp, ws);
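
tools/bpf/bpftool/main.c splits batch-file lines on whitespace by alternating strspn() (skip separators) and strcspn() (span a token). A standalone sketch of that tokenizing loop; the argv array and its size limit are arbitrary choices for the example.

	#include <string.h>

	/* Split 'line' in place into whitespace-separated tokens. */
	static int split_args(char *line, char **argv, int max_args)
	{
		static const char ws[] = " \t\r\n";
		char *cp = line;
		int argc = 0;

		while (*cp) {
			cp += strspn(cp, ws);		/* skip leading separators */
			if (!*cp)
				break;
			if (argc == max_args)
				return -1;		/* too many tokens */
			argv[argc++] = cp;
			cp += strcspn(cp, ws);		/* advance past the token  */
			if (*cp)
				*cp++ = '\0';		/* terminate it in place   */
		}
		return argc;
	}
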
ws                 82 tools/lib/subcmd/help.c static void get_term_dimensions(struct winsize *ws)
ws                 87 tools/lib/subcmd/help.c 		ws->ws_row = atoi(s);
ws                 90 tools/lib/subcmd/help.c 			ws->ws_col = atoi(s);
ws                 91 tools/lib/subcmd/help.c 			if (ws->ws_row && ws->ws_col)
ws                 96 tools/lib/subcmd/help.c 	if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
ws                 97 tools/lib/subcmd/help.c 	    ws->ws_row && ws->ws_col)
ws                100 tools/lib/subcmd/help.c 	ws->ws_row = 25;
ws                101 tools/lib/subcmd/help.c 	ws->ws_col = 80;
ws               1969 tools/perf/util/parse-events.c 	struct winsize ws;
ws               1971 tools/perf/util/parse-events.c 	get_term_dimensions(&ws);
ws               1972 tools/perf/util/parse-events.c 	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
ws                  8 tools/perf/util/term.c void get_term_dimensions(struct winsize *ws)
ws                 13 tools/perf/util/term.c 		ws->ws_row = atoi(s);
ws                 16 tools/perf/util/term.c 			ws->ws_col = atoi(s);
ws                 17 tools/perf/util/term.c 			if (ws->ws_row && ws->ws_col)
ws                 22 tools/perf/util/term.c 	if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
ws                 23 tools/perf/util/term.c 	    ws->ws_row && ws->ws_col)
ws                 26 tools/perf/util/term.c 	ws->ws_row = 25;
ws                 27 tools/perf/util/term.c 	ws->ws_col = 80;
ws                  8 tools/perf/util/term.h void get_term_dimensions(struct winsize *ws);
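
Both copies of get_term_dimensions() above follow the same order: honour the LINES/COLUMNS environment variables, fall back to the TIOCGWINSZ ioctl on stdout, and finally default to 80x25. A self-contained userspace version of that logic for reference (not the perf code verbatim):

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	static void term_dimensions(struct winsize *ws)
	{
		const char *s = getenv("LINES");

		if (s) {
			ws->ws_row = atoi(s);
			s = getenv("COLUMNS");
			if (s) {
				ws->ws_col = atoi(s);
				if (ws->ws_row && ws->ws_col)
					return;
			}
		}

		if (ioctl(STDOUT_FILENO, TIOCGWINSZ, ws) == 0 &&
		    ws->ws_row && ws->ws_col)
			return;

		/* Last resort: the classic 80x25 terminal. */
		ws->ws_row = 25;
		ws->ws_col = 80;
	}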