oc                378 arch/arm/mach-omap2/display.c 	struct omap_hwmod_opt_clk *oc;
oc                387 arch/arm/mach-omap2/display.c 	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
oc                388 arch/arm/mach-omap2/display.c 		if (oc->_clk)
oc                389 arch/arm/mach-omap2/display.c 			clk_prepare_enable(oc->_clk);
oc                414 arch/arm/mach-omap2/display.c 	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
oc                415 arch/arm/mach-omap2/display.c 		if (oc->_clk)
oc                416 arch/arm/mach-omap2/display.c 			clk_disable_unprepare(oc->_clk);
oc                942 arch/arm/mach-omap2/omap_hwmod.c 	struct omap_hwmod_opt_clk *oc;
oc                947 arch/arm/mach-omap2/omap_hwmod.c 	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) {
oc                948 arch/arm/mach-omap2/omap_hwmod.c 		c = clk_get(NULL, oc->clk);
oc                951 arch/arm/mach-omap2/omap_hwmod.c 				oh->name, oc->clk);
oc                955 arch/arm/mach-omap2/omap_hwmod.c 		oc->_clk = c;
oc                964 arch/arm/mach-omap2/omap_hwmod.c 		clk_prepare(oc->_clk);
oc                972 arch/arm/mach-omap2/omap_hwmod.c 	struct omap_hwmod_opt_clk *oc;
oc                977 arch/arm/mach-omap2/omap_hwmod.c 	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
oc                978 arch/arm/mach-omap2/omap_hwmod.c 		if (oc->_clk) {
oc                979 arch/arm/mach-omap2/omap_hwmod.c 			pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
oc                980 arch/arm/mach-omap2/omap_hwmod.c 				 __clk_get_name(oc->_clk));
oc                981 arch/arm/mach-omap2/omap_hwmod.c 			clk_enable(oc->_clk);
oc                987 arch/arm/mach-omap2/omap_hwmod.c 	struct omap_hwmod_opt_clk *oc;
oc                992 arch/arm/mach-omap2/omap_hwmod.c 	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
oc                993 arch/arm/mach-omap2/omap_hwmod.c 		if (oc->_clk) {
oc                994 arch/arm/mach-omap2/omap_hwmod.c 			pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
oc                995 arch/arm/mach-omap2/omap_hwmod.c 				 __clk_get_name(oc->_clk));
oc                996 arch/arm/mach-omap2/omap_hwmod.c 			clk_disable(oc->_clk);
oc                 43 arch/s390/include/asm/eadm.h 	u8 oc:4;
oc                153 arch/s390/include/asm/pci_clp.h 	u8 oc;				/* operation controls */
oc                262 arch/s390/pci/pci_clp.c 		rrb->request.oc = command;
oc               5156 drivers/block/drbd/drbd_receiver.c 	enum drbd_conns oc;
oc               5200 drivers/block/drbd/drbd_receiver.c 	oc = connection->cstate;
oc               5201 drivers/block/drbd/drbd_receiver.c 	if (oc >= C_UNCONNECTED)
oc               5206 drivers/block/drbd/drbd_receiver.c 	if (oc == C_DISCONNECTING)
oc                974 drivers/block/drbd/drbd_state.c is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
oc                977 drivers/block/drbd/drbd_state.c 	if (oc == nc)
oc                981 drivers/block/drbd/drbd_state.c 	if (oc == C_STANDALONE && nc == C_DISCONNECTING)
oc                985 drivers/block/drbd/drbd_state.c 	if (oc == C_STANDALONE && nc != C_UNCONNECTED)
oc                990 drivers/block/drbd/drbd_state.c 	if (oc < C_WF_REPORT_PARAMS && nc >= C_CONNECTED)
oc                994 drivers/block/drbd/drbd_state.c 	if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
oc                998 drivers/block/drbd/drbd_state.c 	if (oc == C_DISCONNECTING && nc != C_STANDALONE)
oc               2029 drivers/block/drbd/drbd_state.c 	enum drbd_conns oc;
oc               2042 drivers/block/drbd/drbd_state.c 	enum drbd_conns oc = acscw->oc;
oc               2052 drivers/block/drbd/drbd_state.c 	if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
oc               2055 drivers/block/drbd/drbd_state.c 	if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
oc               2289 drivers/block/drbd/drbd_state.c 	enum drbd_conns oc = connection->cstate;
oc               2295 drivers/block/drbd/drbd_state.c 		rv = is_valid_conn_transition(oc, val.conn);
oc               2304 drivers/block/drbd/drbd_state.c 	if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
oc               2347 drivers/block/drbd/drbd_state.c 		acscw->oc = os.conn;
oc               2372 drivers/block/drbd/drbd_state.c 		drbd_err(connection, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
oc                 39 drivers/net/ethernet/brocade/bna/bfa_cs.h #define bfa_fsm_state_decl(oc, st, otype, etype)			\
oc                 40 drivers/net/ethernet/brocade/bna/bfa_cs.h 	static void oc ## _sm_ ## st(otype * fsm, etype event);		\
oc                 41 drivers/net/ethernet/brocade/bna/bfa_cs.h 	static void oc ## _sm_ ## st ## _entry(otype * fsm)
oc                 98 drivers/net/ethernet/mscc/ocelot_ace.c static u32 vcap_s2_read_update_ctrl(struct ocelot *oc)
oc                100 drivers/net/ethernet/mscc/ocelot_ace.c 	return ocelot_read(oc, S2_CORE_UPDATE_CTRL);
oc                103 drivers/net/ethernet/mscc/ocelot_ace.c static void vcap_cmd(struct ocelot *oc, u16 ix, int cmd, int sel)
oc                121 drivers/net/ethernet/mscc/ocelot_ace.c 	ocelot_write(oc, value, S2_CORE_UPDATE_CTRL);
oc                122 drivers/net/ethernet/mscc/ocelot_ace.c 	readx_poll_timeout(vcap_s2_read_update_ctrl, oc, value,
oc                128 drivers/net/ethernet/mscc/ocelot_ace.c static void vcap_row_cmd(struct ocelot *oc, u32 row, int cmd, int sel)
oc                130 drivers/net/ethernet/mscc/ocelot_ace.c 	vcap_cmd(oc, vcap_is2.entry_count - row - 1, cmd, sel);
oc                133 drivers/net/ethernet/mscc/ocelot_ace.c static void vcap_entry2cache(struct ocelot *oc, struct vcap_data *data)
oc                138 drivers/net/ethernet/mscc/ocelot_ace.c 		ocelot_write_rix(oc, data->entry[i], S2_CACHE_ENTRY_DAT, i);
oc                139 drivers/net/ethernet/mscc/ocelot_ace.c 		ocelot_write_rix(oc, ~data->mask[i], S2_CACHE_MASK_DAT, i);
oc                141 drivers/net/ethernet/mscc/ocelot_ace.c 	ocelot_write(oc, data->tg, S2_CACHE_TG_DAT);
oc                144 drivers/net/ethernet/mscc/ocelot_ace.c static void vcap_cache2entry(struct ocelot *oc, struct vcap_data *data)
oc                149 drivers/net/ethernet/mscc/ocelot_ace.c 		data->entry[i] = ocelot_read_rix(oc, S2_CACHE_ENTRY_DAT, i);
oc                151 drivers/net/ethernet/mscc/ocelot_ace.c 		data->mask[i] = ~ocelot_read_rix(oc, S2_CACHE_MASK_DAT, i);
oc                153 drivers/net/ethernet/mscc/ocelot_ace.c 	data->tg = ocelot_read(oc, S2_CACHE_TG_DAT);
oc                156 drivers/net/ethernet/mscc/ocelot_ace.c static void vcap_action2cache(struct ocelot *oc, struct vcap_data *data)
oc                168 drivers/net/ethernet/mscc/ocelot_ace.c 		ocelot_write_rix(oc, data->action[i], S2_CACHE_ACTION_DAT, i);
oc                171 drivers/net/ethernet/mscc/ocelot_ace.c 		ocelot_write_rix(oc, data->counter[i], S2_CACHE_CNT_DAT, i);
oc                174 drivers/net/ethernet/mscc/ocelot_ace.c static void vcap_cache2action(struct ocelot *oc, struct vcap_data *data)
oc                179 drivers/net/ethernet/mscc/ocelot_ace.c 		data->action[i] = ocelot_read_rix(oc, S2_CACHE_ACTION_DAT, i);
oc                182 drivers/net/ethernet/mscc/ocelot_ace.c 		data->counter[i] = ocelot_read_rix(oc, S2_CACHE_CNT_DAT, i);
oc                146 drivers/pinctrl/pinctrl-as3722.c 	FUNCTION_GROUP(oc-pg-sd0, OC_PG_SD0),
oc                147 drivers/pinctrl/pinctrl-as3722.c 	FUNCTION_GROUP(oc-pg-sd6, OC_PG_SD6),
oc                197 drivers/s390/block/scm_blk.c 	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
oc               1360 drivers/s390/cio/chsc.c 	brinfo_area->oc    = 0; /* Store-network-bridging-information list */
oc                233 drivers/s390/cio/chsc.h 	u8 oc;
oc                187 drivers/scsi/bfa/bfa_cs.h #define bfa_sm_state_decl(oc, st, otype, etype)		\
oc                188 drivers/scsi/bfa/bfa_cs.h 	static void oc ## _sm_ ## st(otype * fsm, etype event)
oc                216 drivers/scsi/bfa/bfa_cs.h #define bfa_fsm_state_decl(oc, st, otype, etype)		\
oc                217 drivers/scsi/bfa/bfa_cs.h 	static void oc ## _sm_ ## st(otype * fsm, etype event);      \
oc                218 drivers/scsi/bfa/bfa_cs.h 	static void oc ## _sm_ ## st ## _entry(otype * fsm)
oc               1901 drivers/thermal/tegra/soctherm.c 	struct soctherm_oc_cfg *oc = &ts->throt_cfgs[throt].oc_cfg;
oc               1903 drivers/thermal/tegra/soctherm.c 	if (oc->mode == OC_THROTTLE_MODE_DISABLED)
oc               1907 drivers/thermal/tegra/soctherm.c 	r = REG_SET_MASK(r, OC1_CFG_THROTTLE_MODE_MASK, oc->mode);
oc               1908 drivers/thermal/tegra/soctherm.c 	r = REG_SET_MASK(r, OC1_CFG_ALARM_POLARITY_MASK, oc->active_low);
oc               1911 drivers/thermal/tegra/soctherm.c 	writel(oc->throt_period, ts->regs + ALARM_THROTTLE_PERIOD(throt));
oc               1912 drivers/thermal/tegra/soctherm.c 	writel(oc->alarm_cnt_thresh, ts->regs + ALARM_CNT_THRESHOLD(throt));
oc               1913 drivers/thermal/tegra/soctherm.c 	writel(oc->alarm_filter, ts->regs + ALARM_FILTER(throt));
oc               1914 drivers/thermal/tegra/soctherm.c 	soctherm_oc_intr_enable(ts, throt, oc->intr_en);
oc                364 drivers/tty/sysrq.c 	struct oom_control oc = {
oc                373 drivers/tty/sysrq.c 	if (!out_of_memory(&oc))
oc               5270 fs/dlm/lock.c  	int error = 0, mstype, err, oc, ou;
oc               5288 fs/dlm/lock.c  		oc = is_overlap_cancel(lkb);
oc               5296 fs/dlm/lock.c  			  dlm_dir_nodeid(r), oc, ou);
oc               5312 fs/dlm/lock.c  		if (oc || ou) {
oc               5322 fs/dlm/lock.c  				if (oc) {
oc               5352 fs/dlm/lock.c  				  dlm_dir_nodeid(r), oc, ou);
oc               5845 fs/nfsd/nfs4state.c 	struct nfsd4_open_confirm *oc = &u->open_confirm;
oc               5859 fs/nfsd/nfs4state.c 					oc->oc_seqid, &oc->oc_req_stateid,
oc               5870 fs/nfsd/nfs4state.c 	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
oc               5873 fs/nfsd/nfs4state.c 		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
oc               3441 fs/nfsd/nfs4xdr.c nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_confirm *oc)
oc               3445 fs/nfsd/nfs4xdr.c 	return nfsd4_encode_stateid(xdr, &oc->oc_resp_stateid);
oc                113 include/linux/oom.h extern bool out_of_memory(struct oom_control *oc);
oc               1591 mm/memcontrol.c 	struct oom_control oc = {
oc               1606 mm/memcontrol.c 	ret = should_force_charge() || out_of_memory(&oc);
oc                 67 mm/oom_kill.c  static inline bool is_memcg_oom(struct oom_control *oc)
oc                 69 mm/oom_kill.c  	return oc->memcg != NULL;
oc                 86 mm/oom_kill.c  				struct oom_control *oc)
oc                 90 mm/oom_kill.c  	const nodemask_t *mask = oc->nodemask;
oc                 92 mm/oom_kill.c  	if (is_memcg_oom(oc))
oc                120 mm/oom_kill.c  static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
oc                155 mm/oom_kill.c  static inline bool is_sysrq_oom(struct oom_control *oc)
oc                157 mm/oom_kill.c  	return oc->order == -1;
oc                252 mm/oom_kill.c  static enum oom_constraint constrained_alloc(struct oom_control *oc)
oc                256 mm/oom_kill.c  	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
oc                260 mm/oom_kill.c  	if (is_memcg_oom(oc)) {
oc                261 mm/oom_kill.c  		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
oc                266 mm/oom_kill.c  	oc->totalpages = totalram_pages() + total_swap_pages;
oc                271 mm/oom_kill.c  	if (!oc->zonelist)
oc                278 mm/oom_kill.c  	if (oc->gfp_mask & __GFP_THISNODE)
oc                286 mm/oom_kill.c  	if (oc->nodemask &&
oc                287 mm/oom_kill.c  	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
oc                288 mm/oom_kill.c  		oc->totalpages = total_swap_pages;
oc                289 mm/oom_kill.c  		for_each_node_mask(nid, *oc->nodemask)
oc                290 mm/oom_kill.c  			oc->totalpages += node_present_pages(nid);
oc                295 mm/oom_kill.c  	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
oc                296 mm/oom_kill.c  			high_zoneidx, oc->nodemask)
oc                297 mm/oom_kill.c  		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
oc                301 mm/oom_kill.c  		oc->totalpages = total_swap_pages;
oc                303 mm/oom_kill.c  			oc->totalpages += node_present_pages(nid);
oc                311 mm/oom_kill.c  	struct oom_control *oc = arg;
oc                318 mm/oom_kill.c  	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
oc                327 mm/oom_kill.c  	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
oc                342 mm/oom_kill.c  	points = oom_badness(task, oc->totalpages);
oc                343 mm/oom_kill.c  	if (!points || points < oc->chosen_points)
oc                347 mm/oom_kill.c  	if (oc->chosen)
oc                348 mm/oom_kill.c  		put_task_struct(oc->chosen);
oc                350 mm/oom_kill.c  	oc->chosen = task;
oc                351 mm/oom_kill.c  	oc->chosen_points = points;
oc                355 mm/oom_kill.c  	if (oc->chosen)
oc                356 mm/oom_kill.c  		put_task_struct(oc->chosen);
oc                357 mm/oom_kill.c  	oc->chosen = (void *)-1UL;
oc                365 mm/oom_kill.c  static void select_bad_process(struct oom_control *oc)
oc                367 mm/oom_kill.c  	if (is_memcg_oom(oc))
oc                368 mm/oom_kill.c  		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
oc                374 mm/oom_kill.c  			if (oom_evaluate_task(p, oc))
oc                382 mm/oom_kill.c  	struct oom_control *oc = arg;
oc                389 mm/oom_kill.c  	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
oc                423 mm/oom_kill.c  static void dump_tasks(struct oom_control *oc)
oc                428 mm/oom_kill.c  	if (is_memcg_oom(oc))
oc                429 mm/oom_kill.c  		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
oc                435 mm/oom_kill.c  			dump_task(p, oc);
oc                440 mm/oom_kill.c  static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
oc                444 mm/oom_kill.c  			oom_constraint_text[oc->constraint],
oc                445 mm/oom_kill.c  			nodemask_pr_args(oc->nodemask));
oc                447 mm/oom_kill.c  	mem_cgroup_print_oom_context(oc->memcg, victim);
oc                452 mm/oom_kill.c  static void dump_header(struct oom_control *oc, struct task_struct *p)
oc                455 mm/oom_kill.c  		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
oc                457 mm/oom_kill.c  	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
oc                461 mm/oom_kill.c  	if (is_memcg_oom(oc))
oc                462 mm/oom_kill.c  		mem_cgroup_print_oom_meminfo(oc->memcg);
oc                464 mm/oom_kill.c  		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
oc                469 mm/oom_kill.c  		dump_tasks(oc);
oc                471 mm/oom_kill.c  		dump_oom_summary(oc, p);
oc                951 mm/oom_kill.c  static void oom_kill_process(struct oom_control *oc, const char *message)
oc                953 mm/oom_kill.c  	struct task_struct *victim = oc->chosen;
oc                974 mm/oom_kill.c  		dump_header(oc, victim);
oc                981 mm/oom_kill.c  	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
oc                999 mm/oom_kill.c  static void check_panic_on_oom(struct oom_control *oc)
oc               1009 mm/oom_kill.c  		if (oc->constraint != CONSTRAINT_NONE)
oc               1013 mm/oom_kill.c  	if (is_sysrq_oom(oc))
oc               1015 mm/oom_kill.c  	dump_header(oc, NULL);
oc               1043 mm/oom_kill.c  bool out_of_memory(struct oom_control *oc)
oc               1050 mm/oom_kill.c  	if (!is_memcg_oom(oc)) {
oc               1075 mm/oom_kill.c  	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
oc               1082 mm/oom_kill.c  	oc->constraint = constrained_alloc(oc);
oc               1083 mm/oom_kill.c  	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
oc               1084 mm/oom_kill.c  		oc->nodemask = NULL;
oc               1085 mm/oom_kill.c  	check_panic_on_oom(oc);
oc               1087 mm/oom_kill.c  	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
oc               1089 mm/oom_kill.c  	    oom_cpuset_eligible(current, oc) &&
oc               1092 mm/oom_kill.c  		oc->chosen = current;
oc               1093 mm/oom_kill.c  		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
oc               1097 mm/oom_kill.c  	select_bad_process(oc);
oc               1099 mm/oom_kill.c  	if (!oc->chosen) {
oc               1100 mm/oom_kill.c  		dump_header(oc, NULL);
oc               1107 mm/oom_kill.c  		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
oc               1110 mm/oom_kill.c  	if (oc->chosen && oc->chosen != (void *)-1UL)
oc               1111 mm/oom_kill.c  		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
oc               1113 mm/oom_kill.c  	return !!oc->chosen;
oc               1123 mm/oom_kill.c  	struct oom_control oc = {
oc               1136 mm/oom_kill.c  	out_of_memory(&oc);
oc               3792 mm/page_alloc.c 	struct oom_control oc = {
oc               3860 mm/page_alloc.c 	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
oc               1931 security/selinux/ss/services.c 	struct ocontext *oc;
oc               2015 security/selinux/ss/services.c 		oc = args->newp->ocontexts[OCON_ISID];
oc               2016 security/selinux/ss/services.c 		while (oc && oc->sid[0] != SECINITSID_UNLABELED)
oc               2017 security/selinux/ss/services.c 			oc = oc->next;
oc               2019 security/selinux/ss/services.c 		if (!oc) {
oc               2024 security/selinux/ss/services.c 		rc = mls_range_set(newc, &oc->context[0].range);
oc                491 sound/pci/echoaudio/echoaudio.c 	int oc;
oc                498 sound/pci/echoaudio/echoaudio.c 	oc = atomic_read(&chip->opencount);
oc                499 sound/pci/echoaudio/echoaudio.c 	dev_dbg(chip->card->dev, "pcm_close  oc=%d  cs=%d  rs=%d\n", oc,
oc                501 sound/pci/echoaudio/echoaudio.c 	if (oc < 2)
oc                503 sound/pci/echoaudio/echoaudio.c 	if (oc == 0)
oc                505 sound/pci/echoaudio/echoaudio.c 	dev_dbg(chip->card->dev, "pcm_close2 oc=%d  cs=%d  rs=%d\n", oc,
oc                101 sound/soc/codecs/ak4613.c 	u8 oc;
oc                437 sound/soc/codecs/ak4613.c 	snd_soc_component_update_bits(component, OCTRL, OCTRL_MASK, priv->oc);
oc                631 sound/soc/codecs/ak4613.c 			priv->oc |= 1 << i;
oc                324 tools/perf/util/stat-shadow.c 	struct evsel *counter, *leader, **metric_events, *oc;
oc                353 tools/perf/util/stat-shadow.c 				for_each_group_member (oc, leader) {
oc                354 tools/perf/util/stat-shadow.c 					if (!strcasecmp(oc->name, metric_names[i]) &&
oc                355 tools/perf/util/stat-shadow.c 						!oc->collect_stat) {
oc                363 tools/perf/util/stat-shadow.c 				oc = perf_stat__find_event(evsel_list, metric_names[i]);
oc                365 tools/perf/util/stat-shadow.c 			if (!oc) {
oc                386 tools/perf/util/stat-shadow.c 			metric_events[i] = oc;
oc                387 tools/perf/util/stat-shadow.c 			oc->collect_stat = true;
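
The densest cluster above is the struct oom_control consumer in mm/oom_kill.c and its callers in drivers/tty/sysrq.c, mm/memcontrol.c and mm/page_alloc.c. The following is only an illustrative sketch, not a verbatim excerpt from the tree, of how such a caller might populate oc before handing it to out_of_memory(); the field names mirror the accessors visible in the mm/oom_kill.c lines listed above, while the initializer values and the node_zonelist()/first_memory_node helpers are assumptions about a sysrq-style caller.

	/* Sketch of a sysrq-style, non-memcg OOM trigger (assumed, not verbatim). */
	struct oom_control oc = {
		.zonelist	= node_zonelist(first_memory_node, GFP_KERNEL),
		.nodemask	= NULL,		/* no mempolicy restriction */
		.memcg		= NULL,		/* keeps is_memcg_oom(&oc) false */
		.gfp_mask	= GFP_KERNEL,
		.order		= -1,		/* is_sysrq_oom(&oc), cf. mm/oom_kill.c:157 above */
	};

	if (!out_of_memory(&oc))
		pr_info("no eligible OOM victim found\n");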