Lines matching refs:spu

45 static void spu_buff_add(unsigned long int value, int spu)  in spu_buff_add()  argument
59 if (spu_buff[spu].head >= spu_buff[spu].tail) { in spu_buff_add()
60 if ((spu_buff[spu].head - spu_buff[spu].tail) in spu_buff_add()
64 } else if (spu_buff[spu].tail > spu_buff[spu].head) { in spu_buff_add()
65 if ((spu_buff[spu].tail - spu_buff[spu].head) in spu_buff_add()
71 spu_buff[spu].buff[spu_buff[spu].head] = value; in spu_buff_add()
72 spu_buff[spu].head++; in spu_buff_add()
74 if (spu_buff[spu].head >= max_spu_buff) in spu_buff_add()
75 spu_buff[spu].head = 0; in spu_buff_add()
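The spu_buff_add() matches above describe a per-SPU circular buffer: head is the next write index, the ring counts as full while only one slot remains free, and head wraps at max_spu_buff. Below is a minimal userspace sketch of that logic, assuming the struct layout the matches imply (buff, head, tail, plus the ctx_sw_seen/last_guard_val flags that appear further down); it is a simplified model of the pattern, not the kernel code itself.

#define NUM_SPU_NODES	8		/* stand-in for the real node count */

/* Hypothetical, simplified model of the per-SPU buffer implied above. */
struct spu_buffer {
	unsigned long *buff;
	unsigned int head;		/* next index to write */
	unsigned int tail;		/* next index to read */
	int ctx_sw_seen;		/* a context-switch record was added */
	unsigned long last_guard_val;
};

static struct spu_buffer spu_buff[NUM_SPU_NODES];
static int num_spu_nodes = NUM_SPU_NODES;
static unsigned long max_spu_buff = 1024;	/* ring capacity stand-in */

static void spu_buff_add(unsigned long value, int spu)
{
	int full = 1;

	/* The ring is full when advancing head would make it equal tail,
	 * so one slot is always kept free. */
	if (spu_buff[spu].head >= spu_buff[spu].tail) {
		if ((spu_buff[spu].head - spu_buff[spu].tail)
		    < (max_spu_buff - 1))
			full = 0;
	} else if (spu_buff[spu].tail > spu_buff[spu].head) {
		if ((spu_buff[spu].tail - spu_buff[spu].head) > 1)
			full = 0;
	}

	if (full)
		return;			/* drop the sample on overflow */

	spu_buff[spu].buff[spu_buff[spu].head] = value;
	spu_buff[spu].head++;

	/* Wrap the write index at the end of the ring. */
	if (spu_buff[spu].head >= max_spu_buff)
		spu_buff[spu].head = 0;
}

With buff pointing at an array of max_spu_buff unsigned longs (see the allocation sketch further down), repeated calls append values until only one slot remains free; after that, new samples are dropped until the consumer advances tail.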
93 int spu; in sync_spu_buff() local
97 for (spu = 0; spu < num_spu_nodes; spu++) { in sync_spu_buff()
101 if (spu_buff[spu].buff == NULL) in sync_spu_buff()
110 curr_head = spu_buff[spu].head; in sync_spu_buff()
116 oprofile_put_buff(spu_buff[spu].buff, in sync_spu_buff()
117 spu_buff[spu].tail, in sync_spu_buff()
121 spu_buff[spu].tail = curr_head; in sync_spu_buff()
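sync_spu_buff(), per the matches above, drains each allocated SPU buffer by snapshotting head, handing the tail..head range to oprofile_put_buff(), and then moving tail up to the snapshot. A hedged sketch of that drain loop, continuing the hypothetical spu_buffer model above and using a stand-in consumer since oprofile_put_buff()'s full signature is not shown here:

/* Stand-in for oprofile_put_buff(): consume entries from tail up to
 * (but not including) head, wrapping at the ring size. */
static void consume_range(unsigned long *buff, unsigned int tail,
			  unsigned int head, unsigned long size)
{
	unsigned int i;

	for (i = tail; i != head; i = (i + 1) % size)
		(void)buff[i];		/* emit buff[i] to the event stream */
}

static void sync_spu_buff(void)
{
	int spu;
	unsigned int curr_head;

	for (spu = 0; spu < num_spu_nodes; spu++) {
		/* Skip SPUs whose buffer was never allocated. */
		if (!spu_buff[spu].buff)
			continue;

		/* Snapshot head; samples added while draining are picked
		 * up on the next sync pass. */
		curr_head = spu_buff[spu].head;

		consume_range(spu_buff[spu].buff, spu_buff[spu].tail,
			      curr_head, max_spu_buff);

		/* Everything up to the snapshot has been consumed. */
		spu_buff[spu].tail = curr_head;
	}
}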
140 struct spu *the_spu; /* needed to access pointer to local_store */
160 static struct cached_info *get_cached_info(struct spu *the_spu, int spu_num) in get_cached_info()
191 prepare_cached_spu_info(struct spu *spu, unsigned long objectId) in prepare_cached_spu_info() argument
201 info = get_cached_info(spu, spu->number); in prepare_cached_spu_info()
219 new_map = create_vma_map(spu, objectId); in prepare_cached_spu_info()
230 info->the_spu = spu; in prepare_cached_spu_info()
233 spu_info[spu->number] = info; in prepare_cached_spu_info()
244 spu_set_profile_private_kref(spu->ctx, &info->cache_ref, in prepare_cached_spu_info()
319 get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp, in get_exec_dcookie_and_offset() argument
327 struct mm_struct *mm = spu->mm; in get_exec_dcookie_and_offset()
376 static int process_context_switch(struct spu *spu, unsigned long objectId) in process_context_switch() argument
383 retval = prepare_cached_spu_info(spu, objectId); in process_context_switch()
390 app_dcookie = get_exec_dcookie_and_offset(spu, &offset, &spu_cookie, objectId); in process_context_switch()
398 spu_buff_add(ESCAPE_CODE, spu->number); in process_context_switch()
399 spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number); in process_context_switch()
400 spu_buff_add(spu->number, spu->number); in process_context_switch()
401 spu_buff_add(spu->pid, spu->number); in process_context_switch()
402 spu_buff_add(spu->tgid, spu->number); in process_context_switch()
403 spu_buff_add(app_dcookie, spu->number); in process_context_switch()
404 spu_buff_add(spu_cookie, spu->number); in process_context_switch()
405 spu_buff_add(offset, spu->number); in process_context_switch()
411 spu_buff[spu->number].ctx_sw_seen = 1; in process_context_switch()
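The process_context_switch() matches show the shape of a context-switch record: an escape code, the SPU_CTX_SWITCH_CODE tag, then the SPU number, pid, tgid, the application's dcookie, the SPU binary's cookie, and its offset, followed by setting ctx_sw_seen. A hedged sketch of just that record emission; ESCAPE_CODE and SPU_CTX_SWITCH_CODE are given placeholder values here (the real constants come from oprofile's headers), and the struct is a minimal stand-in for the fields of struct spu used on this path:

#define ESCAPE_CODE		(~0UL)	/* placeholder value */
#define SPU_CTX_SWITCH_CODE	1UL	/* placeholder value */

/* Minimal stand-in for the fields of struct spu used below. */
struct spu_ids {
	int number;
	int pid;
	int tgid;
};

/* Write one context-switch record into the SPU's ring buffer.
 * app_dcookie/spu_cookie/offset identify the user program and the SPU
 * ELF object; in the real code they come from
 * get_exec_dcookie_and_offset(). */
static void emit_ctx_switch_record(const struct spu_ids *spu,
				   unsigned long app_dcookie,
				   unsigned long spu_cookie,
				   unsigned long offset)
{
	spu_buff_add(ESCAPE_CODE, spu->number);
	spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number);
	spu_buff_add(spu->number, spu->number);
	spu_buff_add(spu->pid, spu->number);
	spu_buff_add(spu->tgid, spu->number);
	spu_buff_add(app_dcookie, spu->number);
	spu_buff_add(spu_cookie, spu->number);
	spu_buff_add(offset, spu->number);

	/* Note that at least one switch record exists for this SPU, so
	 * later samples can be attributed to the right context. */
	spu_buff[spu->number].ctx_sw_seen = 1;
}

The escape code in front of the record is what lets the consumer tell a switch record apart from raw sample values in the same ring.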
431 struct spu *the_spu = data; in spu_active_notify()
462 int spu; in oprofile_spu_buff_create() local
466 for (spu = 0; spu < num_spu_nodes; spu++) { in oprofile_spu_buff_create()
470 spu_buff[spu].head = 0; in oprofile_spu_buff_create()
471 spu_buff[spu].tail = 0; in oprofile_spu_buff_create()
479 spu_buff[spu].buff = kzalloc((max_spu_buff in oprofile_spu_buff_create()
483 if (!spu_buff[spu].buff) { in oprofile_spu_buff_create()
487 __func__, __LINE__, spu); in oprofile_spu_buff_create()
490 while (spu >= 0) { in oprofile_spu_buff_create()
491 kfree(spu_buff[spu].buff); in oprofile_spu_buff_create()
492 spu_buff[spu].buff = 0; in oprofile_spu_buff_create()
493 spu--; in oprofile_spu_buff_create()
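oprofile_spu_buff_create(), per the matches, resets head and tail, allocates a zeroed buffer of max_spu_buff entries per SPU (kzalloc in the kernel), and on failure walks back down from the failing index freeing whatever was already allocated. A hedged userspace sketch of that allocate-or-unwind loop, using calloc in place of kzalloc:

#include <stdlib.h>

static int oprofile_spu_buff_create(void)
{
	int spu;

	for (spu = 0; spu < num_spu_nodes; spu++) {
		spu_buff[spu].head = 0;
		spu_buff[spu].tail = 0;

		/* One zeroed slot per sample word. */
		spu_buff[spu].buff = calloc(max_spu_buff,
					    sizeof(unsigned long));
		if (!spu_buff[spu].buff) {
			/* Unwind: free every buffer allocated so far,
			 * walking back from the failing SPU to SPU 0.
			 * Freeing the current NULL slot is harmless. */
			while (spu >= 0) {
				free(spu_buff[spu].buff);
				spu_buff[spu].buff = 0;
				spu--;
			}
			return -1;	/* the kernel path reports ENOMEM */
		}
	}
	return 0;
}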
513 int spu; in spu_sync_start() local
530 for (spu = 0; spu < num_spu_nodes; spu++) { in spu_sync_start()
531 spu_buff_add(ESCAPE_CODE, spu); in spu_sync_start()
532 spu_buff_add(SPU_PROFILING_CODE, spu); in spu_sync_start()
533 spu_buff_add(num_spu_nodes, spu); in spu_sync_start()
537 for (spu = 0; spu < num_spu_nodes; spu++) { in spu_sync_start()
538 spu_buff[spu].ctx_sw_seen = 0; in spu_sync_start()
539 spu_buff[spu].last_guard_val = 0; in spu_sync_start()
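spu_sync_start(), per the matches, seeds every SPU buffer with a small header (escape code, SPU_PROFILING_CODE, the number of SPU nodes) and clears the per-SPU ctx_sw_seen and last_guard_val bookkeeping before profiling begins. A hedged sketch of that start-up pass; SPU_PROFILING_CODE is again a placeholder for oprofile's real constant:

#define SPU_PROFILING_CODE	2UL	/* placeholder value */

static void spu_sync_start_header(void)
{
	int spu;

	/* Announce SPU profiling and the node count in each buffer so
	 * the consumer can interpret the records that follow. */
	for (spu = 0; spu < num_spu_nodes; spu++) {
		spu_buff_add(ESCAPE_CODE, spu);
		spu_buff_add(SPU_PROFILING_CODE, spu);
		spu_buff_add(num_spu_nodes, spu);
	}

	/* Reset per-SPU bookkeeping before samples start arriving. */
	for (spu = 0; spu < num_spu_nodes; spu++) {
		spu_buff[spu].ctx_sw_seen = 0;
		spu_buff[spu].last_guard_val = 0;
	}
}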
562 struct spu *the_spu; in spu_sync_buffer()