#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/kalloc.h>
#include <kern/debug.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <chud/chud_xnu.h>
#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/timetrigger.h>
#include <kperf/threadinfo.h>
#include <kperf/callstack.h>
#include <kperf/sample.h>
#include <kperf/filter.h>
#include <kperf/action.h>
#include <kperf/context.h>
#include <kperf/ast.h>
/* Maximum number of actions that may be configured at once. */
#define ACTION_MAX 32

/* Configuration for a single action: a bitmask of SAMPLER_* flags
 * selecting what to sample when a trigger fires with this action id.
 */
struct action
{
	unsigned sample;
};

/* Dynamically allocated action table: actionc entries in actionv.
 * Grown (never shrunk) by kperf_action_set_count() below.
 */
static unsigned actionc = 0;
static struct action *actionv = NULL;
/*
 * Collect one sample into sbuf for the thread/task described by context.
 *
 * sample_what is a bitmask of SAMPLER_* flags selecting which data to
 * gather.  When pend_user is TRUE, user-space samplers are not run here;
 * instead an AST is pended on the thread so they run later, in a context
 * where touching user memory is safe (see kperf_thread_ast_handler).
 *
 * The function runs in two phases: gather the data, then log it with
 * interrupts disabled so the trace records belonging to one sample are
 * emitted as an atomic group.  The phase ordering and the interrupt
 * window must not be rearranged.
 *
 * Returns SAMPLE_CONTINUE.
 */
static kern_return_t
kperf_sample_internal( struct kperf_sample *sbuf,
                       struct kperf_context *context,
                       unsigned sample_what, boolean_t pend_user )
{
	boolean_t enabled;
	int did_ucallstack = 0, did_tinfo_extra = 0;

	/* nothing requested — nothing to do */
	if( sample_what == 0 )
		return SAMPLE_CONTINUE;

	/* pid 0 is the kernel task: it has no user context to sample */
	int is_kernel = (context->cur_pid == 0);

	/* --- phase 1: gather --- */
	if( sample_what & SAMPLER_TINFO ) {
		kperf_threadinfo_sample( &sbuf->threadinfo, context );

		/* NOTE(review): bit 0x40 in runmode appears to mark threads
		 * whose samples should be dropped (presumably idle) — confirm
		 * against kperf_threadinfo_sample().
		 */
		if (sbuf->threadinfo.runmode & 0x40)
			return SAMPLE_CONTINUE;
	}

	if( sample_what & SAMPLER_KSTACK )
		kperf_kcallstack_sample( &sbuf->kcallstack, context );

	/* user-space samplers apply only to non-kernel tasks */
	if ( !is_kernel ) {
		if( pend_user )
		{
			/* defer to AST time; remember what we pended so the
			 * log phase can note it
			 */
			if( sample_what & SAMPLER_USTACK )
				did_ucallstack = kperf_ucallstack_pend( context );

			if( sample_what & SAMPLER_TINFOEX )
				did_tinfo_extra = kperf_threadinfo_extra_pend( context );
		}
		else
		{
			/* safe to touch user memory now — sample directly */
			if( sample_what & SAMPLER_USTACK )
				kperf_ucallstack_sample( &sbuf->ucallstack, context );

			if( sample_what & SAMPLER_TINFOEX )
				kperf_threadinfo_extra_sample( &sbuf->tinfo_ex,
				                               context );
		}
	}

	/* --- phase 2: log, with interrupts off so the records stay
	 * contiguous in the trace buffer ---
	 */
	enabled = ml_set_interrupts_enabled(FALSE);

	/* when pending, bracket the records with start/end markers */
	if ( pend_user )
		BUF_DATA1( PERF_GEN_EVENT | DBG_FUNC_START, sample_what );

	if( sample_what & SAMPLER_TINFO )
		kperf_threadinfo_log( &sbuf->threadinfo );

	if( sample_what & SAMPLER_KSTACK )
		kperf_kcallstack_log( &sbuf->kcallstack );

	if ( !is_kernel ) {
		if ( pend_user )
		{
			/* record that user work was deferred, not taken */
			if ( did_ucallstack )
				BUF_INFO1( PERF_CS_UPEND, 0 );

			if ( did_tinfo_extra )
				BUF_INFO1( PERF_TI_XPEND, 0 );
		}
		else
		{
			if( sample_what & SAMPLER_USTACK )
				kperf_ucallstack_log( &sbuf->ucallstack );

			if( sample_what & SAMPLER_TINFOEX )
				kperf_threadinfo_extra_log( &sbuf->tinfo_ex );
		}
	}

	if ( pend_user )
		BUF_DATA1( PERF_GEN_EVENT | DBG_FUNC_END, sample_what );

	/* restore the previous interrupt state */
	ml_set_interrupts_enabled(enabled);

	return SAMPLE_CONTINUE;
}
/*
 * Entry point for a trigger firing with a given action id: look up the
 * action's sampler mask and take the sample.
 *
 * Returns SAMPLE_SHUTDOWN if sampling is being torn down or the action
 * id is out of range; panics if a trigger fires while sampling is off
 * (indicates a teardown race).
 */
kern_return_t
kperf_sample( struct kperf_sample *sbuf,
              struct kperf_context *context,
              unsigned actionid, boolean_t pend_user )
{
	/* sanity check the sampling state machine */
	if( kperf_sampling_status() == KPERF_SAMPLING_OFF )
		panic("trigger fired while sampling off");
	else if( kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN )
		return SAMPLE_SHUTDOWN;

	/* an action id past the table means the table was torn down */
	if( actionid >= actionc )
		return SAMPLE_SHUTDOWN;

	return kperf_sample_internal( sbuf, context,
	                              actionv[actionid].sample,
	                              pend_user );
}
/*
 * Handle an AST previously pended by kperf_ast_pend(): run the
 * user-space samplers (user callstack and/or extended thread info)
 * that were deferred because the original trigger context could not
 * safely touch user memory.
 */
void
kperf_thread_ast_handler( thread_t thread )
{
	/* FIX: r must be initialized — previously it was logged
	 * uninitialized (undefined behavior) when the allocation below
	 * failed and control jumped straight to error:.
	 */
	int r = 0;
	uint32_t t_chud;
	unsigned sample_what = 0;
	task_t task = NULL;

	/* sampling may have been turned off while the AST was pending */
	if( kperf_sampling_status() != KPERF_SAMPLING_ON )
		return;

	BUF_INFO1(PERF_AST_HNDLR | DBG_FUNC_START, thread);

	/* sample buffers are too large to live on the stack */
	struct kperf_sample *sbuf = kalloc( sizeof(*sbuf) );
	if( sbuf == NULL )
	{
		BUF_INFO1( PERF_AST_ERROR, 0 );
		goto error;
	}

	/* build a context for the current thread */
	struct kperf_context ctx;
	ctx.cur_thread = thread;
	ctx.cur_pid = -1;

	task = chudxnu_task_for_thread(thread);
	if(task)
		ctx.cur_pid = chudxnu_pid_for_task(task);

	/* decode which samplers were requested from the thread's AST bits */
	t_chud = kperf_get_thread_bits(thread);

	if (t_chud & T_AST_NAME)
		sample_what |= SAMPLER_TINFOEX;

	if (t_chud & T_AST_CALLSTACK)
		sample_what |= SAMPLER_USTACK;

	/* user samplers run directly now — pass pend_user FALSE so we do
	 * not pend again
	 */
	r = kperf_sample_internal( sbuf, &ctx, sample_what, FALSE );

	kfree( sbuf, sizeof(*sbuf) );
error:
	BUF_INFO1(PERF_AST_HNDLR | DBG_FUNC_END, r);
}
/*
 * Pend an AST on cur_thread so deferred samplers run when the thread
 * next takes the AST.  check_bits guards against pending the same work
 * twice; set_bits are the T_AST_* flags to add.
 *
 * Returns 1 if the AST was newly set, 0 if it was already pending.
 * Panics if called for any thread other than the current one, since
 * only the running thread's AST bits may be manipulated safely.
 */
int
kperf_ast_pend( thread_t cur_thread, uint32_t check_bits,
                uint32_t set_bits )
{
	uint32_t bits;

	if( cur_thread != chudxnu_current_thread() )
		panic("pending to non-current thread");

	bits = kperf_get_thread_bits(cur_thread);

	/* already pending — nothing to do */
	if( bits & check_bits )
		return 0;

	kperf_set_thread_bits(cur_thread, bits | set_bits);
	kperf_set_thread_ast( cur_thread );

	return 1;
}
/* Report how many actions are currently configured. */
unsigned
kperf_action_get_count(void)
{
	return actionc;
}
/*
 * Set the SAMPLER_* bitmask for an action.
 * Returns 0 on success, EINVAL if actionid is out of range.
 */
int
kperf_action_set_samplers( unsigned actionid, uint32_t samplers )
{
	if( actionid < actionc )
	{
		actionv[actionid].sample = samplers;
		return 0;
	}

	return EINVAL;
}
/*
 * Fetch the SAMPLER_* bitmask for an action into *samplers_out.
 * Returns 0 on success, EINVAL if actionid is out of range.
 */
int
kperf_action_get_samplers( unsigned actionid, uint32_t *samplers_out )
{
	if( actionid < actionc )
	{
		*samplers_out = actionv[actionid].sample;
		return 0;
	}

	return EINVAL;
}
int
kperf_action_set_count(unsigned count)
{
struct action *new_actionv = NULL, *old_actionv = NULL;
unsigned old_count;
if( count == actionc )
return 0;
if( count < actionc )
return EINVAL;
if( count > ACTION_MAX )
return EINVAL;
if( actionc == 0 )
{
int r;
r = kperf_init();
if( r != 0 )
return r;
}
new_actionv = kalloc( count * sizeof(*new_actionv) );
if( new_actionv == NULL )
return ENOMEM;
old_actionv = actionv;
old_count = actionc;
if( old_actionv != NULL )
bcopy( actionv, new_actionv, actionc * sizeof(*actionv) );
bzero( &new_actionv[actionc], (count - old_count) * sizeof(*actionv) );
actionv = new_actionv;
actionc = count;
if( old_actionv != NULL )
kfree( old_actionv, old_count * sizeof(*actionv) );
printf( "kperf: done the alloc\n" );
return 0;
}