~hww3/illumos-joyent

027bcc9f64a0a5915089267b0dc54c9ee05782b0 — Toomas Soome 6 years ago
11797 i86pc: cast between incompatible function types
Reviewed by: John Levon <john.levon@joyent.com>
Approved by: Dan McDonald <danmcd@joyent.com>
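Two patterns recur in the change. Where a handler is registered through a generic dispatch interface (xc_call(), add_avintr(), cpu_call()), the handler is rewritten to the exact prototype that interface expects and recovers its real argument inside the body, so the function-pointer cast at the registration site disappears. Where the mismatched cast is deliberate (SEGKMEM_BADOP, the fasttable entries, the func handed to cpu_call()), the pointer is routed through uintptr_t first, so the compiler no longer sees a direct function-to-function conversion. A minimal userland sketch of the uintptr_t idiom, with a made-up slot type rather than anything from the kernel headers:

#include <stdint.h>

/* Made-up generic slot type, standing in for e.g. the SEGKMEM_BADOP target. */
typedef int (*badop_t)(void);

/* A routine whose prototype deliberately does not match badop_t. */
static void
real_badop(void)
{
}

int
main(void)
{
	/*
	 * "badop_t op = (badop_t)real_badop;" draws
	 * "cast between incompatible function types" from
	 * -Wcast-function-type (GCC 8 and later).  Bouncing the pointer
	 * through uintptr_t keeps the same value while avoiding the
	 * direct function-to-function conversion the warning looks for.
	 */
	badop_t op = (badop_t)(uintptr_t)real_badop;

	(void) op;	/* never called through the mismatched type here */
	return (0);
}

The pointer value is unchanged; the idiom only marks the mismatch as intentional, and such a pointer is still only called where the ABI makes that safe.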
M usr/src/uts/common/os/cap_util.c => usr/src/uts/common/os/cap_util.c +3 -3
@@ 1298,7 1298,7 @@ static void
cu_cpu_kstat_create(pghw_t *pg, cu_cntr_info_t *cntr_info)
{
	kstat_t		*ks;
-	char 		*sharing = pghw_type_string(pg->pghw_hw);
+	char		*sharing = pghw_type_string(pg->pghw_hw);
	char		name[KSTAT_STRLEN + 1];

	/*


@@ 1417,7 1417,7 @@ cu_cpu_run(cpu_t *cp, cu_cpu_func_t func, uintptr_t arg)
	 * cpu_call() will call func on the CPU specified with given argument
	 * and return func's return value in last argument
	 */
-	cpu_call(cp, (cpu_call_func_t)func, arg, (uintptr_t)&error);
+	cpu_call(cp, (cpu_call_func_t)(uintptr_t)func, arg, (uintptr_t)&error);
	return (error);
}



@@ 1471,7 1471,7 @@ cu_cpu_update(struct cpu *cp, boolean_t move_to)
	 */
	retval = 0;
	if (move_to)
-		(void) cu_cpu_run(cp, (cu_cpu_func_t)kcpc_read,
+		(void) cu_cpu_run(cp, (cu_cpu_func_t)(uintptr_t)kcpc_read,
		    (uintptr_t)cu_cpu_update_stats);
	else {
		retval = kcpc_read((kcpc_update_func_t)cu_cpu_update_stats);

M usr/src/uts/common/vm/seg_kmem.c => usr/src/uts/common/vm/seg_kmem.c +6 -6
@@ 435,12 435,12 @@ segkmem_badop()
	panic("segkmem_badop");
}

-#define	SEGKMEM_BADOP(t)	(t(*)())segkmem_badop
+#define	SEGKMEM_BADOP(t)	(t(*)())(uintptr_t)segkmem_badop

/*ARGSUSED*/
static faultcode_t
segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
-	enum fault_type type, enum seg_rw rw)
+    enum fault_type type, enum seg_rw rw)
{
	pgcnt_t npages;
	spgcnt_t pg;


@@ 677,7 677,7 @@ segkmem_dump(struct seg *seg)
/*ARGSUSED*/
static int
segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
-	page_t ***ppp, enum lock_type type, enum seg_rw rw)
+    page_t ***ppp, enum lock_type type, enum seg_rw rw)
{
	page_t **pplist, *pp;
	pgcnt_t npages;


@@ 858,7 858,7 @@ segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
 */
void *
segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
-	page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
+    page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
{
	page_t *ppl;
	caddr_t addr = inaddr;


@@ 1222,7 1222,7 @@ static void
segkmem_free_one_lp(caddr_t addr, size_t size)
{
	page_t		*pp, *rootpp = NULL;
-	pgcnt_t 	pgs_left = btopr(size);
+	pgcnt_t		pgs_left = btopr(size);

	ASSERT(size == segkmem_lpsize);



@@ 1422,7 1422,7 @@ segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
	pgcnt_t		nlpages = size >> segkmem_lpshift;
	size_t		lpsize = segkmem_lpsize;
	caddr_t		addr = inaddr;
-	pgcnt_t 	npages = btopr(size);
+	pgcnt_t		npages = btopr(size);
	int		i;

	ASSERT(vmp == heap_lp_arena);

M usr/src/uts/i86pc/io/cbe.c => usr/src/uts/i86pc/io/cbe.c +10 -10
@@ 68,15 68,15 @@ extern int tsc_gethrtime_enable;

void cbe_hres_tick(void);

-int
-cbe_softclock(void)
+uint_t
+cbe_softclock(caddr_t arg1 __unused, caddr_t arg2 __unused)
{
	cyclic_softint(CPU, CY_LOCK_LEVEL);
	return (1);
}

-int
-cbe_low_level(void)
+uint_t
+cbe_low_level(caddr_t arg1 __unused, caddr_t arg2 __unused)
{
	cpu_t *cpu = CPU;



@@ 90,8 90,8 @@ cbe_low_level(void)
 * spurious calls, it would not matter if we called cyclic_fire() in both
 * cases.
 */
-int
-cbe_fire(void)
+uint_t
+cbe_fire(caddr_t arg1 __unused, caddr_t arg2 __unused)
{
	cpu_t *cpu = CPU;
	processorid_t me = cpu->cpu_id, i;


@@ 346,21 346,21 @@ cbe_init(void)
	cyclic_init(&cbe, cbe_timer_resolution);
	mutex_exit(&cpu_lock);

-	(void) add_avintr(NULL, CBE_HIGH_PIL, (avfunc)cbe_fire,
+	(void) add_avintr(NULL, CBE_HIGH_PIL, cbe_fire,
	    "cbe_fire_master", cbe_vector, 0, NULL, NULL, NULL);

	if (psm_get_ipivect != NULL) {
-		(void) add_avintr(NULL, CBE_HIGH_PIL, (avfunc)cbe_fire,
+		(void) add_avintr(NULL, CBE_HIGH_PIL, cbe_fire,
		    "cbe_fire_slave",
		    (*psm_get_ipivect)(CBE_HIGH_PIL, PSM_INTR_IPI_HI),
		    0, NULL, NULL, NULL);
	}

	(void) add_avsoftintr((void *)&cbe_clock_hdl, CBE_LOCK_PIL,
-	    (avfunc)cbe_softclock, "softclock", NULL, NULL);
+	    cbe_softclock, "softclock", NULL, NULL);

	(void) add_avsoftintr((void *)&cbe_low_hdl, CBE_LOW_PIL,
-	    (avfunc)cbe_low_level, "low level", NULL, NULL);
+	    cbe_low_level, "low level", NULL, NULL);

	mutex_enter(&cpu_lock);
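The cbe.c handlers now carry the prototype that add_avintr() and add_avsoftintr() expect of an avfunc: two caddr_t arguments they ignore and a uint_t return, so the (avfunc) casts at the registration sites can go. A compilable sketch of the same shape, using local stand-ins for avfunc, caddr_t, uint_t and __unused rather than the real kernel definitions:

#include <stddef.h>

typedef char *caddr_t;			/* stand-in */
typedef unsigned int uint_t;		/* stand-in */
#define	UNUSED	__attribute__((unused))	/* stand-in for __unused */

/* The registration interface takes a handler of exactly this shape. */
typedef uint_t (*avfunc)(caddr_t, caddr_t);

static avfunc softint_handler;

static void
register_softint(avfunc func)
{
	softint_handler = func;		/* no cast needed: prototypes match */
}

/* Handler written to the expected prototype; unused arguments are tagged. */
static uint_t
softclock(caddr_t arg1 UNUSED, caddr_t arg2 UNUSED)
{
	return (1);			/* interrupt serviced */
}

int
main(void)
{
	register_softint(softclock);
	return (softint_handler(NULL, NULL) == 1 ? 0 : 1);
}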


M usr/src/uts/i86pc/os/cpupm/cpupm_throttle.c => usr/src/uts/i86pc/os/cpupm/cpupm_throttle.c +12 -7
@@ 153,9 153,11 @@ read_status(cpu_acpi_handle_t handle, uint32_t *stat)
/*
 * Transition the current processor to the requested throttling state.
 */
-static void
-cpupm_tstate_transition(uint32_t req_state)
+static int
+cpupm_tstate_transition(xc_arg_t arg1, xc_arg_t arg2 __unused,
+    xc_arg_t arg3 __unused)
{
+	uint32_t req_state = arg1;
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;


@@ 174,7 176,7 @@ cpupm_tstate_transition(uint32_t req_state)
	 */
	ctrl = CPU_ACPI_TSTATE_CTRL(req_tstate);
	if (write_ctrl(handle, ctrl) != 0) {
-		return;
+		return (0);
	}

	/*


@@ 182,7 184,7 @@ cpupm_tstate_transition(uint32_t req_state)
	 * no status value comparison is required.
	 */
	if (CPU_ACPI_TSTATE_STAT(req_tstate) == 0) {
-		return;
+		return (0);
	}

	/* Wait until switch is complete, but bound the loop just in case. */


@@ 197,11 199,14 @@ cpupm_tstate_transition(uint32_t req_state)
	if (CPU_ACPI_TSTATE_STAT(req_tstate) != stat) {
		DTRACE_PROBE(throttle_transition_incomplete);
	}
+	return (0);
}

static void
cpupm_throttle(cpuset_t set,  uint32_t throtl_lvl)
{
+	xc_arg_t xc_arg = (xc_arg_t)throtl_lvl;
+
	/*
	 * If thread is already running on target CPU then just
	 * make the transition request. Otherwise, we'll need to


@@ 209,12 214,12 @@ cpupm_throttle(cpuset_t set,  uint32_t throtl_lvl)
	 */
	kpreempt_disable();
	if (CPU_IN_SET(set, CPU->cpu_id)) {
-		cpupm_tstate_transition(throtl_lvl);
+		cpupm_tstate_transition(xc_arg, 0, 0);
		CPUSET_DEL(set, CPU->cpu_id);
	}
	if (!CPUSET_ISNULL(set)) {
-		xc_call((xc_arg_t)throtl_lvl, 0, 0,
-		    CPUSET2BV(set), (xc_func_t)cpupm_tstate_transition);
+		xc_call(xc_arg, 0, 0,
+		    CPUSET2BV(set), cpupm_tstate_transition);
	}
	kpreempt_enable();
}
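cpupm_tstate_transition() now has the xc_func_t shape itself (three xc_arg_t parameters, int return) and pulls the requested throttle level back out of the first argument, so xc_call() receives the function pointer uncast; pwrnow.c and speedstep.c below get the same treatment for their P-state handlers. A small self-contained sketch of that calling convention; xc_arg_t, xc_func_t and the trivial xc_call() here are modeled on the usage in this diff, not copied from the kernel headers:

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t xc_arg_t;					/* stand-in */
typedef int (*xc_func_t)(xc_arg_t, xc_arg_t, xc_arg_t);		/* stand-in */

/* Stand-in for xc_call(): just runs the handler in place. */
static void
xc_call(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3, xc_func_t func)
{
	(void) func(a1, a2, a3);
}

/* Handler in xc_func_t shape; the throttle level rides in arg1. */
static int
tstate_transition(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint32_t req_state = (uint32_t)arg1;

	(void) arg2;
	(void) arg3;
	printf("throttling to T-state %u\n", req_state);
	return (0);
}

int
main(void)
{
	uint32_t throtl_lvl = 3;

	/* The scalar level is widened into the pointer-sized xc_arg_t. */
	xc_call((xc_arg_t)throtl_lvl, 0, 0, tstate_transition);
	return (0);
}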

M usr/src/uts/i86pc/os/cpupm/pwrnow.c => usr/src/uts/i86pc/os/cpupm/pwrnow.c +7 -4
@@ 110,9 110,11 @@ write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
/*
 * Transition the current processor to the requested state.
 */
-static void
-pwrnow_pstate_transition(uint32_t req_state)
+static int
+pwrnow_pstate_transition(xc_arg_t arg1, xc_arg_t arg2 __unused,
+    xc_arg_t arg3 __unused)
{
+	uint32_t req_state = (uint32_t)arg1;
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;


@@ 137,6 139,7 @@ pwrnow_pstate_transition(uint32_t req_state)

	mach_state->ms_pstate.cma_state.pstate = req_state;
	cpu_set_curr_clock((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000);
+	return (0);
}

static void


@@ 149,12 152,12 @@ pwrnow_power(cpuset_t set, uint32_t req_state)
	 */
	kpreempt_disable();
	if (CPU_IN_SET(set, CPU->cpu_id)) {
-		pwrnow_pstate_transition(req_state);
+		(void) pwrnow_pstate_transition(req_state, 0, 0);
		CPUSET_DEL(set, CPU->cpu_id);
	}
	if (!CPUSET_ISNULL(set)) {
		xc_call((xc_arg_t)req_state, 0, 0,
-		    CPUSET2BV(set), (xc_func_t)pwrnow_pstate_transition);
+		    CPUSET2BV(set), pwrnow_pstate_transition);
	}
	kpreempt_enable();
}

M usr/src/uts/i86pc/os/cpupm/speedstep.c => usr/src/uts/i86pc/os/cpupm/speedstep.c +7 -4
@@ 126,9 126,11 @@ write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
/*
 * Transition the current processor to the requested state.
 */
-void
-speedstep_pstate_transition(uint32_t req_state)
+int
+speedstep_pstate_transition(xc_arg_t arg1, xc_arg_t arg2 __unused,
+    xc_arg_t arg3 __unused)
{
+	uint32_t req_state = (uint32_t)arg1;
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;


@@ 152,6 154,7 @@ speedstep_pstate_transition(uint32_t req_state)

	mach_state->ms_pstate.cma_state.pstate = req_state;
	cpu_set_curr_clock(((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000));
+	return (0);
}

static void


@@ 164,12 167,12 @@ speedstep_power(cpuset_t set, uint32_t req_state)
	 */
	kpreempt_disable();
	if (CPU_IN_SET(set, CPU->cpu_id)) {
-		speedstep_pstate_transition(req_state);
+		(void) speedstep_pstate_transition(req_state, 0, 0);
		CPUSET_DEL(set, CPU->cpu_id);
	}
	if (!CPUSET_ISNULL(set)) {
		xc_call((xc_arg_t)req_state, 0, 0, CPUSET2BV(set),
-		    (xc_func_t)speedstep_pstate_transition);
+		    speedstep_pstate_transition);
	}
	kpreempt_enable();
}

M usr/src/uts/i86pc/os/dtrace_subr.c => usr/src/uts/i86pc/os/dtrace_subr.c +4 -3
@@ 134,9 134,10 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
}

static int
-dtrace_xcall_func(dtrace_xcall_t func, void *arg)
+dtrace_xcall_func(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3 __unused)
{
-	(*func)(arg);
+	dtrace_xcall_t func = (dtrace_xcall_t)arg1;
+	(*func)((void*)arg2);

	return (0);
}


@@ 157,7 158,7 @@ dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)

	kpreempt_disable();
	xc_sync((xc_arg_t)func, (xc_arg_t)arg, 0, CPUSET2BV(set),
-	    (xc_func_t)dtrace_xcall_func);
+	    dtrace_xcall_func);
	kpreempt_enable();
}
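dtrace_xcall_func() goes the other way: the cross-call argument slots carry a function pointer and its argument, and the handler reconstitutes both before making the call. A sketch of that round trip with the same kind of local stand-ins (not the real kernel interfaces):

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t xc_arg_t;			/* stand-in */
typedef void (*dtrace_xcall_t)(void *);		/* stand-in */

/* Generic three-argument handler; the third slot is unused here. */
static int
xcall_func(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	dtrace_xcall_t func = (dtrace_xcall_t)arg1;

	(void) arg3;
	(*func)((void *)arg2);
	return (0);
}

static void
say(void *arg)
{
	printf("%s\n", (char *)arg);
}

int
main(void)
{
	char msg[] = "hello from the cross call";

	/* Both the callback and its argument fit in pointer-sized slots. */
	return (xcall_func((xc_arg_t)say, (xc_arg_t)msg, 0));
}

Because xc_arg_t is pointer-sized, both conversions are value-preserving, and the only casts the compiler sees are integer-to-pointer and back, which -Wcast-function-type does not flag.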


M usr/src/uts/i86pc/os/fastboot.c => usr/src/uts/i86pc/os/fastboot.c +4 -3
@@ 1293,8 1293,9 @@ err_out:

/* ARGSUSED */
static int
-fastboot_xc_func(fastboot_info_t *nk, xc_arg_t unused2, xc_arg_t unused3)
+fastboot_xc_func(xc_arg_t arg1, xc_arg_t arg2 __unused, xc_arg_t arg3 __unused)
{
+	fastboot_info_t *nk = (fastboot_info_t *)arg1;
	void (*fastboot_func)(fastboot_info_t *);
	fastboot_file_t	*fb = &nk->fi_files[FASTBOOT_SWTCH];
	fastboot_func = (void (*)())(fb->fb_va);


@@ 1372,11 1373,11 @@ fast_reboot()
		CPUSET_ZERO(cpuset);
		CPUSET_ADD(cpuset, bootcpuid);
		xc_priority((xc_arg_t)&newkernel, 0, 0, CPUSET2BV(cpuset),
-		    (xc_func_t)fastboot_xc_func);
+		    fastboot_xc_func);

		panic_idle();
	} else
-		(void) fastboot_xc_func(&newkernel, 0, 0);
+		(void) fastboot_xc_func((xc_arg_t)&newkernel, 0, 0);
}



M usr/src/uts/i86pc/os/machdep.c => usr/src/uts/i86pc/os/machdep.c +1 -1
@@ 403,7 403,7 @@ stop_other_cpus(void)
	cpuset_t xcset;

	CPUSET_ALL_BUT(xcset, CPU->cpu_id);
-	xc_priority(0, 0, 0, CPUSET2BV(xcset), (xc_func_t)mach_cpu_halt);
+	xc_priority(0, 0, 0, CPUSET2BV(xcset), mach_cpu_halt);
	restore_int_flag(s);
}


M usr/src/uts/i86pc/os/mp_call.c => usr/src/uts/i86pc/os/mp_call.c +2 -2
@@ 37,7 37,7 @@

/*
 * Interrupt another CPU.
- * 	This is useful to make the other CPU go through a trap so that
+ *	This is useful to make the other CPU go through a trap so that
 *	it recognizes an address space trap (AST) for preempting a thread.
 *
 *	It is possible to be preempted here and be resumed on the CPU


@@ 87,7 87,7 @@ cpu_call(cpu_t *cp, cpu_call_func_t func, uintptr_t arg1, uintptr_t arg2)
	} else {
		CPUSET_ONLY(set, cp->cpu_id);
		xc_call((xc_arg_t)arg1, (xc_arg_t)arg2, 0, CPUSET2BV(set),
-		    (xc_func_t)func);
+		    (xc_func_t)(uintptr_t)func);
	}
	kpreempt_enable();
}

M usr/src/uts/i86pc/os/mp_pc.c => usr/src/uts/i86pc/os/mp_pc.c +5 -2
@@ 440,15 440,18 @@ mach_cpucontext_free(struct cpu *cp, void *arg, int err)
/*
 * "Enter monitor."  Called via cross-call from stop_other_cpus().
 */
-void
-mach_cpu_halt(char *msg)
+int
+mach_cpu_halt(xc_arg_t arg1, xc_arg_t arg2 __unused, xc_arg_t arg3 __unused)
{
+	char *msg = (char *)arg1;
+
	if (msg)
		prom_printf("%s\n", msg);

	/*CONSTANTCONDITION*/
	while (1)
		;
+	return (0);
}

void

M usr/src/uts/i86pc/os/startup.c => usr/src/uts/i86pc/os/startup.c +1 -1
@@ 2360,7 2360,7 @@ startup_end(void)
	 */
	for (i = DDI_IPL_1; i <= DDI_IPL_10; i++) {
		(void) add_avsoftintr((void *)&softlevel_hdl[i-1], i,
-		    (avfunc)ddi_periodic_softintr, "ddi_periodic",
+		    (avfunc)(uintptr_t)ddi_periodic_softintr, "ddi_periodic",
		    (caddr_t)(uintptr_t)i, NULL);
	}


M usr/src/uts/i86pc/sys/machsystm.h => usr/src/uts/i86pc/sys/machsystm.h +3 -3
@@ 70,7 70,7 @@ typedef struct mach_cpu_add_arg {
} mach_cpu_add_arg_t;

extern void mach_cpu_idle(void);
-extern void mach_cpu_halt(char *);
+extern int mach_cpu_halt(xc_arg_t, xc_arg_t, xc_arg_t);
extern int mach_cpu_start(cpu_t *, void *);
extern int mach_cpuid_start(processorid_t, void *);
extern int mach_cpu_stop(cpu_t *, void *);


@@ 106,8 106,8 @@ struct memconf {

struct system_hardware {
	int		hd_nodes;		/* number of nodes */
-	int		hd_cpus_per_node; 	/* max cpus in a node */
-	struct memconf 	hd_mem[MAXNODES];
+	int		hd_cpus_per_node;	/* max cpus in a node */
+	struct memconf	hd_mem[MAXNODES];
						/*
						 * memory layout for each
						 * node.

M usr/src/uts/i86xpv/os/mp_xen.c => usr/src/uts/i86xpv/os/mp_xen.c +5 -2
@@ 558,12 558,15 @@ mach_cpu_pause(volatile char *safe)
	}
}

-void
-mach_cpu_halt(char *msg)
+int
+mach_cpu_halt(xc_arg_t arg1, xc_arg_t arg2 __unused, xc_arg_t arg3 __unused)
{
+	char *msg = (char *)arg1;
+
	if (msg)
		prom_printf("%s\n", msg);
	(void) xen_vcpu_down(CPU->cpu_id);
+	return (0);
}

/*ARGSUSED*/

M usr/src/uts/intel/ia32/os/desctbls.c => usr/src/uts/intel/ia32/os/desctbls.c +6 -6
@@ 103,7 103,7 @@ user_desc_t	*gdt0;
desctbr_t	gdt0_default_r;
#endif

-gate_desc_t	*idt0; 		/* interrupt descriptor table */
+gate_desc_t	*idt0;		/* interrupt descriptor table */
#if defined(__i386)
desctbr_t	idt0_default_r;		/* describes idt0 in IDTR format */
#endif


@@ 147,10 147,10 @@ void (*(fasttable[]))(void) = {
	fast_null,			/* T_FNULL routine */
	fast_null,			/* T_FGETFP routine (initially null) */
	fast_null,			/* T_FSETFP routine (initially null) */
-	(void (*)())get_hrtime,		/* T_GETHRTIME */
-	(void (*)())gethrvtime,		/* T_GETHRVTIME */
-	(void (*)())get_hrestime,	/* T_GETHRESTIME */
-	(void (*)())getlgrp		/* T_GETLGRP */
+	(void (*)())(uintptr_t)get_hrtime,	/* T_GETHRTIME */
+	(void (*)())(uintptr_t)gethrvtime,	/* T_GETHRVTIME */
+	(void (*)())(uintptr_t)get_hrestime,	/* T_GETHRESTIME */
+	(void (*)())(uintptr_t)getlgrp		/* T_GETLGRP */
};

/*


@@ 1342,7 1342,7 @@ void
brand_interpositioning_enable(void)
{
	gate_desc_t	*idt = CPU->cpu_idt;
-	int 		i;
+	int		i;

	ASSERT(curthread->t_preempt != 0 || getpil() >= DISP_LEVEL);


M usr/src/uts/intel/kdi/kdi_idt.c => usr/src/uts/intel/kdi/kdi_idt.c +6 -5
@@ 298,7 298,8 @@ kdi_cpu_init(void)
 * loaded at boot.
 */
static int
-kdi_cpu_activate(void)
+kdi_cpu_activate(xc_arg_t arg1 __unused, xc_arg_t arg2 __unused,
+    xc_arg_t arg3 __unused)
{
	kdi_idt_gates_install(KCS_SEL, KDI_IDT_SAVE);
	return (0);


@@ 346,13 347,13 @@ kdi_activate(kdi_main_t main, kdi_cpusave_t *cpusave, uint_t ncpusave)
	if (boothowto & RB_KMDB) {
		kdi_idt_gates_install(KMDBCODE_SEL, KDI_IDT_NOSAVE);
	} else {
-		xc_call(0, 0, 0, CPUSET2BV(cpuset),
-		    (xc_func_t)kdi_cpu_activate);
+		xc_call(0, 0, 0, CPUSET2BV(cpuset), kdi_cpu_activate);
	}
}

static int
-kdi_cpu_deactivate(void)
+kdi_cpu_deactivate(xc_arg_t arg1 __unused, xc_arg_t arg2 __unused,
+    xc_arg_t arg3 __unused)
{
	kdi_idt_gates_restore();
	return (0);


@@ 364,7 365,7 @@ kdi_deactivate(void)
	cpuset_t cpuset;
	CPUSET_ALL(cpuset);

-	xc_call(0, 0, 0, CPUSET2BV(cpuset), (xc_func_t)kdi_cpu_deactivate);
+	xc_call(0, 0, 0, CPUSET2BV(cpuset), kdi_cpu_deactivate);
	kdi_nmemranges = 0;
}