#undef DIAGNOSTIC
#define DIAGNOSTIC 1
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/ubc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/ubc.h>
#include <sys/ucred.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <kern/zalloc.h>
#if DIAGNOSTIC
#if defined(assert)
/* #undef takes a bare identifier; "#undef assert()" is invalid preprocessor syntax. */
#undef assert
#endif
/* Diagnostic builds: panic with file/line/condition text instead of kern/assert.h. */
#define assert(cond) \
	((void) ((cond) ? 0 : panic("%s:%d (%s)", __FILE__, __LINE__, # cond)))
#else
#include <kern/assert.h>
#endif
/* Zone from which ubc_info structures are allocated (created in ubc_init()). */
struct zone *ubc_info_zone;
/*
 * Take the vnode interlock; serializes access to the vnode's UBC state.
 */
static __inline__ void
ubc_lock(struct vnode *vp)
{
	simple_lock(&vp->v_interlock);
}
/*
 * Release the vnode interlock taken by ubc_lock().
 */
static __inline__ void
ubc_unlock(struct vnode *vp)
{
	simple_unlock(&vp->v_interlock);
}
/*
 * Mark the vnode's ubc_info busy, sleeping until any other holder is done.
 * Returns:
 *   0 - no ubc_info exists (nothing to mark)
 *   1 - busy state acquired; caller must balance with ubc_unbusy()
 *   2 - the calling thread already owns the busy state (recursive entry)
 */
static int
ubc_busy(struct vnode *vp)
{
	register struct ubc_info *uip;

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;

	while (ISSET(uip->ui_flags, UI_BUSY)) {
		/* Recursive entry by the owning thread is permitted. */
		if (uip->ui_owner == (void *)current_act())
			return (2);

		SET(uip->ui_flags, UI_WANTED);
		(void) tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "ubcbusy", 0);

		/* The ubc_info may have been torn down while we slept. */
		if (!UBCINFOEXISTS(vp))
			return (0);
	}
	uip->ui_owner = (void *)current_act();
	SET(uip->ui_flags, UI_BUSY);

	return (1);
}
/*
 * Drop the busy state taken by ubc_busy() and wake any waiting threads.
 * Safe to call after the ubc_info has been torn down; in that case only
 * the wakeup is issued (waiters re-check UBCINFOEXISTS themselves).
 */
static void
ubc_unbusy(struct vnode *vp)
{
	register struct ubc_info *uip;

	if (!UBCINFOEXISTS(vp)) {
		wakeup((caddr_t)&vp->v_ubcinfo);
		return;
	}
	uip = vp->v_ubcinfo;
	CLR(uip->ui_flags, UI_BUSY);
	uip->ui_owner = (void *)NULL;

	if (ISSET(uip->ui_flags, UI_WANTED)) {
		CLR(uip->ui_flags, UI_WANTED);
		wakeup((caddr_t)&vp->v_ubcinfo);
	}
}
__private_extern__ void
ubc_init()
{
int i;
i = (vm_size_t) sizeof (struct ubc_info);
ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
return;
}
/*
 * Allocate and initialize the ubc_info for a vnode and bind it to a
 * vnode pager / named memory object.  The VUINIT flag serializes
 * concurrent initializers: late arrivals sleep on the vnode and then
 * return 0 once the first thread finishes.  Returns 0 on success or
 * EINVAL / an errno from VOP_GETATTR.
 */
int
ubc_info_init(struct vnode *vp)
{
	register struct ubc_info *uip;
	void * pager;
	struct vattr vattr;
	struct proc *p = current_proc();
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	if (!UBCISVALID(vp))
		return (EINVAL);

	ubc_lock(vp);
	if (ISSET(vp->v_flag, VUINIT)) {
		/*
		 * Another thread is initializing this vnode; wait for it
		 * to finish, then report success.
		 */
		while (ISSET(vp->v_flag, VUINIT)) {
			SET(vp->v_flag, VUWANT);
			ubc_unlock(vp);
			(void) tsleep((caddr_t)vp, PINOD, "ubcinfo", 0);
			ubc_lock(vp);
		}
		ubc_unlock(vp);
		return (0);
	} else {
		SET(vp->v_flag, VUINIT);
	}

	uip = vp->v_ubcinfo;
	if ((uip == UBC_INFO_NULL) || (uip == UBC_NOINFO)) {
		/* Allocate outside the interlock; zalloc may block. */
		ubc_unlock(vp);
		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		uip->ui_pager = MEMORY_OBJECT_NULL;
		uip->ui_control = MEMORY_OBJECT_CONTROL_NULL;
		uip->ui_flags = UI_INITED;
		uip->ui_vnode = vp;
		uip->ui_ucred = NOCRED;
		uip->ui_refcount = 1;	/* base reference held by the vnode */
		uip->ui_size = 0;
		uip->ui_mapped = 0;
		uip->ui_owner = (void *)NULL;
		ubc_lock(vp);
	}
#if DIAGNOSTIC
	else
		Debugger("ubc_info_init: already");
#endif

	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

#if 0
	if(ISSET(uip->ui_flags, UI_HASPAGER))
		goto done;
#endif

	vp->v_ubcinfo = uip;
	SET(uip->ui_flags, UI_HASPAGER);
	ubc_unlock(vp);

	/* Create the vnode pager and a named memory object bound to it. */
	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);
	ubc_setpager(vp, pager);

	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	/* Drop the setup reference; the control object keeps the pager alive. */
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;
	SET(uip->ui_flags, UI_HASOBJREF);

	error = vnode_pager_vget(vp);
	if (error)
		panic("ubc_info_init: vnode_pager_vget error = %d", error);

	/* Seed the cached file size from the vnode's attributes. */
	error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
	ubc_lock(vp);
	uip->ui_size = (error ? 0: vattr.va_size);

done:	/* NOTE(review): reached only by fall-through; the goto above is #if 0'd */
	CLR(vp->v_flag, VUINIT);
	if (ISSET(vp->v_flag, VUWANT)) {
		CLR(vp->v_flag, VUWANT);
		ubc_unlock(vp);
		wakeup((caddr_t)vp);
	} else
		ubc_unlock(vp);

	return (error);
}
/*
 * Tear down a ubc_info: drop its credential reference (if any), drop
 * its memory object control reference (if any), and return the
 * structure to the zone.
 */
static void
ubc_info_free(struct ubc_info *uip)
{
	struct ucred *cred = uip->ui_ucred;

	if (cred != NOCRED) {
		uip->ui_ucred = NOCRED;
		crfree(cred);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	zfree(ubc_info_zone, (vm_offset_t)uip);
}
/*
 * Drop one reference on a ubc_info.  Note the post-decrement: the
 * structure is freed when the count was 1 on entry.  Before freeing,
 * wake any thread sleeping on the vnode's ubcinfo channel.
 */
void
ubc_info_deallocate(struct ubc_info *uip)
{
	assert(uip->ui_refcount > 0);

	if (uip->ui_refcount-- == 1) {
		struct vnode *vp;

		vp = uip->ui_vnode;
		if (ISSET(uip->ui_flags, UI_WANTED)) {
			CLR(uip->ui_flags, UI_WANTED);
			wakeup((caddr_t)&vp->v_ubcinfo);
		}

		ubc_info_free(uip);
	}
}
/*
 * Tell the VM about a change in file size.  Growing (or unchanged)
 * files only update the cached size.  Shrinking flushes stale pages:
 * when the new EOF is page aligned everything past it is discarded;
 * otherwise the partial last page is first written back (preserving
 * its valid head) and then the pages beyond it are discarded.
 * Returns 1 on success, 0 if a lock_request failed or the vnode has
 * no usable UBC state.
 */
int
ubc_setsize(struct vnode *vp, off_t nsize)
{
	off_t osize;		/* file size before this call */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (nsize < (off_t)0)
		return (0);

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;
	uip->ui_size = nsize;	/* record the new size up front */

	/* Growing the file requires no page work. */
	if (nsize >= osize)
		return (1);

	lastpg = trunc_page_64(nsize);		/* page containing new EOF */
	olastpgend = round_page_64(osize);	/* end of the old last page */
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);	/* offset of new EOF within its page */

	if (!lastoff) {
		/* New EOF is page aligned: discard everything past it. */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg),
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
		return ((kret == KERN_SUCCESS) ? 1 : 0);
	}

	/* Write the (possibly dirty) partial last page back first. */
	kret = memory_object_lock_request(control,
		(memory_object_offset_t)lastpg,
		PAGE_SIZE_64,
		MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		VM_PROT_NO_CHANGE);

	if (kret == KERN_SUCCESS) {
		/* Then discard all pages from the new EOF's page onward. */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg),
			MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
			VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	} else
		printf("ubc_setsize: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
/*
 * Return the cached file size.  NOTE(review): unlike the other entry
 * points this does not check UBCINFOEXISTS(); callers must guarantee a
 * valid ubc_info or this dereferences UBC_NOINFO/UBC_INFO_NULL.
 */
off_t
ubc_getsize(struct vnode *vp)
{
	return (vp->v_ubcinfo->ui_size);
}
/*
 * Mark the vnode's cached pages as not worth keeping: age the vnode,
 * set UI_DONTCACHE, tell the VM the object may not cache pages, then
 * drop the named object reference.  Returns 1 on success, 0 when
 * there is no ubc_info or the attribute change failed.
 */
int
ubc_uncache(struct vnode *vp)
{
	kern_return_t kret;
	struct ubc_info *uip;
	int recursed;
	memory_object_control_t control;
	memory_object_perf_info_data_t perf;

	if (!UBCINFOEXISTS(vp))
		return (0);

	if ((recursed = ubc_busy(vp)) == 0)
		return (0);

	uip = vp->v_ubcinfo;

	assert(uip != UBC_INFO_NULL);

	/* Age the vnode so the name cache lets go of it sooner. */
	vagevp(vp);

	SET(uip->ui_flags, UI_DONTCACHE);

	control = uip->ui_control;
	assert(control);

	/* Tell the VM not to keep this object's pages once they are unmapped. */
	perf.cluster_size = PAGE_SIZE;
	perf.may_cache = FALSE;
	kret = memory_object_change_attributes(control,
		MEMORY_OBJECT_PERFORMANCE_INFO,
		(memory_object_info_t) &perf,
		MEMORY_OBJECT_PERF_INFO_COUNT);
	if (kret != KERN_SUCCESS) {
		printf("ubc_uncache: memory_object_change_attributes_named "
			"kret = %d", kret);
		if (recursed == 1)
			ubc_unbusy(vp);
		return (0);
	}

	ubc_release_named(vp);

	if (recursed == 1)
		ubc_unbusy(vp);
	return (1);
}
/*
 * Clean and uncache every UBC-backed vnode on a mount point prior to
 * unmount.  Returns 1 if everything flushed cleanly, 0 if any vnode
 * was busy or failed.  The mntvnode_slock dance restarts the whole
 * scan whenever a vnode migrates to another mount while we slept.
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	struct proc *p = current_proc();
	struct vnode *vp, *nvp;
	int ret = 1;

loop:
	simple_lock(&mntvnode_slock);
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		/* The list changed underneath us: restart the scan. */
		if (vp->v_mount != mp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		simple_unlock(&mntvnode_slock);
		if (UBCINFOEXISTS(vp)) {
			/* A vnode we cannot get counts as a failure. */
			if (vget(vp, 0, p)) {
				ret = 0;
				simple_lock(&mntvnode_slock);
				continue;
			}
			ret &= ubc_clean(vp, 0);
			ret &= ubc_uncache(vp);
			vrele(vp);
		}
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (ret);
}
/*
 * Walk the mount list from the tail and flush/uncache the UBC state
 * of every mount; used at system shutdown time.
 */
__private_extern__ void
ubc_unmountall()
{
	struct mount *mp;
	struct mount *prev;

	mp = mountlist.cqh_last;
	while (mp != (void *)&mountlist) {
		prev = mp->mnt_list.cqe_prev;	/* grab before mp may go away */
		(void) ubc_umount(mp);
		mp = prev;
	}
}
/*
 * Return the credential cached on the vnode's ubc_info, or NOCRED when
 * the vnode is not eligible for UBC.
 */
struct ucred *
ubc_getcred(struct vnode *vp)
{
	if (UBCINVALID(vp))
		return (NOCRED);

	return (vp->v_ubcinfo->ui_ucred);
}
/*
 * Cache the process's credential on the vnode's ubc_info for later
 * pageout I/O.  Only the first caller's credential is kept.  Returns
 * 1 on success, 0 when the vnode is not eligible for UBC.
 */
int
ubc_setcred(struct vnode *vp, struct proc *p)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return (0);

	uip = vp->v_ubcinfo;
	if (uip->ui_ucred == NOCRED) {
		crhold(p->p_ucred);
		uip->ui_ucred = p->p_ucred;
	}
	return (1);
}
/*
 * Return the memory object pager associated with the vnode, or 0 when
 * the vnode is not eligible for UBC.
 */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINVALID(vp))
		return (0);

	return (vp->v_ubcinfo->ui_pager);
}
/*
 * Return the memory object control port for a vnode.
 * UBC_FOR_PAGEOUT callers get it without taking the busy lock (the
 * pageout path must not sleep here).  With UBC_HOLDOBJECT, re-acquire
 * the named object reference if it had been released; on failure
 * MEMORY_OBJECT_CONTROL_NULL is returned.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, int flags)
{
	struct ubc_info *uip;
	int recursed;
	memory_object_control_t control;

	if (UBCINVALID(vp))
		return (0);

	if (flags & UBC_FOR_PAGEOUT)
		return(vp->v_ubcinfo->ui_control);

	if ((recursed = ubc_busy(vp)) == 0)
		return (0);

	uip = vp->v_ubcinfo;
	control = uip->ui_control;

	if ((flags & UBC_HOLDOBJECT) && (!ISSET(uip->ui_flags, UI_HASOBJREF))) {
		/*
		 * Pin the ubc_info with a temporary reference while we call
		 * out to the VM; dropped below via ubc_info_deallocate().
		 */
		ubc_lock(vp);
		uip->ui_refcount++;
		ubc_unlock(vp);

		if (memory_object_recover_named(control, TRUE) == KERN_SUCCESS) {
			SET(uip->ui_flags, UI_HASOBJREF);
		} else {
			control = MEMORY_OBJECT_CONTROL_NULL;
		}
		if (recursed == 1)
			ubc_unbusy(vp);
		ubc_info_deallocate(uip);
	} else {
		if (recursed == 1)
			ubc_unbusy(vp);
	}

	return (control);
}
/*
 * Record the memory object pager on the vnode's ubc_info.  Returns 1
 * on success, 0 when the vnode is not eligible for UBC.
 */
int
ubc_setpager(struct vnode *vp, memory_object_t pager)
{
	if (UBCINVALID(vp))
		return (0);

	vp->v_ubcinfo->ui_pager = pager;
	return (1);
}
/*
 * Set the given bits in the ubc_info flags.  Returns 1 on success, 0
 * when the vnode is not eligible for UBC.
 */
int
ubc_setflags(struct vnode * vp, int flags)
{
	if (UBCINVALID(vp))
		return (0);

	SET(vp->v_ubcinfo->ui_flags, flags);

	return (1);
}
/*
 * Clear the given bits in the ubc_info flags.  Returns 1 on success,
 * 0 when the vnode is not eligible for UBC.
 */
int
ubc_clearflags(struct vnode * vp, int flags)
{
	if (UBCINVALID(vp))
		return (0);

	CLR(vp->v_ubcinfo->ui_flags, flags);

	return (1);
}
/*
 * Test whether any of the given bits are set in the ubc_info flags.
 * Returns the masked bits (nonzero if set), or 0 when the vnode is
 * not eligible for UBC.
 */
int
ubc_issetflags(struct vnode * vp, int flags)
{
	if (UBCINVALID(vp))
		return (0);

	return (ISSET(vp->v_ubcinfo->ui_flags, flags));
}
/*
 * Convert a logical block number to a byte offset via the filesystem's
 * VOP_BLKTOOFF.  Returns (off_t)-1 on an invalid vnode or VOP failure.
 */
off_t
ubc_blktooff(struct vnode *vp, daddr_t blkno)
{
	off_t off;

	if (UBCINVALID(vp))
		return ((off_t)-1);

	if (VOP_BLKTOOFF(vp, blkno, &off))
		off = -1;

	return (off);
}
/*
 * Convert a byte offset to a logical block number via the filesystem's
 * VOP_OFFTOBLK.  Returns (daddr_t)-1 on an invalid vnode or VOP failure.
 */
daddr_t
ubc_offtoblk(struct vnode *vp, off_t offset)
{
	daddr_t lbn;

	if (UBCINVALID(vp))
		return ((daddr_t)-1);

	if (VOP_OFFTOBLK(vp, offset, &lbn))
		lbn = -1;

	return (lbn);
}
/*
 * Push all dirty pages of the file back to disk; with "invalidate"
 * set, also discard the resident pages afterwards.  Pending cluster
 * write state is dropped first.  Returns 1 on success, 0 on failure
 * or when the vnode has no UBC state.
 */
int
ubc_clean(struct vnode *vp, int invalidate)
{
	off_t size;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;
	int flags = 0;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	/* Invalidate = flush the data and then throw the pages away. */
	if (invalidate)
		flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	uip = vp->v_ubcinfo;
	size = uip->ui_size;

	control = uip->ui_control;
	assert(control);

	/* Drop pending cluster-write state before asking the VM to clean. */
	cluster_release(vp);
	vp->v_clen = 0;

	kret = memory_object_lock_request(control,
		(memory_object_offset_t)0,
		(memory_object_size_t)round_page_64(size),
		MEMORY_OBJECT_RETURN_ALL, flags,
		VM_PROT_NO_CHANGE);

	if (kret != KERN_SUCCESS)
		printf("ubc_clean: clean failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
/*
 * Write the file's dirty pages back to disk without discarding them.
 * Also clears the vnode's cluster bookkeeping (VHASDIRTY, v_clen).
 * Returns 1 on success, 0 on failure or when no UBC state exists.
 */
int
ubc_pushdirty(struct vnode *vp)
{
	off_t size;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;
	size = uip->ui_size;

	control = uip->ui_control;
	assert(control);

	vp->v_flag &= ~VHASDIRTY;
	vp->v_clen = 0;

	/* Return (write) dirty pages; keep them resident. */
	kret = memory_object_lock_request(control,
		(memory_object_offset_t)0,
		(memory_object_size_t)round_page_64(size),
		MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		VM_PROT_NO_CHANGE);

	if (kret != KERN_SUCCESS)
		printf("ubc_pushdirty: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
/*
 * Write back dirty pages within [offset, offset+size) only, rounded
 * up to page boundaries.  Unlike ubc_pushdirty() this leaves the
 * vnode's cluster bookkeeping untouched.  Returns 1 on success, 0 on
 * failure or when no UBC state exists.
 */
int
ubc_pushdirty_range(struct vnode *vp, off_t offset, off_t size)
{
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	uip = vp->v_ubcinfo;

	control = uip->ui_control;
	assert(control);

	kret = memory_object_lock_request(control,
		(memory_object_offset_t)offset,
		(memory_object_size_t)round_page_64(size),
		MEMORY_OBJECT_RETURN_DIRTY, FALSE,
		VM_PROT_NO_CHANGE);

	if (kret != KERN_SUCCESS)
		printf("ubc_pushdirty_range: flush failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
/*
 * Take a reference on the vnode's ubc_info, re-acquiring the named
 * memory object reference if it had been released.  Waits out a
 * concurrent ubc_info_init() (VUINIT) before proceeding.  Returns 1
 * on success, 0 when the vnode has no usable UBC state or recovery
 * of the named reference fails.
 *
 * Fix: removed the unused local "memory_object_control_t object;".
 */
int
ubc_hold(struct vnode *vp)
{
	struct ubc_info *uip;
	int recursed;

retry:

	if (UBCINVALID(vp))
		return (0);

	ubc_lock(vp);
	if (ISSET(vp->v_flag, VUINIT)) {
		/*
		 * Initialization is in progress; wait for it to finish,
		 * then start over so we observe the completed state.
		 */
		while (ISSET(vp->v_flag, VUINIT)) {
			SET(vp->v_flag, VUWANT);
			ubc_unlock(vp);
			(void) tsleep((caddr_t)vp, PINOD, "ubchold", 0);
			ubc_lock(vp);
		}
		ubc_unlock(vp);
		goto retry;
	}
	ubc_unlock(vp);

	if ((recursed = ubc_busy(vp)) == 0) {
		/* Only a dying vnode should lack UBC state at this point. */
		assert(UBCINVALID(vp) ||
			((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE)));
		return (0);
	}

	uip = vp->v_ubcinfo;
	assert(uip->ui_control != MEMORY_OBJECT_CONTROL_NULL);

	ubc_lock(vp);
	uip->ui_refcount++;
	ubc_unlock(vp);

	if (!ISSET(uip->ui_flags, UI_HASOBJREF)) {
		if (memory_object_recover_named(uip->ui_control, TRUE)
			!= KERN_SUCCESS) {
			if (recursed == 1)
				ubc_unbusy(vp);
			ubc_info_deallocate(uip);	/* drop the reference taken above */
			return (0);
		}
		SET(uip->ui_flags, UI_HASOBJREF);
	}
	if (recursed == 1)
		ubc_unbusy(vp);

	assert(uip->ui_refcount > 0);

	return (1);
}
/*
 * Drop a reference taken by ubc_hold().  When the count returns to the
 * base reference (1) and UI_DONTCACHE is set, the named object
 * reference is released too.  Panics on refcount underflow or when a
 * vnode that should have UBC state has none (outside of teardown).
 */
void
ubc_rele(struct vnode *vp)
{
	struct ubc_info *uip;

	if (UBCINVALID(vp))
		return;

	if (!UBCINFOEXISTS(vp)) {
		/* Tolerated only while the vnode is being torn down. */
		if ((vp->v_flag & VXLOCK) || (vp->v_flag & VTERMINATE))
			return;
		panic("ubc_rele: can not");
	}

	uip = vp->v_ubcinfo;

	if (uip->ui_refcount == 1)
		panic("ubc_rele: ui_refcount");

	--uip->ui_refcount;

	if ((uip->ui_refcount == 1)
		&& ISSET(uip->ui_flags, UI_DONTCACHE))
		(void) ubc_release_named(vp);

	return;
}
/*
 * Note that the file has been memory mapped: set UI_WASMAPPED and the
 * ui_mapped flag under the vnode interlock.  No-op for vnodes without
 * UBC state.
 */
__private_extern__ void
ubc_map(struct vnode *vp)
{
	struct ubc_info *info;

	if (UBCINVALID(vp) || !UBCINFOEXISTS(vp))
		return;

	ubc_lock(vp);
	info = vp->v_ubcinfo;
	SET(info->ui_flags, UI_WASMAPPED);
	info->ui_mapped = 1;
	ubc_unlock(vp);
}
/*
 * Give the named memory object reference back to the VM, letting it
 * decide whether to keep the object cached.  Done only when we hold
 * the object reference, nobody else references the info, and the file
 * is not mapped.  A deleted vnode is truncated to zero first so no
 * pages survive.  Returns 1 if the reference was released, else 0.
 */
int
ubc_release_named(struct vnode *vp)
{
	struct ubc_info *uip;
	int recursed;
	memory_object_control_t control;
	kern_return_t kret = KERN_FAILURE;

	if (UBCINVALID(vp))
		return (0);

	if ((recursed = ubc_busy(vp)) == 0)
		return (0);
	uip = vp->v_ubcinfo;

	/* Cannot release held or mapped vnodes. */
	if (ISSET(uip->ui_flags, UI_HASOBJREF) &&
		(uip->ui_refcount == 1) && !uip->ui_mapped) {
		control = uip->ui_control;
		assert(control);

		/* Deleted file: dump all the pages before letting go. */
		if (vp->v_flag & VDELETED) {
			ubc_setsize(vp, (off_t)0);
		}

		CLR(uip->ui_flags, UI_HASOBJREF);
		kret = memory_object_release_name(control,
			MEMORY_OBJECT_RESPECT_CACHE);
	}

	if (recursed == 1)
		ubc_unbusy(vp);
	return ((kret != KERN_SUCCESS) ? 0 : 1);
}
/*
 * Historical entry point; named object references are dropped through
 * ubc_release_named() instead.  Always reports 0.
 */
int
ubc_release(struct vnode *vp)
{
	return (0);
}
/*
 * Destroy the memory object backing the vnode and wait for the
 * termination handshake (VTERMINATE/VTERMWANT) to complete.  Returns
 * 1 when the object is gone (or never existed), 0 when the info is
 * still referenced or the destroy failed.
 *
 * Fixes: removed unused local "struct proc *p;"; corrected
 * "teminating" -> "terminating" in the panic message.
 */
__private_extern__ int
ubc_destroy_named(
	struct vnode	*vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	if (!UBCINFOEXISTS(vp))
		return (1);
	uip = vp->v_ubcinfo;

	/* Somebody else still holds the info; cannot tear it down. */
	if (uip->ui_refcount > 1)
		return (0);

	control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {

		if (ISSET(vp->v_flag, VTERMINATE))
			panic("ubc_destroy_named: already terminating");
		SET(vp->v_flag, VTERMINATE);

		kret = memory_object_destroy(control, 0);
		if (kret != KERN_SUCCESS)
			return (0);

		/* Sleep until the pager's termination path clears VTERMINATE. */
		while (ISSET(vp->v_flag, VTERMINATE)) {
			SET(vp->v_flag, VTERMWANT);
			(void)tsleep((caddr_t)&vp->v_ubcinfo,
				PINOD, "ubc_destroy_named", 0);
		}
	}
	return (1);
}
/*
 * Discard (without writing back) all cached pages covering the byte
 * range [offset, offset+size); the range is widened outward to page
 * boundaries.  Returns 1 on success, 0 on failure or when no UBC
 * state exists.
 */
int
ubc_invalidate(struct vnode *vp, off_t offset, size_t size)
{
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret;
	off_t toff;	/* page-aligned start of the range */
	size_t tsize;	/* page-rounded length of the range */

	if (UBCINVALID(vp))
		return (0);

	if (!UBCINFOEXISTS(vp))
		return (0);

	toff = trunc_page_64(offset);
	tsize = (size_t)(round_page_64(offset+size) - toff);
	uip = vp->v_ubcinfo;
	control = uip->ui_control;
	assert(control);

	/* Flush-and-discard; no data is returned to the pager. */
	kret = memory_object_lock_request(control,
		(memory_object_offset_t)toff,
		(memory_object_size_t)tsize,
		MEMORY_OBJECT_RETURN_NONE,
		(MEMORY_OBJECT_DATA_NO_CHANGE| MEMORY_OBJECT_DATA_FLUSH),
		VM_PROT_NO_CHANGE);
	if (kret != KERN_SUCCESS)
		printf("ubc_invalidate: invalidate failed (error = %d)\n", kret);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
/*
 * Report whether the file is in active use beyond the caller's own
 * "busycount" references: extra usecounts exist, or exactly one extra
 * exists and the file is currently mapped.  Returns 1 if in use, 0
 * otherwise (including the misuse case busycount == 0, which is
 * logged).
 *
 * Fix: the diagnostic printf ended with "\v" (vertical tab); it was
 * clearly meant to be a newline.
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if (!UBCINFOEXISTS(vp))
		return (0);

	if (busycount == 0) {
		printf("ubc_isinuse: called without a valid reference"
		    ": v_tag = %d\n", vp->v_tag);
		vprint("ubc_isinuse", vp);
		return (0);
	}

	/* More references than the caller accounts for -> busy. */
	if (vp->v_usecount > busycount+1)
		return (1);

	/* Exactly one extra reference, and it is the mapping -> busy. */
	if ((vp->v_usecount == busycount+1)
	    && (vp->v_ubcinfo->ui_mapped == 1))
		return (1);
	else
		return (0);
}
/*
 * Called when the last mapping of the file goes away: clear ui_mapped
 * and, when no one else holds a reference and UI_DONTCACHE is set,
 * release the named object reference (under the kernel funnel).
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct ubc_info *uip;
	boolean_t funnel_state;

	if (UBCINVALID(vp))
		return;

	if (!UBCINFOEXISTS(vp))
		return;

	ubc_lock(vp);
	uip = vp->v_ubcinfo;
	uip->ui_mapped = 0;
	if ((uip->ui_refcount > 1) || !ISSET(uip->ui_flags, UI_DONTCACHE)) {
		ubc_unlock(vp);
		return;
	}
	ubc_unlock(vp);

	/* ubc_release_named() calls into the VM; take the kernel funnel. */
	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	(void) ubc_release_named(vp);
	(void) thread_funnel_set(kernel_flock, funnel_state);
}
/*
 * Perform a page-level operation (ops) on the page at f_offset of the
 * vnode's memory object, optionally returning the physical page number
 * and page flags.  Fails with KERN_INVALID_ARGUMENT when the vnode has
 * no control object.
 */
kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t ctl;

	ctl = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (ctl == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return memory_object_page_op(ctl,
	    (memory_object_offset_t)f_offset, ops, phys_entryp, flagsp);
}
/*
 * Same as ubc_page_op() for callers that already hold the memory
 * object control; a straight forward to memory_object_page_op().
 */
__private_extern__ kern_return_t
ubc_page_op_with_control(
	memory_object_control_t control,
	off_t f_offset,
	int ops,
	ppnum_t *phys_entryp,
	int *flagsp)
{
	return (memory_object_page_op(control,
		(memory_object_offset_t)f_offset,
		ops,
		phys_entryp,
		flagsp));
}
/*
 * Perform a page-level operation (ops) across the byte range
 * [f_offset_beg, f_offset_end) of the vnode's memory object; "range"
 * reports how far the operation progressed.  Fails with
 * KERN_INVALID_ARGUMENT when the vnode has no control object.
 */
kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
	memory_object_control_t ctl;

	ctl = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (ctl == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return memory_object_range_op(ctl,
	    (memory_object_offset_t)f_offset_beg,
	    (memory_object_offset_t)f_offset_end, ops, range);
}
/*
 * Build a UPL (universal page list) covering [f_offset, f_offset+bufsize)
 * of the file.  bufsize must be page aligned.  On success *uplp holds
 * the UPL and, when plp is non-NULL, *plp points at its internal page
 * list.  Pageout callers pass UPL_FOR_PAGEOUT so the control object is
 * fetched without sleeping on the ubc busy lock.
 *
 * Fixes: *uplp is no longer dereferenced when the upl_request failed
 * (it is indeterminate on failure; *plp is set to NULL instead), and
 * the unused local "off_t file_offset;" was removed.
 */
kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		f_offset,
	long		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	memory_object_control_t control;
	int count;
	int ubcflags;
	kern_return_t kr;

	/* The request must cover a whole number of pages. */
	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_FOR_PAGEOUT) {
		uplflags &= ~UPL_FOR_PAGEOUT;
		ubcflags = UBC_FOR_PAGEOUT;
	} else
		ubcflags = UBC_FLAGS_NONE;

	control = ubc_getobject(vp, ubcflags);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & UPL_WILL_BE_DUMPED) {
		uplflags &= ~UPL_WILL_BE_DUMPED;
		uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
	} else
		uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	count = 0;
	kr = memory_object_upl_request(control, f_offset, bufsize,
		uplp, NULL, &count, uplflags);
	if (plp != NULL) {
		if (kr == KERN_SUCCESS)
			*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
		else
			*plp = NULL;	/* do not read an indeterminate *uplp */
	}
	return kr;
}
/*
 * Map a UPL into the kernel map, returning its kernel virtual address
 * through dst_addr.
 */
kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	kern_return_t kr;

	kr = vm_upl_map(kernel_map, upl, dst_addr);
	return (kr);
}
/*
 * Remove a UPL's mapping from the kernel map.
 */
kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	kern_return_t kr;

	kr = vm_upl_unmap(kernel_map, upl);
	return (kr);
}
/*
 * Commit an entire UPL (all of its pages) and then free it.  Returns
 * the commit status.
 */
kern_return_t
ubc_upl_commit(
	upl_t	upl)
{
	upl_page_info_t *page_list = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kern_return_t result = upl_commit(upl, page_list, MAX_UPL_TRANSFER);

	upl_deallocate(upl);
	return result;
}
/*
 * Commit a subrange of a UPL.  When the caller requested
 * UPL_COMMIT_FREE_ON_EMPTY and the commit drained the UPL, free it.
 */
kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		flags)
{
	upl_page_info_t *page_list;
	boolean_t drained = FALSE;
	kern_return_t result;

	/* Ask for the empty notification so we know when to free. */
	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	page_list = UPL_GET_INTERNAL_PAGE_LIST(upl);
	result = upl_commit_range(upl, offset, size, flags,
	    page_list, MAX_UPL_TRANSFER, &drained);

	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && drained)
		upl_deallocate(upl);

	return result;
}
/*
 * Abort a subrange of a UPL.  When the caller requested
 * UPL_ABORT_FREE_ON_EMPTY and the abort drained the UPL, free it.
 */
kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	vm_offset_t	offset,
	vm_size_t	size,
	int		abort_flags)
{
	boolean_t drained = FALSE;
	kern_return_t result;

	/* Ask for the empty notification so we know when to free. */
	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	result = upl_abort_range(upl, offset, size, abort_flags, &drained);

	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && drained)
		upl_deallocate(upl);

	return result;
}
/*
 * Abort an entire UPL and then free it.  Returns the abort status.
 */
kern_return_t
ubc_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t result = upl_abort(upl, abort_type);

	upl_deallocate(upl);
	return result;
}
/*
 * Return the internal page list of a UPL.
 */
upl_page_info_t *
ubc_upl_pageinfo(
	upl_t	upl)
{
	upl_page_info_t *page_list;

	page_list = UPL_GET_INTERNAL_PAGE_LIST(upl);
	return (page_list);
}