head	1.9;
access;
symbols;
locks
	cherry:1.9; strict;
comment	@# @;


1.9
date	2016.12.13.12.56.39;	author cherry;	state Exp;
branches;
next	1.8;

1.8
date	2016.11.20.11.11.46;	author cherry;	state Exp;
branches;
next	1.7;

1.7
date	2016.11.20.11.08.57;	author cherry;	state Exp;
branches;
next	1.6;

1.6
date	2016.11.17.15.45.02;	author cherry;	state Exp;
branches;
next	1.5;

1.5
date	2016.11.13.11.23.05;	author cherry;	state Exp;
branches;
next	1.4;

1.4
date	2016.10.30.17.28.09;	author cherry;	state Exp;
branches;
next	1.3;

1.3
date	2016.10.28.08.32.31;	author cherry;	state Exp;
branches;
next	1.2;

1.2
date	2016.10.28.08.27.28;	author cherry;	state Exp;
branches;
next	1.1;

1.1
date	2016.10.20.14.29.56;	author cherry;	state Exp;
branches;
next	;


desc
@Initial version for fox review
@


1.9
log
@s/physmem/physseg/
remove rump testing and dependency for tests
@
text
@Index: tests/sys/uvm/Makefile
===================================================================
RCS file: tests/sys/uvm/Makefile
diff -N tests/sys/uvm/Makefile
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ tests/sys/uvm/Makefile	13 Dec 2016 12:44:56 -0000
@@@@ -0,0 +1,24 @@@@
+# $NetBSD$
+#
+
+WARNS?=6
+.include <bsd.own.mk>
+
+TESTSDIR=	${TESTSBASE}/sys/uvm
+CPPFLAGS+=	-I${NETBSDSRCDIR}/sys -I${.CURDIR}/ -D_TEST -g
+
+# Depend on the kernel source files too
+DPSRCS=		${NETBSDSRCDIR}/sys/uvm/uvm_physseg.[ch]
+
+.PATH:		${NETBSDSRCDIR}/sys/kern
+TESTS_C+=	t_uvm_physseg
+SRCS.t_uvm_physseg+=	t_uvm_physseg.c subr_extent.c
+CPPFLAGS.t_uvm_physseg.c= -D_EXTENT_TESTING -D__POOL_EXPOSE -DDIAGNOSTIC
+CPPFLAGS.subr_extent.c=	  -D_EXTENT_TESTING -D__POOL_EXPOSE -D_KERNTYPES -DDIAGNOSTIC
+
+TESTS_C+=       t_uvm_physseg_load
+SRCS.t_uvm_physseg_load+=       t_uvm_physseg_load.c subr_extent.c
+CPPFLAGS.t_uvm_physseg_load.c= -D_EXTENT_TESTING -D__POOL_EXPOSE -DDIAGNOSTIC
+
+.include <bsd.dep.mk>
+.include <bsd.test.mk>
Index: tests/sys/uvm/t_uvm_physseg.c
===================================================================
RCS file: tests/sys/uvm/t_uvm_physseg.c
diff -N tests/sys/uvm/t_uvm_physseg.c
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ tests/sys/uvm/t_uvm_physseg.c	13 Dec 2016 12:44:57 -0000
@@@@ -0,0 +1,2199 @@@@
+/* $NetBSD$ */
+
+/*-
+ * Copyright (c) 2015, 2016 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Santhosh N. Raju <santhosh.raju@@gmail.com> and
+ * by Cherry G. Mathew
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__RCSID("$NetBSD$");
+
+/* Testing API - assumes userland */
+/* Provide Kernel API equivalents */
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h> /* memset(3) et al. */
+#include <stdio.h> /* printf(3) */
+#include <stdlib.h> /* malloc(3) */
+#include <stdarg.h>
+#include <stddef.h>
+
+#define	PRIxPADDR	"lx"
+#define	PRIxPSIZE	"lx"
+#define	PRIuPSIZE	"lu"
+#define	PRIxVADDR	"lx"
+#define	PRIxVSIZE	"lx"
+#define	PRIuVSIZE	"lu"
+
+#define UVM_HOTPLUG /* Enable hotplug with rbtree. */
+#define PMAP_STEAL_MEMORY
+#define DEBUG /* Enable debug functionality. */
+
+typedef unsigned long vaddr_t;
+typedef unsigned long paddr_t;
+typedef unsigned long psize_t;
+typedef unsigned long vsize_t;
+
+#include <uvm/uvm_page.h>
+
+/*
+ * If this line is commented out, tests related to uvm_physseg_get_pmseg()
+ * won't run.
+ *
+ * Have a look at machine/uvm_physseg.h for more details.
+ */
+#define __HAVE_PMAP_PHYSSEG
+
+#include <uvm/uvm_physseg.h>
+
+/*
+ * This is a dummy struct used for testing purposes.
+ *
+ * In reality this struct would exist in the MD part of the code residing in
+ * machine/vmparam.h
+ */
+
+#ifdef __HAVE_PMAP_PHYSSEG
+struct pmap_physseg {
+	bool dummy_variable;		/* Dummy variable used for testing */
+};
+#endif
+
+#ifndef DIAGNOSTIC
+#define	KASSERTMSG(e, msg, ...)	/* NOTHING */
+#define	KASSERT(e)		/* NOTHING */
+#else
+#define	KASSERT(a)		assert(a)
+/* Wrapped in do/while so the macro expands to a single statement. */
+#define KASSERTMSG(exp, ...)    do { printf(__VA_ARGS__); assert((exp)); } while (0)
+#endif
+
+#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
+
+#define VM_NFREELIST            4
+#define VM_FREELIST_DEFAULT     0
+#define VM_FREELIST_FIRST16     3
+#define VM_FREELIST_FIRST1G     2
+#define VM_FREELIST_FIRST4G     1
+
+/*
+ * Used in tests when Array implementation is tested
+ */
+#if !defined(VM_PHYSSEG_MAX)
+#define VM_PHYSSEG_MAX          32
+#endif
+
+#define PAGE_SHIFT              12
+#define PAGE_SIZE               (1 << PAGE_SHIFT)
+#define	PAGE_MASK	(PAGE_SIZE - 1)
+#define atop(x)         (((paddr_t)(x)) >> PAGE_SHIFT)
+#define ptoa(x)         (((paddr_t)(x)) << PAGE_SHIFT)
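+
+/*
+ * Worked example of the conversions above, with PAGE_SHIFT == 12:
+ * atop(0x100000) == 0x100 (byte address 1MB is PFN 256), and
+ * ptoa(0x100) == 0x100000.
+ */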
+
+#define	mutex_enter(l)
+#define	mutex_exit(l)
+
+psize_t physmem;
+
+struct uvmexp uvmexp;        /* decl */
+
+/*
+ * uvm structure borrowed from uvm.h
+ *
+ * Remember this is a dummy structure used within the ATF Tests and
+ * uses only necessary fields from the original uvm struct.
+ * See uvm/uvm.h for the full struct.
+ */
+
+struct uvm {
+	/* vm_page related parameters */
+
+	bool page_init_done;		/* TRUE if uvm_page_init() finished */
+} uvm;
+
+#include <sys/kmem.h>
+
+void *
+kmem_alloc(size_t size, km_flag_t flags)
+{
+	return malloc(size);
+}
+
+void *
+kmem_zalloc(size_t size, km_flag_t flags)
+{
+	void *ptr;
+	ptr = malloc(size);
+
+	memset(ptr, 0, size);
+
+	return ptr;
+}
+
+void
+kmem_free(void *mem, size_t size)
+{
+	free(mem);
+}
+
+static void
+panic(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	vprintf(fmt, ap);
+	printf("\n");
+	va_end(ap);
+	KASSERT(false);
+
+	/*NOTREACHED*/
+}
+
+static void
+uvm_pagefree(struct vm_page *pg)
+{
+	return;
+}
+
+#if defined(UVM_HOTPLUG)
+static void
+uvmpdpol_reinit(void)
+{
+	return;
+}
+#endif /* UVM_HOTPLUG */
+
+/* end - Provide Kernel API equivalents */
+
+
+#include "uvm/uvm_physseg.c"
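+
+/*
+ * The implementation file is included directly (rather than linked in)
+ * so that the tests below can reach internals that uvm_physseg.h does
+ * not export, such as uvm_physseg_graph, uvm_physseg_alloc() and
+ * uvm_physseg_free().
+ */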
+
+#include <atf-c.h>
+
+#define SIXTYFOUR_KILO (64 * 1024)
+#define ONETWENTYEIGHT_KILO (128 * 1024)
+#define TWOFIFTYSIX_KILO (256 * 1024)
+#define FIVEONETWO_KILO (512 * 1024)
+#define ONE_MEGABYTE (1024 * 1024)
+#define TWO_MEGABYTE (2 * 1024 * 1024)
+
+/* Sample Page Frame Numbers */
+#define VALID_START_PFN_1 atop(0)
+#define VALID_END_PFN_1 atop(ONE_MEGABYTE)
+#define VALID_AVAIL_START_PFN_1 atop(0)
+#define VALID_AVAIL_END_PFN_1 atop(ONE_MEGABYTE)
+
+#define VALID_START_PFN_2 atop(ONE_MEGABYTE + 1)
+#define VALID_END_PFN_2 atop(ONE_MEGABYTE * 2)
+#define VALID_AVAIL_START_PFN_2 atop(ONE_MEGABYTE + 1)
+#define VALID_AVAIL_END_PFN_2 atop(ONE_MEGABYTE * 2)
+
+#define VALID_START_PFN_3 atop((ONE_MEGABYTE * 2) + 1)
+#define VALID_END_PFN_3 atop(ONE_MEGABYTE * 3)
+#define VALID_AVAIL_START_PFN_3 atop((ONE_MEGABYTE * 2) + 1)
+#define VALID_AVAIL_END_PFN_3 atop(ONE_MEGABYTE * 3)
+
+#define VALID_START_PFN_4 atop((ONE_MEGABYTE * 3) + 1)
+#define VALID_END_PFN_4 atop(ONE_MEGABYTE * 4)
+#define VALID_AVAIL_START_PFN_4 atop((ONE_MEGABYTE * 3) + 1)
+#define VALID_AVAIL_END_PFN_4 atop(ONE_MEGABYTE * 4)
+
+/*
+ * Total number of pages (of 4K size each) should be 256 for 1MB of memory.
+ */
+#define PAGE_COUNT_1M      256
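+/* That is, ONE_MEGABYTE / PAGE_SIZE == (1024 * 1024) / 4096 == 256. */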
+
+/*
+ * A debug function to print the contents of upm.
+ */
+static inline void
+uvm_physseg_dump_seg(uvm_physseg_t upm)
+{
+#if defined(DEBUG)
+	printf("%s: seg->start == %ld\n", __func__,
+	    uvm_physseg_get_start(upm));
+	printf("%s: seg->end == %ld\n", __func__,
+	    uvm_physseg_get_end(upm));
+	printf("%s: seg->avail_start == %ld\n", __func__,
+	    uvm_physseg_get_avail_start(upm));
+	printf("%s: seg->avail_end == %ld\n", __func__,
+	    uvm_physseg_get_avail_end(upm));
+
+	printf("====\n\n");
+#else
+	return;
+#endif /* DEBUG */
+}
+
+/*
+ * Private accessor that gets the value of uvm_physseg_graph.nentries
+ */
+static int
+uvm_physseg_get_entries(void)
+{
+#if defined(UVM_HOTPLUG)
+	return uvm_physseg_graph.nentries;
+#else
+	return vm_nphysmem;
+#endif /* UVM_HOTPLUG */
+}
+
+#if !defined(UVM_HOTPLUG)
+static void *
+uvm_physseg_alloc(size_t sz)
+{
+	return &vm_physmem[vm_nphysseg++];
+}
+#endif
+
+/*
+ * Test Fixture SetUp().
+ */
+static void
+setup(void)
+{
+	/* Prerequisites for running certain calls in uvm_physseg */
+	uvmexp.pagesize = PAGE_SIZE;
+	uvmexp.npages = 0;
+	uvm.page_init_done = false;
+	uvm_physseg_init();
+}
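+
+/*
+ * Every test body below calls setup() first so that the global uvm and
+ * uvmexp state starts from a clean slate; ATF runs each test case in a
+ * separate subprocess, so no matching teardown is needed.
+ */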
+
+
+/* <---- Tests for Internal functions ----> */
+#if defined(UVM_HOTPLUG)
+ATF_TC(uvm_physseg_alloc_atboot_mismatch);
+ATF_TC_HEAD(uvm_physseg_alloc_atboot_mismatch, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "boot time uvm_physseg_alloc() sanity "
+	    "size mismatch alloc() test.");
+}
+
+ATF_TC_BODY(uvm_physseg_alloc_atboot_mismatch, tc)
+{
+	uvm.page_init_done = false;
+
+	atf_tc_expect_signal(SIGABRT, "size mismatch alloc()");
+
+	uvm_physseg_alloc(sizeof(struct uvm_physseg) - 1);
+}
+
+ATF_TC(uvm_physseg_alloc_atboot_overrun);
+ATF_TC_HEAD(uvm_physseg_alloc_atboot_overrun, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "boot time uvm_physseg_alloc() sanity "
+	    "array overrun alloc() test.");
+}
+
+ATF_TC_BODY(uvm_physseg_alloc_atboot_overrun, tc)
+{
+	uvm.page_init_done = false;
+
+	atf_tc_expect_signal(SIGABRT, "array overrun alloc()");
+
+	uvm_physseg_alloc((VM_PHYSSEG_MAX + 1) * sizeof(struct uvm_physseg));
+
+}
+
+ATF_TC(uvm_physseg_alloc_sanity);
+ATF_TC_HEAD(uvm_physseg_alloc_sanity, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "further uvm_physseg_alloc() sanity checks");
+}
+
+ATF_TC_BODY(uvm_physseg_alloc_sanity, tc)
+{
+
+	/* At boot time */
+	uvm.page_init_done = false;
+
+	/* Correct alloc */
+	ATF_REQUIRE(uvm_physseg_alloc(VM_PHYSSEG_MAX * sizeof(struct uvm_physseg)));
+
+	/* Retry static alloc()s as dynamic - we expect them to pass */
+	uvm.page_init_done = true;
+	ATF_REQUIRE(uvm_physseg_alloc(sizeof(struct uvm_physseg) - 1));
+	ATF_REQUIRE(uvm_physseg_alloc(2 * VM_PHYSSEG_MAX * sizeof(struct uvm_physseg)));
+}
+
+ATF_TC(uvm_physseg_free_atboot_mismatch);
+ATF_TC_HEAD(uvm_physseg_free_atboot_mismatch, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "boot time uvm_physseg_free() sanity "
+	    "size mismatch free() test.");
+}
+
+ATF_TC_BODY(uvm_physseg_free_atboot_mismatch, tc)
+{
+	uvm.page_init_done = false;
+
+	atf_tc_expect_signal(SIGABRT, "size mismatch free()");
+
+	uvm_physseg_free(&uvm_physseg[0], sizeof(struct uvm_physseg) - 1);
+}
+
+ATF_TC(uvm_physseg_free_sanity);
+ATF_TC_HEAD(uvm_physseg_free_sanity, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "further uvm_physseg_free() sanity checks");
+}
+
+ATF_TC_BODY(uvm_physseg_free_sanity, tc)
+{
+
+	/* At boot time */
+	uvm.page_init_done = false;
+
+	/*
+	 * Note: free()ing the entire array is considered to be an
+	 * error. Thus VM_PHYSSEG_MAX - 1.
+	 */
+
+	struct uvm_physseg *seg;
+	seg = uvm_physseg_alloc((VM_PHYSSEG_MAX - 1) * sizeof(*seg));
+	uvm_physseg_free(seg, (VM_PHYSSEG_MAX - 1) * sizeof(struct uvm_physseg));
+
+	/* Retry static alloc()s as dynamic - we expect them to pass */
+	uvm.page_init_done = true;
+
+	seg = uvm_physseg_alloc(sizeof(struct uvm_physseg) - 1);
+	uvm_physseg_free(seg, sizeof(struct uvm_physseg) - 1);
+
+	seg = uvm_physseg_alloc(2 * VM_PHYSSEG_MAX * sizeof(struct uvm_physseg));
+
+	uvm_physseg_free(seg, 2 * VM_PHYSSEG_MAX * sizeof(struct uvm_physseg));
+}
+
+ATF_TC(uvm_physseg_atboot_free_leak);
+ATF_TC_HEAD(uvm_physseg_atboot_free_leak, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "does free() leak at boot?");
+}
+
+ATF_TC_BODY(uvm_physseg_atboot_free_leak, tc)
+{
+
+	/* At boot time */
+	uvm.page_init_done = false;
+
+	/* alloc to array size */
+	struct uvm_physseg *seg;
+	seg = uvm_physseg_alloc(VM_PHYSSEG_MAX * sizeof(*seg));
+
+	uvm_physseg_free(seg, sizeof(*seg));
+
+	atf_tc_expect_signal(SIGABRT, "array overrun on alloc() after leak");
+
+	ATF_REQUIRE(uvm_physseg_alloc(sizeof(struct uvm_physseg)));
+}
+#endif /* UVM_HOTPLUG */
+
+/*
+ * Note: This function replicates verbatim what happens in
+ * uvm_page.c:uvm_page_init().
+ *
+ * Please track any changes that happen there.
+ */
+static void
+uvm_page_init_fake(struct vm_page *pagearray, psize_t pagecount)
+{
+	uvm_physseg_t bank;
+	size_t n;
+
+	for (bank = uvm_physseg_get_first(),
+		 uvm_physseg_seg_chomp_slab(bank, pagearray, pagecount);
+	     uvm_physseg_valid(bank);
+	     bank = uvm_physseg_get_next(bank)) {
+
+		n = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
+		uvm_physseg_seg_alloc_from_slab(bank, n);
+		uvm_physseg_init_seg(bank, pagearray);
+
+		/* set up page array pointers */
+		pagearray += n;
+		pagecount -= n;
+	}
+
+	uvm.page_init_done = true;
+}
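+
+/*
+ * Typical usage in the tests below: plug or physload segments while
+ * uvm.page_init_done is still false, then call uvm_page_init_fake()
+ * with a backing slab large enough for all segments, to mimic the
+ * accounting done by uvm_page.c:uvm_page_init().
+ */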
+
+ATF_TC(uvm_physseg_plug);
+ATF_TC_HEAD(uvm_physseg_plug, tc)
+{
+	atf_tc_set_md_var(tc, "descr",
+	    "Test plug functionality.");
+}
+ATF_TC_BODY(uvm_physseg_plug, tc)
+{
+	uvm_physseg_t upm1, upm2, upm3, upm4;
+	psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+	psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
+	psize_t npages3 = (VALID_END_PFN_3 - VALID_START_PFN_3);
+	psize_t npages4 = (VALID_END_PFN_4 - VALID_START_PFN_4);
+	struct vm_page *pgs, *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2 + npages3));
+
+	/* Fake early boot */
+
+	setup();
+
+	/* Vanilla plug x 2 */
+	ATF_REQUIRE_EQ(uvm_physseg_plug(VALID_START_PFN_1, npages1, &upm1), true);
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(uvm_physseg_plug(VALID_START_PFN_2, npages2, &upm2), true);
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2 + npages3);
+	ATF_CHECK_EQ(npages1 + npages2, uvmexp.npages);
+
+	/* Scavenge plug - goes into the same slab */
+	ATF_REQUIRE_EQ(uvm_physseg_plug(VALID_START_PFN_3, npages3, &upm3), true);
+	ATF_REQUIRE_EQ(3, uvm_physseg_get_entries());
+	ATF_REQUIRE_EQ(npages1 + npages2 + npages3, uvmexp.npages);
+
+	/* Scavenge plug should fit right in the slab */
+	pgs = uvm_physseg_get_pg(upm3, 0);
+	ATF_REQUIRE(pgs > slab && pgs < (slab + npages1 + npages2 + npages3));
+
+	/* Hot plug - goes into a brand new slab */
+	ATF_REQUIRE_EQ(uvm_physseg_plug(VALID_START_PFN_4, npages4, &upm4), true);
+	/* The hot plug slab should have nothing to do with the original slab */
+	pgs = uvm_physseg_get_pg(upm4, 0);
+	ATF_REQUIRE(pgs < slab || pgs > (slab + npages1 + npages2 + npages3));
+
+}
+ATF_TC(uvm_physseg_unplug);
+ATF_TC_HEAD(uvm_physseg_unplug, tc)
+{
+	atf_tc_set_md_var(tc, "descr",
+	    "Test unplug functionality.");
+}
+ATF_TC_BODY(uvm_physseg_unplug, tc)
+{
+	paddr_t pa = 0;
+
+	psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+	psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
+	psize_t npages3 = (VALID_END_PFN_3 - VALID_START_PFN_3);
+
+	struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2 + npages3));
+
+	uvm_physseg_t upm;
+
+	/* Boot time */
+	setup();
+
+	/* We start with zero segments */
+	ATF_REQUIRE_EQ(true, uvm_physseg_plug(atop(0), atop(ONE_MEGABYTE), NULL));
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+	/* Do we have an arbitrary offset in there ? */
+	uvm_physseg_find(atop(TWOFIFTYSIX_KILO), &pa);
+	ATF_REQUIRE_EQ(pa, atop(TWOFIFTYSIX_KILO));
+	ATF_REQUIRE_EQ(0, uvmexp.npages); /* Boot time sanity */
+
+	/* Now let's unplug from the middle */
+	ATF_REQUIRE_EQ(true, uvm_physseg_unplug(atop(TWOFIFTYSIX_KILO), atop(FIVEONETWO_KILO)));
+	/* verify that a gap exists at TWOFIFTYSIX_KILO */
+	pa = 0; /* reset */
+	uvm_physseg_find(atop(TWOFIFTYSIX_KILO), &pa);
+	ATF_REQUIRE_EQ(pa, 0);
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2 + npages3);
+	/* Account for the unplug */
+	ATF_CHECK_EQ(atop(FIVEONETWO_KILO), uvmexp.npages);
+
+	/* Original entry should fragment into two */
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	upm = uvm_physseg_find(atop(TWOFIFTYSIX_KILO + FIVEONETWO_KILO), NULL);
+
+	ATF_REQUIRE(uvm_physseg_valid(upm));
+
+	/* Now unplug the tail fragment - should swallow the complete entry */
+	ATF_REQUIRE_EQ(true, uvm_physseg_unplug(atop(TWOFIFTYSIX_KILO + FIVEONETWO_KILO), atop(TWOFIFTYSIX_KILO)));
+
+	/* The "swallow" above should have invalidated the handle */
+	ATF_REQUIRE_EQ(false, uvm_physseg_valid(upm));
+
+	/* Only the first one is left now */
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	/* Unplug from the back */
+	ATF_REQUIRE_EQ(true, uvm_physseg_unplug(atop(ONETWENTYEIGHT_KILO), atop(ONETWENTYEIGHT_KILO)));
+	/* Shouldn't change the number of segments */
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	/* Unplug from the front */
+	ATF_REQUIRE_EQ(true, uvm_physseg_unplug(0, atop(SIXTYFOUR_KILO)));
+	/* Shouldn't change the number of segments */
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	/* Unplugging the final fragment should fail */
+	atf_tc_expect_signal(SIGABRT, "Unplugging the last segment");
+	ATF_REQUIRE_EQ(true, uvm_physseg_unplug(atop(SIXTYFOUR_KILO), atop(SIXTYFOUR_KILO)));
+}
+
+
+/* <---- end Tests for Internal functions ----> */
+
+/* Tests for functions exported via uvm_physseg.h */
+ATF_TC(uvm_physseg_init);
+ATF_TC_HEAD(uvm_physseg_init, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the basic uvm_physseg_init() call\
+	    initializes the vm_physmem struct which holds the rb_tree.");
+}
+ATF_TC_BODY(uvm_physseg_init, tc)
+{
+	uvm_physseg_init();
+
+	ATF_REQUIRE_EQ(0, uvm_physseg_get_entries());
+}
+
+ATF_TC(uvm_page_physload_preload);
+ATF_TC_HEAD(uvm_page_physload_preload, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the basic uvm_page_physload() \
+	    call works without a panic() in a preload scenario.");
+}
+ATF_TC_BODY(uvm_page_physload_preload, tc)
+{
+	uvm_physseg_t upm;
+	const psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+	const psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
+
+	struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	/* Should return a valid handle */
+	ATF_REQUIRE(uvm_physseg_valid(upm));
+
+	/* No pages should be allocated yet */
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	/* After the first call one segment should exist */
+	ATF_CHECK_EQ(1, uvm_physseg_get_entries());
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2);
+
+	upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	/* Should return a valid handle */
+	ATF_REQUIRE(uvm_physseg_valid(upm));
+
+	ATF_REQUIRE_EQ(npages1 + npages2, uvmexp.npages);
+
+	/* After the second call two segments should exist */
+	ATF_CHECK_EQ(2, uvm_physseg_get_entries());
+}
+
+ATF_TC(uvm_physseg_handle_immutable);
+ATF_TC_HEAD(uvm_physseg_handle_immutable, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the uvm_physseg_t handle is \
+	    immutable.");
+}
+ATF_TC_BODY(uvm_physseg_handle_immutable, tc)
+{
+	uvm_physseg_t upm;
+
+	/* We insert the segments out of order */
+
+	setup();
+
+	upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_CHECK_EQ(UVM_PHYSSEG_TYPE_INVALID_EMPTY, uvm_physseg_get_prev(upm));
+
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	/* Fetch Previous, we inserted a lower value */
+	upm = uvm_physseg_get_prev(upm);
+
+#if !defined(UVM_HOTPLUG)
+	/*
+	 * This test is going to fail for the Array Implementation but is
+	 * expected to pass in the RB Tree implementation.
+	 */
+	atf_tc_expect_fail("Mutable handle in static array impl.");
+#endif
+	ATF_CHECK(UVM_PHYSSEG_TYPE_INVALID_EMPTY != upm);
+	ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physseg_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physseg_get_end(upm));
+}
+
+ATF_TC(uvm_physseg_seg_chomp_slab);
+ATF_TC_HEAD(uvm_physseg_seg_chomp_slab, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests the slab import code.");
+
+}
+ATF_TC_BODY(uvm_physseg_seg_chomp_slab, tc)
+{
+	struct uvm_physseg *seg;
+	struct vm_page *slab, *pgs;
+	size_t i;
+	const size_t npages = UVM_PHYSSEG_BOOT_UNPLUG_MAX; /* Number of pages */
+
+	setup();
+
+	/* This is boot time */
+	slab = malloc(sizeof(struct vm_page) * npages * 2);
+
+	seg = uvm_physseg_alloc(sizeof(struct uvm_physseg));
+
+	uvm_physseg_seg_chomp_slab(PHYSSEG_NODE_TO_HANDLE(seg), slab, npages * 2);
+
+	/* Should be able to allocate a sizeof(*slab) chunk from the extent */
+	ATF_REQUIRE_EQ(0, extent_alloc(seg->ext, sizeof(*slab), 1, 0, EX_BOUNDZERO, (void *)&pgs));
+	extent_free(seg->ext, (u_long) pgs, sizeof(*slab), EX_BOUNDZERO);
+
+	/* Try alloc/free at static time */
+	for (i = 0; i < npages; i++) {
+		ATF_REQUIRE_EQ(0, extent_alloc(seg->ext, sizeof(*slab), 1, 0, EX_BOUNDZERO, (void *)&pgs));
+		extent_free(seg->ext, (u_long) pgs, sizeof(*slab), EX_BOUNDZERO);
+	}
+
+	/* Now setup post boot */
+	uvm.page_init_done = true;
+
+	/* XXX: Need to find a way to test EX_MALLOCOK fails at boot time, with atf */
+
+	/* Try alloc/free after uvm_page.c:uvm_page_init() as well */
+	for (i = 0; i < npages; i++) {
+		ATF_REQUIRE_EQ(0, extent_alloc(seg->ext, sizeof(*slab), 1, 0, EX_BOUNDZERO, (void *)&pgs));
+		extent_free(seg->ext, (u_long) pgs, sizeof(*slab), EX_BOUNDZERO);
+	}
+
+}
+
+ATF_TC(uvm_physseg_alloc_from_slab);
+ATF_TC_HEAD(uvm_physseg_alloc_from_slab, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests the slab allocation code.");
+
+}
+ATF_TC_BODY(uvm_physseg_alloc_from_slab, tc)
+{
+	struct uvm_physseg *seg;
+	struct vm_page *slab, *pgs;
+	const size_t npages = UVM_PHYSSEG_BOOT_UNPLUG_MAX; /* Number of pages */
+
+	setup();
+
+	/* This is boot time */
+	slab = malloc(sizeof(struct vm_page) * npages * 2);
+
+	seg = uvm_physseg_alloc(sizeof(struct uvm_physseg));
+
+	uvm_physseg_seg_chomp_slab(PHYSSEG_NODE_TO_HANDLE(seg), slab, npages * 2);
+
+	pgs = uvm_physseg_seg_alloc_from_slab(PHYSSEG_NODE_TO_HANDLE(seg), npages);
+
+	ATF_REQUIRE(pgs != NULL);
+
+	/* Now setup post boot */
+	uvm.page_init_done = true;
+
+	pgs = uvm_physseg_seg_alloc_from_slab(PHYSSEG_NODE_TO_HANDLE(seg), npages);
+	ATF_REQUIRE(pgs != NULL);
+
+	atf_tc_expect_fail("alloc beyond extent");
+
+	pgs = uvm_physseg_seg_alloc_from_slab(PHYSSEG_NODE_TO_HANDLE(seg), npages);
+	ATF_REQUIRE(pgs != NULL);
+}
+
+ATF_TC(uvm_physseg_init_seg);
+ATF_TC_HEAD(uvm_physseg_init_seg, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if uvm_physseg_init_seg adds pages to "
+	    "uvmexp.npages");
+}
+ATF_TC_BODY(uvm_physseg_init_seg, tc)
+{
+	struct uvm_physseg *seg;
+	struct vm_page *slab, *pgs;
+	const size_t npages = UVM_PHYSSEG_BOOT_UNPLUG_MAX; /* Number of pages */
+
+	setup();
+
+	/* This is boot time */
+	slab = malloc(sizeof(struct vm_page) * npages * 2);
+
+	seg = uvm_physseg_alloc(sizeof(struct uvm_physseg));
+
+	uvm_physseg_seg_chomp_slab(PHYSSEG_NODE_TO_HANDLE(seg), slab, npages * 2);
+
+	pgs = uvm_physseg_seg_alloc_from_slab(PHYSSEG_NODE_TO_HANDLE(seg), npages);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	seg->start = 0;
+	seg->end = npages;
+
+	seg->avail_start = 0;
+	seg->avail_end = npages;
+
+	uvm_physseg_init_seg(PHYSSEG_NODE_TO_HANDLE(seg), pgs);
+
+	ATF_REQUIRE_EQ(npages, uvmexp.npages);
+}
+
+#if 0
+ATF_TC(uvm_physseg_init_seg);
+ATF_TC_HEAD(uvm_physseg_init_seg, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the basic uvm_page_physload() \
+	    call works without a panic() after the segment is initialized.");
+}
+ATF_TC_BODY(uvm_physseg_init_seg, tc)
+{
+	uvm_physseg_t upm;
+	psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
+	struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_CHECK_EQ(0, uvmexp.npages);
+
+	/*
+	 * Boot time physplug needs explicit external init,
+	 * Duplicate what uvm_page.c:uvm_page_init() does.
+	 * Note: not everything uvm_page_init() does gets done here.
+	 * Read the source.
+	 */
+	/* suck in backing slab, initialise extent. */
+	uvm_physseg_seg_chomp_slab(upm, pgs, npages);
+
+	/*
+	 * Actual pgs[] allocation, from extent.
+	 */
+	uvm_physseg_seg_alloc_from_slab(upm, npages);
+
+	/* Now we initialize the segment */
+	uvm_physseg_init_seg(upm, pgs);
+
+	/* Done with boot simulation */
+	extent_init();
+	uvm.page_init_done = true;
+
+	/* We have total memory of 1MB */
+	ATF_CHECK_EQ(PAGE_COUNT_1M, uvmexp.npages);
+
+	upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	/* We added another 1MB so PAGE_COUNT_1M + PAGE_COUNT_1M */
+	ATF_CHECK_EQ(PAGE_COUNT_1M + PAGE_COUNT_1M, uvmexp.npages);
+
+}
+#endif
+
+ATF_TC(uvm_physseg_get_start);
+ATF_TC_HEAD(uvm_physseg_get_start, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the start PFN is returned \
+            correctly from a segment created via uvm_page_physload().");
+}
+ATF_TC_BODY(uvm_physseg_get_start, tc)
+{
+	const psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+	const psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
+
+	struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));
+
+	uvm_physseg_t upm;
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2);
+
+	ATF_REQUIRE_EQ(PAGE_COUNT_1M, uvmexp.npages);
+
+	ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physseg_get_start(upm));
+
+	upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(PAGE_COUNT_1M + PAGE_COUNT_1M, uvmexp.npages);
+
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
+}
+
+ATF_TC(uvm_physseg_get_start_invalid);
+ATF_TC_HEAD(uvm_physseg_get_start_invalid, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests the invalid / error conditions \
+	    correctly when uvm_physseg_get_start() is called with invalid \
+            parameter values.");
+}
+ATF_TC_BODY(uvm_physseg_get_start_invalid, tc)
+{
+	/* Check for pgs == NULL */
+	setup();
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	/* Force other check conditions */
+	uvm.page_init_done = true;
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(true, uvm.page_init_done);
+
+	/* Invalid uvm_physseg_t */
+	ATF_CHECK_EQ((paddr_t) -1,
+	    uvm_physseg_get_start(UVM_PHYSSEG_TYPE_INVALID));
+}
+
+ATF_TC(uvm_physseg_get_end);
+ATF_TC_HEAD(uvm_physseg_get_end, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the end PFN is returned \
+	    correctly from a segment created via uvm_page_physload().");
+}
+ATF_TC_BODY(uvm_physseg_get_end, tc)
+{
+	const psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+	const psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
+
+	struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));
+
+	uvm_physseg_t upm;
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2);
+
+	ATF_REQUIRE_EQ(PAGE_COUNT_1M, uvmexp.npages);
+
+	ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physseg_get_end(upm));
+
+	upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(PAGE_COUNT_1M + PAGE_COUNT_1M, uvmexp.npages);
+
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
+}
+
+ATF_TC(uvm_physseg_get_end_invalid);
+ATF_TC_HEAD(uvm_physseg_get_end_invalid, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests the invalid / error conditions \
+	    correctly when uvm_physseg_get_end() is called with invalid \
+            parameter values.");
+}
+ATF_TC_BODY(uvm_physseg_get_end_invalid, tc)
+{
+	/* Check for pgs == NULL */
+	setup();
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	/* Force other check conditions */
+	uvm.page_init_done = true;
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(true, uvm.page_init_done);
+
+	/* Invalid uvm_physseg_t */
+	ATF_CHECK_EQ((paddr_t) -1,
+	    uvm_physseg_get_end(UVM_PHYSSEG_TYPE_INVALID));
+}
+
+ATF_TC(uvm_physseg_get_avail_start);
+ATF_TC_HEAD(uvm_physseg_get_avail_start, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the avail_start PFN is \
+            returned correctly from a segment created via uvm_page_physload().");
+}
+ATF_TC_BODY(uvm_physseg_get_avail_start, tc)
+{
+	const psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+	const psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
+
+	struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));
+
+	uvm_physseg_t upm;
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2);
+
+	ATF_REQUIRE_EQ(PAGE_COUNT_1M, uvmexp.npages);
+
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_1, uvm_physseg_get_avail_start(upm));
+
+	upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(PAGE_COUNT_1M + PAGE_COUNT_1M, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physseg_get_avail_start(upm));
+}
+
+ATF_TC(uvm_physseg_get_avail_start_invalid);
+ATF_TC_HEAD(uvm_physseg_get_avail_start_invalid, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests the invalid / error conditions \
+	    correctly when uvm_physseg_get_avail_start() is called with invalid\
+            parameter values.");
+}
+ATF_TC_BODY(uvm_physseg_get_avail_start_invalid, tc)
+{
+	/* Check for pgs == NULL */
+	setup();
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	/* Force other check conditions */
+	uvm.page_init_done = true;
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(true, uvm.page_init_done);
+
+	/* Invalid uvm_physseg_t */
+	ATF_CHECK_EQ((paddr_t) -1,
+	    uvm_physseg_get_avail_start(UVM_PHYSSEG_TYPE_INVALID));
+}
+
+ATF_TC(uvm_physseg_get_avail_end);
+ATF_TC_HEAD(uvm_physseg_get_avail_end, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the avail_end PFN is \
+            returned correctly from a segment created via uvm_page_physload().");
+}
+ATF_TC_BODY(uvm_physseg_get_avail_end, tc)
+{
+	const psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+	const psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
+
+	struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));
+
+	uvm_physseg_t upm;
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2);
+
+	ATF_REQUIRE_EQ(PAGE_COUNT_1M, uvmexp.npages);
+
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1, uvm_physseg_get_avail_end(upm));
+
+	upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(PAGE_COUNT_1M + PAGE_COUNT_1M, uvmexp.npages);
+
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physseg_get_avail_end(upm));
+}
+
+ATF_TC(uvm_physseg_get_avail_end_invalid);
+ATF_TC_HEAD(uvm_physseg_get_avail_end_invalid, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests the invalid / error conditions \
+	    correctly when uvm_physseg_get_avail_end() is called with invalid\
+            parameter values.");
+}
+ATF_TC_BODY(uvm_physseg_get_avail_end_invalid, tc)
+{
+	/* Check for pgs == NULL */
+	setup();
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	/* Force other check conditions */
+	uvm.page_init_done = true;
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(true, uvm.page_init_done);
+
+	/* Invalid uvm_physseg_t */
+	ATF_CHECK_EQ((paddr_t) -1,
+	    uvm_physseg_get_avail_end(UVM_PHYSSEG_TYPE_INVALID));
+}
+
+ATF_TC(uvm_physseg_get_next);
+ATF_TC_HEAD(uvm_physseg_get_next, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests the pointer values for the next \
+            segment using the uvm_physseg_get_next() call.");
+}
+ATF_TC_BODY(uvm_physseg_get_next, tc)
+{
+	uvm_physseg_t upm;
+	uvm_physseg_t upm_next;
+
+	/* We insert the segments in ascending order */
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_CHECK_EQ(UVM_PHYSSEG_TYPE_INVALID_OVERFLOW,
+	    uvm_physseg_get_next(upm));
+
+	upm_next = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	upm = uvm_physseg_get_next(upm); /* Fetch Next */
+
+	ATF_CHECK_EQ(upm_next, upm);
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
+
+	upm_next = uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
+	    VALID_AVAIL_START_PFN_3, VALID_AVAIL_END_PFN_3, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(3, uvm_physseg_get_entries());
+
+	upm = uvm_physseg_get_next(upm); /* Fetch Next */
+
+	ATF_CHECK_EQ(upm_next, upm);
+	ATF_CHECK_EQ(VALID_START_PFN_3, uvm_physseg_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_3, uvm_physseg_get_end(upm));
+}
+
+ATF_TC(uvm_physseg_get_prev);
+ATF_TC_HEAD(uvm_physseg_get_prev, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests the pointer values for the previous \
+            segment using the uvm_physseg_get_prev() call.");
+}
+ATF_TC_BODY(uvm_physseg_get_prev, tc)
+{
+	uvm_physseg_t upm;
+	uvm_physseg_t upm_prev;
+
+	setup();
+	upm_prev = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_CHECK_EQ(UVM_PHYSSEG_TYPE_INVALID_EMPTY,
+	    uvm_physseg_get_prev(upm_prev));
+
+	upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	/* Fetch Previous, we inserted a lower value */
+	upm = uvm_physseg_get_prev(upm);
+
+	ATF_CHECK_EQ(upm_prev, upm);
+	ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physseg_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physseg_get_end(upm));
+
+	uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
+	    VALID_AVAIL_START_PFN_3, VALID_AVAIL_END_PFN_3, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(3, uvm_physseg_get_entries());
+
+	/*
+	 * This will return UVM_PHYSSEG_TYPE_INVALID_EMPTY since we are at
+	 * the lowest node.
+	 */
+	upm = uvm_physseg_get_prev(upm);
+
+	ATF_CHECK_EQ(UVM_PHYSSEG_TYPE_INVALID_EMPTY, upm);
+}
+
+ATF_TC(uvm_physseg_get_first);
+ATF_TC_HEAD(uvm_physseg_get_first, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests the pointer values for the first \
+            segment (lowest node) using the uvm_physseg_get_first() call.");
+}
+ATF_TC_BODY(uvm_physseg_get_first, tc)
+{
+	uvm_physseg_t upm = UVM_PHYSSEG_TYPE_INVALID_EMPTY;
+	uvm_physseg_t upm_first;
+
+	setup();
+
+	/* No nodes exist */
+	ATF_CHECK_EQ(upm, uvm_physseg_get_first());
+
+	upm_first = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	/* Pointer to first should be the least valued node */
+	upm = uvm_physseg_get_first();
+	ATF_CHECK_EQ(upm_first, upm);
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physseg_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physseg_get_avail_end(upm));
+
+	/* Insert a node of lesser value */
+	upm_first = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	/* Pointer to first should be the least valued node */
+	upm = uvm_physseg_get_first();
+	ATF_CHECK_EQ(upm_first, upm);
+	ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physseg_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physseg_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_1, uvm_physseg_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1, uvm_physseg_get_avail_end(upm));
+
+	/* Insert a node of higher value */
+	upm_first = uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
+	    VALID_AVAIL_START_PFN_3, VALID_AVAIL_END_PFN_3, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(3, uvm_physseg_get_entries());
+
+	/* Pointer to first should be the least valued node */
+	upm = uvm_physseg_get_first();
+	ATF_CHECK(upm_first != upm);
+	ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physseg_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physseg_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_1, uvm_physseg_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1, uvm_physseg_get_avail_end(upm));
+}
+
+ATF_TC(uvm_physseg_get_last);
+ATF_TC_HEAD(uvm_physseg_get_last, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests the pointer values for the last \
+            segment using the uvm_physseg_get_last() call.");
+}
+ATF_TC_BODY(uvm_physseg_get_last, tc)
+{
+	uvm_physseg_t upm = UVM_PHYSSEG_TYPE_INVALID_EMPTY;
+	uvm_physseg_t upm_last;
+
+	setup();
+
+	/* No nodes exist */
+	ATF_CHECK_EQ(upm, uvm_physseg_get_last());
+
+	upm_last = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	/* Pointer to last should be the most valued node */
+	upm = uvm_physseg_get_last();
+	ATF_CHECK_EQ(upm_last, upm);
+	ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physseg_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physseg_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_1, uvm_physseg_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1, uvm_physseg_get_avail_end(upm));
+
+	/* Insert node of greater value */
+	upm_last = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	/* Pointer to last should be the most valued node */
+	upm = uvm_physseg_get_last();
+	ATF_CHECK_EQ(upm_last, upm);
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physseg_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physseg_get_avail_end(upm));
+
+	/* Insert node of greater value */
+	upm_last = uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
+	    VALID_AVAIL_START_PFN_3, VALID_AVAIL_END_PFN_3, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(3, uvm_physseg_get_entries());
+
+	/* Pointer to last should be the most valued node */
+	upm = uvm_physseg_get_last();
+	ATF_CHECK_EQ(upm_last, upm);
+	ATF_CHECK_EQ(VALID_START_PFN_3, uvm_physseg_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_3, uvm_physseg_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_3, uvm_physseg_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_3, uvm_physseg_get_avail_end(upm));
+}
+
+ATF_TC(uvm_physseg_valid);
+ATF_TC_HEAD(uvm_physseg_valid, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests that the pointer value for the \
+            current segment is valid using the uvm_physseg_valid() call.");
+}
+ATF_TC_BODY(uvm_physseg_valid, tc)
+{
+	psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
+
+	struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
+
+	uvm_physseg_t upm;
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	uvm_physseg_init_seg(upm, pgs);
+
+	ATF_REQUIRE_EQ(PAGE_COUNT_1M, uvmexp.npages);
+
+	ATF_CHECK_EQ(true, uvm_physseg_valid(upm));
+}
+
+ATF_TC(uvm_physseg_valid_invalid);
+ATF_TC_HEAD(uvm_physseg_valid_invalid, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests that the pointer value for the \
+            current segment is invalid using the uvm_physseg_valid() call.");
+}
+ATF_TC_BODY(uvm_physseg_valid_invalid, tc)
+{
+	uvm_physseg_t upm;
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	/* Force other check conditions */
+	uvm.page_init_done = true;
+
+	ATF_REQUIRE_EQ(true, uvm.page_init_done);
+
+	/* Invalid uvm_physseg_t */
+	ATF_CHECK_EQ(false, uvm_physseg_valid(UVM_PHYSSEG_TYPE_INVALID));
+
+	/*
+	 * Without any pages initialized for segment, it is considered
+	 * invalid
+	 */
+	ATF_CHECK_EQ(false, uvm_physseg_valid(upm));
+}
+
+ATF_TC(uvm_physseg_get_highest);
+ATF_TC_HEAD(uvm_physseg_get_highest, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the returned PFN matches \
+            the highest PFN in use by the system.");
+}
+ATF_TC_BODY(uvm_physseg_get_highest, tc)
+{
+	setup();
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	/* Only one segment so highest is the current */
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1 - 1, uvm_physseg_get_highest_frame());
+
+	uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
+	    VALID_AVAIL_START_PFN_3, VALID_AVAIL_END_PFN_3, VM_FREELIST_DEFAULT);
+
+	/* PFN_3 > PFN_1 */
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_3 - 1, uvm_physseg_get_highest_frame());
+
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	/* PFN_3 > PFN_2 */
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_3 - 1, uvm_physseg_get_highest_frame());
+}
+
+ATF_TC(uvm_physseg_get_free_list);
+ATF_TC_HEAD(uvm_physseg_get_free_list, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the returned Free List type \
+            of a segment matches the one returned from \
+            uvm_physseg_get_free_list() call.");
+}
+ATF_TC_BODY(uvm_physseg_get_free_list, tc)
+{
+	uvm_physseg_t upm;
+
+	/* Insertions are made in ascending order */
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_CHECK_EQ(VM_FREELIST_DEFAULT, uvm_physseg_get_free_list(upm));
+
+	upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_FIRST16);
+
+	ATF_CHECK_EQ(VM_FREELIST_FIRST16, uvm_physseg_get_free_list(upm));
+
+	upm = uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
+	    VALID_AVAIL_START_PFN_3, VALID_AVAIL_END_PFN_3, VM_FREELIST_FIRST1G);
+
+	ATF_CHECK_EQ(VM_FREELIST_FIRST1G, uvm_physseg_get_free_list(upm));
+}
+
+ATF_TC(uvm_physseg_get_start_hint);
+ATF_TC_HEAD(uvm_physseg_get_start_hint, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the returned start_hint value \
+            of a segment matches the one returned from \
+            uvm_physseg_get_start_hint() call.");
+}
+ATF_TC_BODY(uvm_physseg_get_start_hint, tc)
+{
+	uvm_physseg_t upm;
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	/* Will be zero since no specific value was set during init */
+	ATF_CHECK_EQ(0, uvm_physseg_get_start_hint(upm));
+}
+
+ATF_TC(uvm_physseg_set_start_hint);
+ATF_TC_HEAD(uvm_physseg_set_start_hint, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the returned start_hint value \
+            of a segment matches the one set by the \
+            uvm_physseg_set_start_hint() call.");
+}
+ATF_TC_BODY(uvm_physseg_set_start_hint, tc)
+{
+	psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
+
+	struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
+
+	uvm_physseg_t upm;
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	uvm_physseg_init_seg(upm, pgs);
+
+	ATF_CHECK_EQ(true, uvm_physseg_set_start_hint(upm, atop(128)));
+
+	/* Will be atop(128), the value set just above */
+	ATF_CHECK_EQ(atop(128), uvm_physseg_get_start_hint(upm));
+}
+
+ATF_TC(uvm_physseg_set_start_hint_invalid);
+ATF_TC_HEAD(uvm_physseg_set_start_hint_invalid, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the return value is false \
+            when uvm_physseg_set_start_hint() is called on an invalid \
+            segment.");
+}
+ATF_TC_BODY(uvm_physseg_set_start_hint_invalid, tc)
+{
+	uvm_physseg_t upm;
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	/* Force other check conditions */
+	uvm.page_init_done = true;
+
+	ATF_REQUIRE_EQ(true, uvm.page_init_done);
+
+	ATF_CHECK_EQ(false, uvm_physseg_set_start_hint(upm, atop(128)));
+
+	/*
+	 * Will be zero since the uvm_physseg_set_start_hint() call above
+	 * failed, so no specific value was set
+	 */
+	ATF_CHECK_EQ(0, uvm_physseg_get_start_hint(upm));
+}
+
+ATF_TC(uvm_physseg_get_pg);
+ATF_TC_HEAD(uvm_physseg_get_pg, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the returned vm_page struct \
+            is correct when fetched by uvm_physseg_get_pg() call.");
+}
+ATF_TC_BODY(uvm_physseg_get_pg, tc)
+{
+	psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
+
+	struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
+
+	struct vm_page *extracted_pg = NULL;
+
+	uvm_physseg_t upm;
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	/* Now we initialize the segment */
+	uvm_physseg_init_seg(upm, pgs);
+
+	ATF_REQUIRE_EQ(PAGE_COUNT_1M, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(NULL, extracted_pg);
+
+	/* Try fetching the 5th Page in the Segment */
+	extracted_pg = uvm_physseg_get_pg(upm, 5);
+
+	/* The value of phys_addr is n * PAGE_SIZE, where n is the page number */
+	ATF_CHECK_EQ(5 * PAGE_SIZE, extracted_pg->phys_addr);
+
+	/* Try fetching the 113th Page in the Segment */
+	extracted_pg = uvm_physseg_get_pg(upm, 113);
+
+	ATF_CHECK_EQ(113 * PAGE_SIZE, extracted_pg->phys_addr);
+}
+
+#ifdef __HAVE_PMAP_PHYSSEG
+ATF_TC(uvm_physseg_get_pmseg);
+ATF_TC_HEAD(uvm_physseg_get_pmseg, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the returned pmap_physseg \
+            struct is correct when fetched by uvm_physseg_get_pmseg() call.");
+}
+ATF_TC_BODY(uvm_physseg_get_pmseg, tc)
+{
+	psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
+
+	struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
+
+	struct pmap_physseg pmseg = { true };
+
+	struct pmap_physseg *extracted_pmseg = NULL;
+
+	uvm_physseg_t upm;
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	/* Now we initialize the segment */
+	uvm_physseg_init_seg(upm, pgs);
+
+	ATF_REQUIRE_EQ(PAGE_COUNT_1M, uvmexp.npages);
+
+	ATF_REQUIRE_EQ(NULL, extracted_pmseg);
+
+	ATF_REQUIRE_EQ(true, pmseg.dummy_variable);
+
+	/* Extract the current pmseg */
+	extracted_pmseg = uvm_physseg_get_pmseg(upm);
+
+	/*
+	 * We can only check that it is not NULL;
+	 * we do not know the value it contains.
+	 */
+	ATF_CHECK(NULL != extracted_pmseg);
+
+	extracted_pmseg->dummy_variable = pmseg.dummy_variable;
+
+	/* Invert value to ensure test integrity */
+	pmseg.dummy_variable = false;
+
+	ATF_REQUIRE_EQ(false, pmseg.dummy_variable);
+
+	extracted_pmseg = uvm_physseg_get_pmseg(upm);
+
+	ATF_CHECK(NULL != extracted_pmseg);
+
+	ATF_CHECK_EQ(true, extracted_pmseg->dummy_variable);
+}
+#endif
+
+ATF_TC(vm_physseg_find);
+ATF_TC_HEAD(vm_physseg_find, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the returned segment number \
+            is correct when a PFN is passed into the uvm_physseg_find() call. \
+            In addition to this, the offset of the PFN from the start of \
+            segment is also set if the parameter is passed in as not NULL.");
+}
+ATF_TC_BODY(vm_physseg_find, tc)
+{
+	psize_t offset = (psize_t) -1;
+
+	uvm_physseg_t upm_first, upm_second, result;
+
+	setup();
+	upm_first = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	upm_second = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	/* Under ONE_MEGABYTE is segment upm_first */
+	result = uvm_physseg_find(atop(ONE_MEGABYTE - 1024), NULL);
+	ATF_CHECK_EQ(upm_first, result);
+	ATF_CHECK_EQ(uvm_physseg_get_start(upm_first),
+	    uvm_physseg_get_start(result));
+	ATF_CHECK_EQ(uvm_physseg_get_end(upm_first),
+	    uvm_physseg_get_end(result));
+	ATF_CHECK_EQ(uvm_physseg_get_avail_start(upm_first),
+	    uvm_physseg_get_avail_start(result));
+	ATF_CHECK_EQ(uvm_physseg_get_avail_end(upm_first),
+	    uvm_physseg_get_avail_end(result));
+
+	ATF_REQUIRE_EQ((psize_t) -1, offset);
+
+	/* Over ONE_MEGABYTE is segment upm_second */
+	result = uvm_physseg_find(atop(ONE_MEGABYTE + 8192), &offset);
+	ATF_CHECK_EQ(upm_second, result);
+	ATF_CHECK_EQ(uvm_physseg_get_start(upm_second),
+	    uvm_physseg_get_start(result));
+	ATF_CHECK_EQ(uvm_physseg_get_end(upm_second),
+	    uvm_physseg_get_end(result));
+	ATF_CHECK_EQ(uvm_physseg_get_avail_start(upm_second),
+	    uvm_physseg_get_avail_start(result));
+	ATF_CHECK_EQ(uvm_physseg_get_avail_end(upm_second),
+	    uvm_physseg_get_avail_end(result));
+
+	/* The offset is calculated in units of pages */
+	/* atop(ONE_MEGABYTE + (2 * PAGE_SIZE)) - VALID_START_PFN_2 == 2 */
+	ATF_CHECK_EQ(2, offset);
+}
+
+ATF_TC(vm_physseg_find_invalid);
+ATF_TC_HEAD(vm_physseg_find_invalid, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the returned segment number \
+            is UVM_PHYSSEG_TYPE_INVALID when a nonexistent PFN is passed \
+            into the uvm_physseg_find() call.");
+}
+ATF_TC_BODY(vm_physseg_find_invalid, tc)
+{
+	psize_t offset = (psize_t) -1;
+
+	setup();
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	/* No segments over 3 MB exists at the moment */
+	ATF_CHECK_EQ(UVM_PHYSSEG_TYPE_INVALID,
+	    uvm_physseg_find(atop(ONE_MEGABYTE * 3), NULL));
+
+	ATF_REQUIRE_EQ((psize_t) -1, offset);
+
+	/* No segments over 3 MB exists at the moment */
+	ATF_CHECK_EQ(UVM_PHYSSEG_TYPE_INVALID,
+	    uvm_physseg_find(atop(ONE_MEGABYTE * 3), &offset));
+
+	ATF_CHECK_EQ((psize_t) -1, offset);
+}
+
+ATF_TC(uvm_page_physunload_start);
+ATF_TC_HEAD(uvm_page_physunload_start, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the basic uvm_page_physunload()\
+	    call works without a panic(). Unloads from the start of the segment.");
+}
+ATF_TC_BODY(uvm_page_physunload_start, tc)
+{
+	/*
+	 * Does uvmexp.npages decrease every time uvm_page_physunload() is
+	 * called?
+	 */
+	psize_t npages = (VALID_END_PFN_2 - VALID_START_PFN_2);
+
+	struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
+
+	paddr_t p = 0;
+
+	uvm_physseg_t upm;
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	uvm_physseg_init_seg(upm, pgs);
+
+	ATF_CHECK_EQ(true, uvm_page_physunload(upm, VM_FREELIST_DEFAULT, &p));
+
+	/*
+	 * When called for the first time, uvm_page_physunload() removes the
+	 * first PFN.
+	 *
+	 * The new avail start will be VALID_AVAIL_START_PFN_2 + 1.
+	 */
+	ATF_CHECK_EQ(VALID_START_PFN_2, atop(p));
+
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2 + 1,
+	    uvm_physseg_get_avail_start(upm));
+
+	ATF_CHECK_EQ(VALID_START_PFN_2 + 1, uvm_physseg_get_start(upm));
+
+	/* Rest of the stuff should remain the same */
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physseg_get_avail_end(upm));
+}
+
+ATF_TC(uvm_page_physunload_end);
+ATF_TC_HEAD(uvm_page_physunload_end, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the basic uvm_page_physunload()\
+	    call works without a panic(). Unloads from the end of the segment.");
+}
+ATF_TC_BODY(uvm_page_physunload_end, tc)
+{
+	/*
+	 * Does uvmexp.npages decrease every time uvm_page_physunload() is
+	 * called?
+	 */
+	paddr_t p = 0;
+
+	uvm_physseg_t upm;
+	const psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+	const psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
+
+	struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));
+
+
+	setup();
+	/* Note: start != avail_start to remove from end. */
+	upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2 + 1, VALID_AVAIL_END_PFN_2,
+	    VM_FREELIST_DEFAULT);
+
+	p = 0;
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE(
+		uvm_physseg_get_avail_start(upm) != uvm_physseg_get_start(upm));
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2);
+
+	ATF_CHECK_EQ(true, uvm_page_physunload(upm, VM_FREELIST_DEFAULT, &p));
+
+	/*
+	 * Remember that if X is the upper limit, the last valid PFN is X - 1.
+	 *
+	 * For example, if 256 is the upper limit for 1MB of memory, the last
+	 * valid PFN is 256 - 1 = 255.
+	 */
+
+	ATF_CHECK_EQ(VALID_END_PFN_2 - 1, atop(p));
+
+	/*
+	 * Since avail_start != start here, uvm_page_physunload() removes the last PFN.
+	 *
+	 * The new avail_end will be VALID_AVAIL_END_PFN_2 - 1
+	 * The new end will be VALID_END_PFN_2 - 1
+	 */
+
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2 - 1, uvm_physseg_get_avail_end(upm));
+
+	ATF_CHECK_EQ(VALID_END_PFN_2 - 1, uvm_physseg_get_end(upm));
+
+	/* Rest of the stuff should remain the same */
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2 + 1,
+	    uvm_physseg_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
+}
+
+ATF_TC(uvm_page_physunload_none);
+ATF_TC_HEAD(uvm_page_physunload_none, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the basic uvm_page_physunload()\
+	    call works without a panic(). Does not unload from start or end \
+            because of non-aligned start / avail_start and end / avail_end \
+            respectively.");
+}
+ATF_TC_BODY(uvm_page_physunload_none, tc)
+{
+	psize_t npages = (VALID_END_PFN_2 - VALID_START_PFN_2);
+
+	struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
+
+	paddr_t p = 0;
+
+	uvm_physseg_t upm;
+
+	setup();
+	/*
+	 * Note: start != avail_start and end != avail_end.
+	 *
+	 * This prevents any unload from occurring.
+	 */
+	upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2 + 1, VALID_AVAIL_END_PFN_2 - 1,
+	    VM_FREELIST_DEFAULT);
+
+	p = 0;
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_REQUIRE(
+		uvm_physseg_get_avail_start(upm) != uvm_physseg_get_start(upm));
+
+	uvm_physseg_init_seg(upm, pgs);
+
+	ATF_CHECK_EQ(false, uvm_page_physunload(upm, VM_FREELIST_DEFAULT, &p));
+
+	/* uvm_page_physunload() will not unload any memory here */
+	ATF_CHECK_EQ(0, p);
+
+	/* Rest of the stuff should remain the same */
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2 + 1,
+	    uvm_physseg_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2 - 1,
+	    uvm_physseg_get_avail_end(upm));
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
+}
+
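+/*
+ * Taken together, the three tests above pin down the rule: plain
+ * uvm_page_physunload() only takes a page from the start when
+ * avail_start == start, or from the end when avail_end == end; with both
+ * boundaries mismatched it returns false and unloads nothing.
+ */
+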
+ATF_TC(uvm_page_physunload_delete_start);
+ATF_TC_HEAD(uvm_page_physunload_delete_start, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the  uvm_page_physunload() \
+	    works when the segment gets small enough to be deleted scenario. \
+            NOTE: This one works deletes from start.");
+}
+ATF_TC_BODY(uvm_page_physunload_delete_start, tc)
+{
+	/*
+	 * Would uvmexp.npages reduce every time uvm_page_physunload() is called?
+	 */
+	paddr_t p = 0;
+
+	uvm_physseg_t upm;
+	const psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+	const psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
+
+	struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));
+
+
+	setup();
+
+	/*
+	 * Set up the nuke from the starting point.
+	 */
+
+	upm = uvm_page_physload(VALID_END_PFN_1 - 1, VALID_END_PFN_1,
+	    VALID_AVAIL_END_PFN_1 - 1, VALID_AVAIL_END_PFN_1,
+	    VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2);
+
+	ATF_REQUIRE_EQ(1, uvmexp.npages);
+
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	ATF_CHECK_EQ(true, uvm_page_physunload(upm, VM_FREELIST_DEFAULT, &p));
+
+	ATF_CHECK_EQ(VALID_END_PFN_1 - 1, atop(p));
+
+	ATF_CHECK_EQ(1, uvm_physseg_get_entries());
+
+	/* The only node now is the one we inserted second. */
+	upm = uvm_physseg_get_first();
+
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physseg_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physseg_get_avail_end(upm));
+}
+
+ATF_TC(uvm_page_physunload_delete_end);
+ATF_TC_HEAD(uvm_page_physunload_delete_end, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the  uvm_page_physunload() \
+	    works when the segment gets small enough to be deleted scenario. \
+            NOTE: This one works deletes from end.");
+}
+ATF_TC_BODY(uvm_page_physunload_delete_end, tc)
+{
+	/*
+	 * Would uvmexp.npages reduce every time uvm_page_physunload() is called?
+	 */
+
+	paddr_t p = 0;
+
+	uvm_physseg_t upm;
+	const psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+	const psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
+
+	struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));
+
+	setup();
+
+	/*
+	 * Set up the nuke from the ending point.
+	 */
+
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_START_PFN_1 + 2,
+	    VALID_AVAIL_START_PFN_1 + 1, VALID_AVAIL_START_PFN_1 + 2,
+	    VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2);
+
+	ATF_REQUIRE_EQ(1, uvmexp.npages);
+
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	ATF_CHECK_EQ(true, uvm_page_physunload(upm, VM_FREELIST_DEFAULT, &p));
+
+	p = 0;
+
+	ATF_CHECK_EQ(true, uvm_page_physunload(upm, VM_FREELIST_DEFAULT, &p));
+
+	ATF_CHECK_EQ(VALID_START_PFN_1 + 2, atop(p));
+
+	ATF_CHECK_EQ(1, uvm_physseg_get_entries());
+
+	/* The only node now is the one we inserted second. */
+	upm = uvm_physseg_get_first();
+
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physseg_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physseg_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physseg_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physseg_get_avail_end(upm));
+}
+
+ATF_TC(uvm_page_physunload_invalid);
+ATF_TC_HEAD(uvm_page_physunload_invalid, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the  uvm_page_physunload() \
+	    fails when then Free list does not match.");
+}
+ATF_TC_BODY(uvm_page_physunload_invalid, tc)
+{
+	psize_t npages = (VALID_END_PFN_2 - VALID_START_PFN_2);
+
+	struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
+
+	paddr_t p = 0;
+
+	uvm_physseg_t upm;
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	uvm_physseg_init_seg(upm, pgs);
+
+	ATF_CHECK_EQ(false, uvm_page_physunload(upm, VM_FREELIST_FIRST4G, &p));
+}
+
+ATF_TC(uvm_page_physunload_force);
+ATF_TC_HEAD(uvm_page_physunload_force, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the basic \
+            uvm_page_physunload_force() including delete works without.");
+}
+ATF_TC_BODY(uvm_page_physunload_force, tc)
+{
+	/*
+	 * Would uvmexp.npages reduce every time uvm_page_physunload() is called?
+	 */
+	paddr_t p = 0;
+
+	uvm_physseg_t upm;
+	const psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+	const psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
+
+	struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2);
+
+	/*
+	 * We do a couple of physloads because if we physunload all the PFNs
+	 * from a given range while only one segment exists in total, a
+	 * panic() is called.
+	 */
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(VALID_AVAIL_START_PFN_1,
+	    uvm_physseg_get_avail_start(upm));
+
+	for(paddr_t i = VALID_AVAIL_START_PFN_1;
+	    i < VALID_AVAIL_END_PFN_1; i++) {
+		ATF_CHECK_EQ(true,
+		    uvm_page_physunload_force(upm, VM_FREELIST_DEFAULT, &p));
+		ATF_CHECK_EQ(i, atop(p));
+
+		if(i + 1 < VALID_AVAIL_END_PFN_1)
+			ATF_CHECK_EQ(i + 1, uvm_physseg_get_avail_start(upm));
+	}
+
+	/*
+	 * Now we try to retrieve the segment, which has been removed
+	 * from the system through force unloading all the pages inside it.
+	 */
+	upm = uvm_physseg_find(VALID_AVAIL_END_PFN_1 - 1, NULL);
+
+	/* It should no longer exist */
+	ATF_CHECK_EQ(NULL, upm);
+
+	ATF_CHECK_EQ(1, uvm_physseg_get_entries());
+}
+
+ATF_TC(uvm_page_physunload_force_invalid);
+ATF_TC_HEAD(uvm_page_physunload_force_invalid, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Tests if the invalid conditions for \
+            uvm_page_physunload_force_invalid().");
+}
+ATF_TC_BODY(uvm_page_physunload_force_invalid, tc)
+{
+	paddr_t p = 0;
+
+	uvm_physseg_t upm;
+
+	setup();
+	upm = uvm_page_physload(VALID_START_PFN_2, VALID_START_PFN_2 + 1,
+	    VALID_START_PFN_2, VALID_START_PFN_2, VM_FREELIST_DEFAULT);
+
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
+
+	ATF_CHECK_EQ(false,
+	    uvm_page_physunload_force(upm, VM_FREELIST_DEFAULT, &p));
+
+	ATF_CHECK_EQ(0, p);
+}
+
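+/*
+ * Note on the test above: the segment is loaded with
+ * avail_start == avail_end (an empty available range), so even the
+ * force variant has nothing to hand out and returns false.
+ */
+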
+ATF_TP_ADD_TCS(tp)
+{
+#if defined(UVM_HOTPLUG)
+	/* Internal */
+	ATF_TP_ADD_TC(tp, uvm_physseg_alloc_atboot_mismatch);
+	ATF_TP_ADD_TC(tp, uvm_physseg_alloc_atboot_overrun);
+	ATF_TP_ADD_TC(tp, uvm_physseg_alloc_sanity);
+	ATF_TP_ADD_TC(tp, uvm_physseg_free_atboot_mismatch);
+	ATF_TP_ADD_TC(tp, uvm_physseg_free_sanity);
+	ATF_TP_ADD_TC(tp, uvm_physseg_atboot_free_leak);
+#endif /* UVM_HOTPLUG */
+
+	ATF_TP_ADD_TC(tp, uvm_physseg_plug);
+	ATF_TP_ADD_TC(tp, uvm_physseg_unplug);
+
+	/* Exported */
+	ATF_TP_ADD_TC(tp, uvm_physseg_init);
+	ATF_TP_ADD_TC(tp, uvm_page_physload_preload);
+	ATF_TP_ADD_TC(tp, uvm_physseg_handle_immutable);
+	ATF_TP_ADD_TC(tp, uvm_physseg_seg_chomp_slab);
+	ATF_TP_ADD_TC(tp, uvm_physseg_alloc_from_slab);
+	ATF_TP_ADD_TC(tp, uvm_physseg_init_seg);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_start);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_start_invalid);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_end);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_end_invalid);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_avail_start);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_avail_start_invalid);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_avail_end);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_avail_end_invalid);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_next);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_prev);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_first);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_last);
+	ATF_TP_ADD_TC(tp, uvm_physseg_valid);
+	ATF_TP_ADD_TC(tp, uvm_physseg_valid_invalid);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_highest);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_free_list);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_start_hint);
+	ATF_TP_ADD_TC(tp, uvm_physseg_set_start_hint);
+	ATF_TP_ADD_TC(tp, uvm_physseg_set_start_hint_invalid);
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_pg);
+
+#ifdef __HAVE_PMAP_PHYSSEG
+	ATF_TP_ADD_TC(tp, uvm_physseg_get_pmseg);
+#endif
+	ATF_TP_ADD_TC(tp, vm_physseg_find);
+	ATF_TP_ADD_TC(tp, vm_physseg_find_invalid);
+
+	ATF_TP_ADD_TC(tp, uvm_page_physunload_start);
+	ATF_TP_ADD_TC(tp, uvm_page_physunload_end);
+	ATF_TP_ADD_TC(tp, uvm_page_physunload_none);
+	ATF_TP_ADD_TC(tp, uvm_page_physunload_delete_start);
+	ATF_TP_ADD_TC(tp, uvm_page_physunload_delete_end);
+	ATF_TP_ADD_TC(tp, uvm_page_physunload_invalid);
+	ATF_TP_ADD_TC(tp, uvm_page_physunload_force);
+	ATF_TP_ADD_TC(tp, uvm_page_physunload_force_invalid);
+
+	return atf_no_error();
+}
Index: tests/sys/uvm/t_uvm_physseg_load.c
===================================================================
RCS file: tests/sys/uvm/t_uvm_physseg_load.c
diff -N tests/sys/uvm/t_uvm_physseg_load.c
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ tests/sys/uvm/t_uvm_physseg_load.c	13 Dec 2016 12:44:57 -0000
@@@@ -0,0 +1,741 @@@@
+/* $NetBSD$ */
+
+/*-
+ * Copyright (c) 2015, 2016 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Santhosh N. Raju <santhosh.raju@@gmail.com> and
+ * by Cherry G. Mathew
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__RCSID("$NetBSD$");
+
+/* Testing API - assumes userland */
+/* Provide Kernel API equivalents */
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h> /* memset(3) et al. */
+#include <stdio.h> /* printf(3) */
+#include <stdlib.h> /* malloc(3) */
+#include <stdarg.h>
+#include <stddef.h>
+#include <time.h>
+
+#define	PRIxPADDR	"lx"
+#define	PRIxPSIZE	"lx"
+#define	PRIuPSIZE	"lu"
+#define	PRIxVADDR	"lx"
+#define	PRIxVSIZE	"lx"
+#define	PRIuVSIZE	"lu"
+
+#define UVM_HOTPLUG /* Enable hotplug with rbtree. */
+#define PMAP_STEAL_MEMORY
+#define DEBUG /* Enable debug functionality. */
+
+typedef unsigned long vaddr_t;
+typedef unsigned long paddr_t;
+typedef unsigned long psize_t;
+typedef unsigned long vsize_t;
+
+#include <uvm/uvm_page.h>
+
+/*
+ * If this line is commented out, tests related to uvm_physseg_get_pmseg()
+ * won't run.
+ *
+ * Have a look at machine/uvm_physseg.h for more details.
+ */
+#define __HAVE_PMAP_PHYSSEG
+
+#include <uvm/uvm_physseg.h>
+
+/*
+ * This is a dummy struct used for testing purposes.
+ *
+ * In reality this struct would exist in the MD part of the code, residing
+ * in machine/vmparam.h
+ */
+
+#ifdef __HAVE_PMAP_PHYSSEG
+struct pmap_physseg {
+	bool dummy_variable;		/* Dummy variable used for testing */
+};
+#endif
+
+#ifndef DIAGNOSTIC
+#define	KASSERTMSG(e, msg, ...)	/* NOTHING */
+#define	KASSERT(e)		/* NOTHING */
+#else
+#define	KASSERT(a)		assert(a)
+#define KASSERTMSG(exp, ...)    do { printf(__VA_ARGS__);	\
+				    assert((exp)); } while (/*CONSTCOND*/0)
+#endif
+
+#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
+
+#define VM_NFREELIST            4
+#define VM_FREELIST_DEFAULT     0
+#define VM_FREELIST_FIRST16     3
+#define VM_FREELIST_FIRST1G     2
+#define VM_FREELIST_FIRST4G     1
+
+/*
+ * Used in tests where the array implementation is tested.
+ */
+#if !defined(VM_PHYSSEG_MAX)
+#define VM_PHYSSEG_MAX          32
+#endif
+
+#define PAGE_SIZE               4096
+#define PAGE_SHIFT              12
+#define atop(x)         (((paddr_t)(x)) >> PAGE_SHIFT)
+
+#define	mutex_enter(l)
+#define	mutex_exit(l)
+
+#define	_SYS_KMEM_H_ /* Disallow the real kmem API (see below) */
+/* free(p) XXX: pgs management needs more thought */
+#define kmem_alloc(size, flags) malloc(size)
+#define kmem_zalloc(size, flags) malloc(size)
+#define kmem_free(p, size) free(p)
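+
+/*
+ * Note: the shims above discard the kmem(9) flags and size arguments,
+ * so KM_SLEEP/KM_NOSLEEP semantics are not modelled in these tests.
+ */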
+
+psize_t physmem;
+
+struct uvmexp uvmexp;        /* decl */
+
+/*
+ * uvm structure borrowed from uvm.h
+ *
+ * Remember this is a dummy structure used within the ATF tests; it
+ * carries only the necessary fields from the original uvm struct.
+ * See uvm/uvm.h for the full struct.
+ */
+
+struct uvm {
+	/* vm_page related parameters */
+
+	bool page_init_done;		/* TRUE if uvm_page_init() finished */
+} uvm;
+
+static void
+panic(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	vprintf(fmt, ap);
+	printf("\n");
+	va_end(ap);
+	KASSERT(false);
+
+	/*NOTREACHED*/
+}
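+
+/*
+ * Note: unlike the kernel's panic(9), this shim relies on KASSERT() to
+ * abort; if DIAGNOSTIC were not defined, the KASSERT would compile away
+ * and the shim would simply return to its caller.
+ */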
+
+static void
+uvm_pagefree(struct vm_page *pg)
+{
+	return;
+}
+
+#if defined(UVM_HOTPLUG)
+static void
+uvmpdpol_reinit(void)
+{
+	return;
+}
+#endif /* UVM_HOTPLUG */
+
+/* end - Provide Kernel API equivalents */
+
+#include "uvm/uvm_physseg.c"
+
+#include <atf-c.h>
+
+#define ONE_MEGABYTE (1024 * 1024)
+
+/* Sample Page Frame Numbers */
+#define VALID_START_PFN_1 atop(0)
+#define VALID_END_PFN_1 atop(ONE_MEGABYTE)
+#define VALID_AVAIL_START_PFN_1 atop(0)
+#define VALID_AVAIL_END_PFN_1 atop(ONE_MEGABYTE)
+
+#define VALID_START_PFN_2 atop(ONE_MEGABYTE + 1)
+#define VALID_END_PFN_2 atop(ONE_MEGABYTE * 2)
+#define VALID_AVAIL_START_PFN_2 atop(ONE_MEGABYTE + 1)
+#define VALID_AVAIL_END_PFN_2 atop(ONE_MEGABYTE * 2)
+
+#define VALID_START_PFN_3 atop((ONE_MEGABYTE * 2) + 1)
+#define VALID_END_PFN_3 atop(ONE_MEGABYTE * 3)
+#define VALID_AVAIL_START_PFN_3 atop((ONE_MEGABYTE * 2) + 1)
+#define VALID_AVAIL_END_PFN_3 atop(ONE_MEGABYTE * 3)
+
+#define VALID_START_PFN_4 atop(ONE_MEGABYTE + 1)
+#define VALID_END_PFN_4 atop(ONE_MEGABYTE * 128)
+#define VALID_AVAIL_START_PFN_4 atop(ONE_MEGABYTE + 1)
+#define VALID_AVAIL_END_PFN_4 atop(ONE_MEGABYTE * 128)
+
+#define VALID_START_PFN_5 atop(ONE_MEGABYTE + 1)
+#define VALID_END_PFN_5 atop(ONE_MEGABYTE * 256)
+#define VALID_AVAIL_START_PFN_5 atop(ONE_MEGABYTE + 1)
+#define VALID_AVAIL_END_PFN_5 atop(ONE_MEGABYTE * 256)
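+
+/*
+ * Note: segments 2, 4 and 5 share the same start PFN,
+ * atop(ONE_MEGABYTE + 1), and differ only in length; plugging at any of
+ * their START macros therefore selects the same base frame.
+ */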
+
+/*
+ * Total number of pages (of 4K size each) should be 256 for 1MB of memory.
+ */
+#define PAGE_COUNT_1M      256
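+
+/*
+ * Worked example: (1024 * 1024) / 4096 == 256, i.e.
+ * atop(ONE_MEGABYTE) == PAGE_COUNT_1M.
+ */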
+
+/*
+ * The number of Page Frames to allot per segment
+ */
+#define PF_STEP 8
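+
+/*
+ * With PF_STEP == 8 and VALID_END_PFN_1 == atop(ONE_MEGABYTE) == 256, the
+ * physload loops in the fixed-size tests below create 256 / 8 == 32
+ * segments, which is why they require uvm_physseg_get_entries() to equal
+ * VM_PHYSSEG_MAX (32).
+ */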
+
+/*
+ * A debug function to print the contents of upm.
+ */
+static inline void
+uvm_physseg_dump_seg(uvm_physseg_t upm)
+{
+#if defined(DEBUG)
+	printf("%s: seg->start == %ld\n", __func__,
+	    uvm_physseg_get_start(upm));
+	printf("%s: seg->end == %ld\n", __func__,
+	    uvm_physseg_get_end(upm));
+	printf("%s: seg->avail_start == %ld\n", __func__,
+	    uvm_physseg_get_avail_start(upm));
+	printf("%s: seg->avail_end == %ld\n", __func__,
+	    uvm_physseg_get_avail_end(upm));
+
+	printf("====\n\n");
+#else
+	return;
+#endif /* DEBUG */
+}
+
+/*
+ * Private accessor that gets the current number of physseg entries
+ */
+static int
+uvm_physseg_get_entries(void)
+{
+#if defined(UVM_HOTPLUG)
+	return uvm_physseg_graph.nentries;
+#else
+	return vm_nphysmem;
+#endif /* UVM_HOTPLUG */
+}
+
+/*
+ * Note: This function replicates verbatim what happens in
+ * uvm_page.c:uvm_page_init().
+ *
+ * Please track any changes that happen there.
+ */
+static void
+uvm_page_init_fake(struct vm_page *pagearray, psize_t pagecount)
+{
+	uvm_physseg_t bank;
+	size_t n;
+
+	for (bank = uvm_physseg_get_first(),
+		 uvm_physseg_seg_chomp_slab(bank, pagearray, pagecount);
+	     uvm_physseg_valid(bank);
+	     bank = uvm_physseg_get_next(bank)) {
+
+		n = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
+		uvm_physseg_seg_alloc_from_slab(bank, n);
+		uvm_physseg_init_seg(bank, pagearray);
+
+		/* set up page array pointers */
+		pagearray += n;
+		pagecount -= n;
+	}
+
+	uvm.page_init_done = true;
+}
+
+/*
+ * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
+ * back from an I/O mapping (ugh!).   used in some MD code as well.
+ */
+static struct vm_page *
+uvm_phys_to_vm_page(paddr_t pa)
+{
+	paddr_t pf = atop(pa);
+	paddr_t off;
+	uvm_physseg_t psi;
+
+	psi = uvm_physseg_find(pf, &off);
+	if (psi != UVM_PHYSSEG_TYPE_INVALID)
+		return uvm_physseg_get_pg(psi, off);
+	return(NULL);
+}
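+
+/*
+ * Illustrative round trip (a sketch, not compiled into the tests): once a
+ * segment covering VALID_START_PFN_1 has been plugged and initialized, a
+ * physical address inside it should map back to a page:
+ *
+ *	struct vm_page *pg = uvm_phys_to_vm_page(ctob(VALID_START_PFN_1));
+ *	KASSERT(pg != NULL);
+ */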
+
+//static paddr_t
+//uvm_vm_page_to_phys(const struct vm_page *pg)
+//{
+//
+//	return pg->phys_addr;
+//}
+
+/*
+ * XXX: TODO: write control test cases for uvm_vm_page_to_phys().
+ */
+
+/* #define VM_PAGE_TO_PHYS(entry)  uvm_vm_page_to_phys(entry) */
+
+#define PHYS_TO_VM_PAGE(pa)     uvm_phys_to_vm_page(pa)
+
+/*
+ * Test Fixture SetUp().
+ */
+static void
+setup(void)
+{
+	/* Prerequisites for running certain calls in uvm_physseg */
+	uvmexp.pagesize = PAGE_SIZE;
+	uvmexp.npages = 0;
+	uvm.page_init_done = false;
+	uvm_physseg_init();
+}
+
+ATF_TC(uvm_physseg_100);
+ATF_TC_HEAD(uvm_physseg_100, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
+            100 calls, VM_PHYSSEG_MAX is 32.");
+}
+ATF_TC_BODY(uvm_physseg_100, tc)
+{
+	paddr_t pa;
+
+	setup();
+
+	for(paddr_t i = VALID_START_PFN_1;
+	    i < VALID_END_PFN_1; i += PF_STEP) {
+		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
+		    VM_FREELIST_DEFAULT);
+	}
+
+	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
+
+	srandom((unsigned)time(NULL));
+	for(int i = 0; i < 100; i++) {
+		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
+		PHYS_TO_VM_PAGE(pa);
+	}
+
+	ATF_CHECK_EQ(true, true);
+}
+
+ATF_TC(uvm_physseg_1K);
+ATF_TC_HEAD(uvm_physseg_1K, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
+            1000 calls, VM_PHYSSEG_MAX is 32.");
+}
+ATF_TC_BODY(uvm_physseg_1K, tc)
+{
+	paddr_t pa;
+
+	setup();
+
+	for(paddr_t i = VALID_START_PFN_1;
+	    i < VALID_END_PFN_1; i += PF_STEP) {
+		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
+		    VM_FREELIST_DEFAULT);
+	}
+
+	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
+
+	srandom((unsigned)time(NULL));
+	for(int i = 0; i < 1000; i++) {
+		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
+		PHYS_TO_VM_PAGE(pa);
+	}
+
+	ATF_CHECK_EQ(true, true);
+}
+
+ATF_TC(uvm_physseg_10K);
+ATF_TC_HEAD(uvm_physseg_10K, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
+            10,000 calls, VM_PHYSSEG_MAX is 32.");
+}
+ATF_TC_BODY(uvm_physseg_10K, tc)
+{
+	paddr_t pa;
+
+	setup();
+
+	for(paddr_t i = VALID_START_PFN_1;
+	    i < VALID_END_PFN_1; i += PF_STEP) {
+		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
+		    VM_FREELIST_DEFAULT);
+	}
+
+	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
+
+	srandom((unsigned)time(NULL));
+	for(int i = 0; i < 10000; i++) {
+		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
+		PHYS_TO_VM_PAGE(pa);
+	}
+
+	ATF_CHECK_EQ(true, true);
+}
+
+ATF_TC(uvm_physseg_100K);
+ATF_TC_HEAD(uvm_physseg_100K, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
+            100,000 calls, VM_PHYSSEG_MAX is 32.");
+}
+ATF_TC_BODY(uvm_physseg_100K, tc)
+{
+	paddr_t pa;
+
+	setup();
+
+	for(paddr_t i = VALID_START_PFN_1;
+	    i < VALID_END_PFN_1; i += PF_STEP) {
+		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
+		    VM_FREELIST_DEFAULT);
+	}
+
+	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
+
+	srandom((unsigned)time(NULL));
+	for(int i = 0; i < 100000; i++) {
+		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
+		PHYS_TO_VM_PAGE(pa);
+	}
+
+	ATF_CHECK_EQ(true, true);
+}
+
+ATF_TC(uvm_physseg_1M);
+ATF_TC_HEAD(uvm_physseg_1M, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
+            1,000,000 calls, VM_PHYSSEG_MAX is 32.");
+}
+ATF_TC_BODY(uvm_physseg_1M, tc)
+{
+	paddr_t pa;
+
+	setup();
+
+	for(paddr_t i = VALID_START_PFN_1;
+	    i < VALID_END_PFN_1; i += PF_STEP) {
+		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
+		    VM_FREELIST_DEFAULT);
+	}
+
+	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
+
+	srandom((unsigned)time(NULL));
+	for(int i = 0; i < 1000000; i++) {
+		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
+		PHYS_TO_VM_PAGE(pa);
+	}
+
+	ATF_CHECK_EQ(true, true);
+}
+
+ATF_TC(uvm_physseg_10M);
+ATF_TC_HEAD(uvm_physseg_10M, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
+            10,000,000 calls, VM_PHYSSEG_MAX is 32.");
+}
+ATF_TC_BODY(uvm_physseg_10M, tc)
+{
+	paddr_t pa;
+
+	setup();
+
+	for(paddr_t i = VALID_START_PFN_1;
+	    i < VALID_END_PFN_1; i += PF_STEP) {
+		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
+		    VM_FREELIST_DEFAULT);
+	}
+
+	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
+
+	srandom((unsigned)time(NULL));
+	for(int i = 0; i < 10000000; i++) {
+		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
+		PHYS_TO_VM_PAGE(pa);
+	}
+
+	ATF_CHECK_EQ(true, true);
+}
+
+ATF_TC(uvm_physseg_100M);
+ATF_TC_HEAD(uvm_physseg_100M, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
+            100,000,000 calls, VM_PHYSSEG_MAX is 32.");
+}
+ATF_TC_BODY(uvm_physseg_100M, tc)
+{
+	paddr_t pa;
+
+	setup();
+
+	for(paddr_t i = VALID_START_PFN_1;
+	    i < VALID_END_PFN_1; i += PF_STEP) {
+		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
+		    VM_FREELIST_DEFAULT);
+	}
+
+	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
+
+	srandom((unsigned)time(NULL));
+	for(int i = 0; i < 100000000; i++) {
+		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
+		PHYS_TO_VM_PAGE(pa);
+	}
+
+	ATF_CHECK_EQ(true, true);
+}
+
+ATF_TC(uvm_physseg_1MB);
+ATF_TC_HEAD(uvm_physseg_1MB, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
+            10,000,000 calls, VM_PHYSSEG_MAX is 32 on 1 MB Segment.");
+}
+ATF_TC_BODY(uvm_physseg_1MB, tc)
+{
+	paddr_t pa = 0;
+
+	paddr_t pf = 0;
+
+	psize_t pf_chunk_size = 0;
+
+	psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+
+	psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
+
+	struct vm_page *slab = malloc(sizeof(struct vm_page) *
+	    (npages1 + npages2));
+
+	setup();
+
+	/* We start with zero segments */
+	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2);
+
+	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_2, npages2, NULL));
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	srandom((unsigned)time(NULL));
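+	/*
+	 * Punch random-sized holes (1 to PF_STEP - 1 pages) into the second
+	 * segment so the lookups below hit both live and unplugged ranges.
+	 */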
+	for(pf = VALID_START_PFN_2; pf < VALID_END_PFN_2; pf += PF_STEP) {
+		pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
+		uvm_physseg_unplug(pf, pf_chunk_size);
+	}
+
+	for(int i = 0; i < 10000000; i++) {
+		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_2);
+		if(pa < ctob(VALID_START_PFN_2))
+			pa += ctob(VALID_START_PFN_2);
+		PHYS_TO_VM_PAGE(pa);
+	}
+
+	ATF_CHECK_EQ(true, true);
+}
+
+ATF_TC(uvm_physseg_64MB);
+ATF_TC_HEAD(uvm_physseg_64MB, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
+            10,000,000 calls, VM_PHYSSEG_MAX is 32 on 64 MB Segment.");
+}
+ATF_TC_BODY(uvm_physseg_64MB, tc)
+{
+	paddr_t pa = 0;
+
+	paddr_t pf = 0;
+
+	psize_t pf_chunk_size = 0;
+
+	psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+
+	psize_t npages2 = (VALID_END_PFN_3 - VALID_START_PFN_3);
+
+	struct vm_page *slab = malloc(sizeof(struct vm_page) *
+	    (npages1 + npages2));
+
+	setup();
+
+	/* We start with zero segments */
+	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2);
+
+	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_3, npages2, NULL));
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	srandom((unsigned)time(NULL));
+	for(pf = VALID_START_PFN_3; pf < VALID_END_PFN_3; pf += PF_STEP) {
+		pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
+		uvm_physseg_unplug(pf, pf_chunk_size);
+	}
+
+	for(int i = 0; i < 10000000; i++) {
+		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_3);
+		if(pa < ctob(VALID_START_PFN_3))
+			pa += ctob(VALID_START_PFN_3);
+		PHYS_TO_VM_PAGE(pa);
+	}
+
+	ATF_CHECK_EQ(true, true);
+}
+
+ATF_TC(uvm_physseg_128MB);
+ATF_TC_HEAD(uvm_physseg_128MB, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
+            10,000,000 calls, VM_PHYSSEG_MAX is 32 on 128 MB Segment.");
+}
+ATF_TC_BODY(uvm_physseg_128MB, tc)
+{
+	paddr_t pa = 0;
+
+	paddr_t pf = 0;
+
+	psize_t pf_chunk_size = 0;
+
+	psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+
+	psize_t npages2 = (VALID_END_PFN_4 - VALID_START_PFN_4);
+
+	struct vm_page *slab = malloc(sizeof(struct vm_page)
+	    * (npages1 + npages2));
+
+	setup();
+
+	/* We start with zero segments */
+	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2);
+
+	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_4, npages2, NULL));
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	srandom((unsigned)time(NULL));
+	for(pf = VALID_START_PFN_4; pf < VALID_END_PFN_4; pf += PF_STEP) {
+		pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
+		uvm_physseg_unplug(pf, pf_chunk_size);
+	}
+
+	for(int i = 0; i < 10000000; i++) {
+		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_4);
+		if(pa < ctob(VALID_START_PFN_4))
+			pa += ctob(VALID_START_PFN_4);
+		PHYS_TO_VM_PAGE(pa);
+	}
+
+	ATF_CHECK_EQ(true, true);
+}
+
+ATF_TC(uvm_physseg_256MB);
+ATF_TC_HEAD(uvm_physseg_256MB, tc)
+{
+	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
+            10,000,000 calls, VM_PHYSSEG_MAX is 32 on 256 MB Segment.");
+}
+ATF_TC_BODY(uvm_physseg_256MB, tc)
+{
+	paddr_t pa = 0;
+
+	paddr_t pf = 0;
+
+	psize_t pf_chunk_size = 0;
+
+	psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
+
+	psize_t npages2 = (VALID_END_PFN_5 - VALID_START_PFN_5);
+
+	struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));
+
+	setup();
+
+	/* We start with zero segments */
+	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
+	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
+
+	/* Post boot: Fake all segments and pages accounted for. */
+	uvm_page_init_fake(slab, npages1 + npages2);
+
+	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_5, npages2, NULL));
+	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
+
+	srandom((unsigned)time(NULL));
+	for(pf = VALID_START_PFN_5; pf < VALID_END_PFN_5; pf += PF_STEP) {
+		pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
+		uvm_physseg_unplug(pf, pf_chunk_size);
+	}
+
+	for(int i = 0; i < 10000000; i++) {
+		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_5);
+		if(pa < ctob(VALID_START_PFN_5))
+			pa += ctob(VALID_START_PFN_5);
+		PHYS_TO_VM_PAGE(pa);
+	}
+
+	ATF_CHECK_EQ(true, true);
+}
+
+ATF_TP_ADD_TCS(tp)
+{
+	/* Fixed memory size tests. */
+	ATF_TP_ADD_TC(tp, uvm_physseg_100);
+	ATF_TP_ADD_TC(tp, uvm_physseg_1K);
+	ATF_TP_ADD_TC(tp, uvm_physseg_10K);
+	ATF_TP_ADD_TC(tp, uvm_physseg_100K);
+	ATF_TP_ADD_TC(tp, uvm_physseg_1M);
+	ATF_TP_ADD_TC(tp, uvm_physseg_10M);
+	ATF_TP_ADD_TC(tp, uvm_physseg_100M);
+
+#if defined(UVM_HOTPLUG)
+	/* Variable memory size tests. */
+	ATF_TP_ADD_TC(tp, uvm_physseg_1MB);
+	ATF_TP_ADD_TC(tp, uvm_physseg_64MB);
+	ATF_TP_ADD_TC(tp, uvm_physseg_128MB);
+	ATF_TP_ADD_TC(tp, uvm_physseg_256MB);
+#endif /* UVM_HOTPLUG */
+
+	return atf_no_error();
+}
@


1.8
log
@Remove leading untracked file mess in the diff
@
text
@d6 2
a7 2
+++ tests/sys/uvm/Makefile	20 Nov 2016 07:20:53 -0000
@@@@ -0,0 +1,29 @@@@
d18 1
a18 1
+DPSRCS=		${NETBSDSRCDIR}/sys/uvm/uvm_physmem.[ch]
d21 3
a23 3
+TESTS_C+=	t_uvm_physmem
+SRCS.t_uvm_physmem+=	t_uvm_physmem.c subr_extent.c
+CPPFLAGS.t_uvm_physmem.c= -D_EXTENT_TESTING -D__POOL_EXPOSE -DDIAGNOSTIC
d26 3
a28 8
+TESTS_C+=       t_uvm_physmem_load
+SRCS.t_uvm_physmem_load+=       t_uvm_physmem_load.c subr_extent.c
+CPPFLAGS.t_uvm_physmem_load.c= -D_EXTENT_TESTING -D__POOL_EXPOSE -DDIAGNOSTIC
+
+TESTS_C+=       t_rump_uvm_physseg
+
+ADD_TO_LD=      -lrumpvfs -lrump -lrumpuser -lrump -lpthread 
+LDADD+=         ${ADD_TO_LD}
d32 1
a32 84
Index: tests/sys/uvm/t_rump_uvm_physseg.c
===================================================================
RCS file: tests/sys/uvm/t_rump_uvm_physseg.c
diff -N tests/sys/uvm/t_rump_uvm_physseg.c
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ tests/sys/uvm/t_rump_uvm_physseg.c	20 Nov 2016 07:20:53 -0000
@@@@ -0,0 +1,76 @@@@
+/* $NetBSD$ */
+
+/*-
+ * Copyright (c) 2016 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Santhosh N. Raju <santhosh.raju@@gmail.com> and
+ * Cherry G. Mathew
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <sys/sysctl.h>
+
+#include <rump/rump.h>
+
+#include <atf-c.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+
+#define UVMWAIT_LIMIT 1024 * 1024
+
+ATF_TC(rump_uvm_physseg_plug);
+ATF_TC_HEAD(rump_uvm_physseg_plug, tc)
+{
+
+	atf_tc_set_md_var(tc, "descr", "Checks uvm_physseg_plug via rump");
+}
+
+ATF_TC_BODY(rump_uvm_physseg_plug, tc)
+{
+
+	rump_boot_sethowto(RUMP_AB_VERBOSE);
+        char buf[64];
+
+	/* limit rump kernel memory */
+	snprintf(buf, sizeof(buf), "%d", UVMWAIT_LIMIT);
+	setenv("RUMP_MEMLIMIT", buf, 1);
+
+	rump_init();
+	rump_printevcnts();
+//	rump_schedule();
+//	rump_unschedule();
+}
+
+
+ATF_TP_ADD_TCS(tp)
+{
+
+	ATF_TP_ADD_TC(tp, rump_uvm_physseg_plug);
+
+	return atf_no_error();
+}
Index: tests/sys/uvm/t_uvm_physmem.c
d34 2
a35 2
RCS file: tests/sys/uvm/t_uvm_physmem.c
diff -N tests/sys/uvm/t_uvm_physmem.c
d37 2
a38 2
+++ tests/sys/uvm/t_uvm_physmem.c	20 Nov 2016 07:20:55 -0000
@@@@ -0,0 +1,2196 @@@@
d103 1
a103 1
+ * If this line is commented out tests related to uvm_physmem_get_pmseg()
d106 1
a106 1
+ * Have a look at machine/uvm_physmem.h for more details.
d110 1
a110 1
+#include <uvm/uvm_physmem.h>
d231 1
a231 1
+#include "uvm/uvm_physmem.c"
d272 1
a272 1
+uvm_physmem_dump_seg(uvm_physmem_t upm)
d276 1
a276 1
+	    uvm_physmem_get_start(upm));
d278 1
a278 1
+	    uvm_physmem_get_end(upm));
d280 1
a280 1
+	    uvm_physmem_get_avail_start(upm));
d282 1
a282 1
+	    uvm_physmem_get_avail_end(upm));
d294 1
a294 1
+uvm_physmem_get_entries(void)
d305 1
a305 1
+uvm_physmem_alloc(size_t sz)
d317 1
a317 1
+	/* Prerequisites for running certain calls in uvm_physmem */
d321 1
a321 1
+	uvm_physmem_init();
d327 2
a328 2
+ATF_TC(uvm_physmem_alloc_atboot_mismatch);
+ATF_TC_HEAD(uvm_physmem_alloc_atboot_mismatch, tc)
d330 1
a330 1
+	atf_tc_set_md_var(tc, "descr", "boot time uvm_physmem_alloc() sanity"
d334 1
a334 1
+ATF_TC_BODY(uvm_physmem_alloc_atboot_mismatch, tc)
d340 1
a340 1
+	uvm_physmem_alloc(sizeof(struct uvm_physseg) - 1);
d343 2
a344 2
+ATF_TC(uvm_physmem_alloc_atboot_overrun);
+ATF_TC_HEAD(uvm_physmem_alloc_atboot_overrun, tc)
d346 1
a346 1
+	atf_tc_set_md_var(tc, "descr", "boot time uvm_physmem_alloc() sanity"
d350 1
a350 1
+ATF_TC_BODY(uvm_physmem_alloc_atboot_overrun, tc)
d356 1
a356 1
+	uvm_physmem_alloc((VM_PHYSSEG_MAX + 1) * sizeof(struct uvm_physseg));
d360 2
a361 2
+ATF_TC(uvm_physmem_alloc_sanity);
+ATF_TC_HEAD(uvm_physmem_alloc_sanity, tc)
d363 1
a363 1
+	atf_tc_set_md_var(tc, "descr", "further uvm_physmem_alloc() sanity checks");
d366 1
a366 1
+ATF_TC_BODY(uvm_physmem_alloc_sanity, tc)
d373 1
a373 1
+	ATF_REQUIRE(uvm_physmem_alloc(VM_PHYSSEG_MAX * sizeof(struct uvm_physseg)));
d377 2
a378 2
+	ATF_REQUIRE(uvm_physmem_alloc(sizeof(struct uvm_physseg) - 1));
+	ATF_REQUIRE(uvm_physmem_alloc(2 * VM_PHYSSEG_MAX * sizeof(struct uvm_physseg)));
d381 2
a382 2
+ATF_TC(uvm_physmem_free_atboot_mismatch);
+ATF_TC_HEAD(uvm_physmem_free_atboot_mismatch, tc)
d384 1
a384 1
+	atf_tc_set_md_var(tc, "descr", "boot time uvm_physmem_free() sanity"
d388 1
a388 1
+ATF_TC_BODY(uvm_physmem_free_atboot_mismatch, tc)
d394 1
a394 1
+	uvm_physmem_free(&uvm_physseg[0], sizeof(struct uvm_physseg) - 1);
d397 2
a398 2
+ATF_TC(uvm_physmem_free_sanity);
+ATF_TC_HEAD(uvm_physmem_free_sanity, tc)
d400 1
a400 1
+	atf_tc_set_md_var(tc, "descr", "further uvm_physmem_free() sanity checks");
d403 1
a403 1
+ATF_TC_BODY(uvm_physmem_free_sanity, tc)
d415 2
a416 2
+	seg = uvm_physmem_alloc((VM_PHYSSEG_MAX - 1) * sizeof(*seg));
+	uvm_physmem_free(seg, (VM_PHYSSEG_MAX - 1) * sizeof(struct uvm_physseg));
d421 2
a422 2
+	seg = uvm_physmem_alloc(sizeof(struct uvm_physseg) - 1);
+	uvm_physmem_free(seg, sizeof(struct uvm_physseg) - 1);
d424 1
a424 1
+	seg = uvm_physmem_alloc(2 * VM_PHYSSEG_MAX * sizeof(struct uvm_physseg));
d426 1
a426 1
+	uvm_physmem_free(seg, 2 * VM_PHYSSEG_MAX * sizeof(struct uvm_physseg));
d429 2
a430 2
+ATF_TC(uvm_physmem_atboot_free_leak);
+ATF_TC_HEAD(uvm_physmem_atboot_free_leak, tc)
d435 1
a435 1
+ATF_TC_BODY(uvm_physmem_atboot_free_leak, tc)
d443 1
a443 1
+	seg = uvm_physmem_alloc(VM_PHYSSEG_MAX * sizeof(*seg));
d445 1
a445 1
+	uvm_physmem_free(seg, sizeof(*seg));
d449 1
a449 1
+	ATF_REQUIRE(uvm_physmem_alloc(sizeof(struct uvm_physseg)));
d462 1
a462 1
+	uvm_physmem_t bank;
d465 8
a472 8
+	for (bank = uvm_physmem_get_first(),
+		 uvm_physmem_seg_chomp_slab(bank, pagearray, pagecount);
+	     uvm_physmem_valid(bank);
+	     bank = uvm_physmem_get_next(bank)) {
+
+		n = uvm_physmem_get_end(bank) - uvm_physmem_get_start(bank);
+		uvm_physmem_seg_slab_alloc(bank, n);
+		uvm_physmem_init_seg(bank, pagearray);
d482 2
a483 2
+ATF_TC(uvm_physmem_plug);
+ATF_TC_HEAD(uvm_physmem_plug, tc)
d488 1
a488 1
+ATF_TC_BODY(uvm_physmem_plug, tc)
d490 1
a490 1
+	uvm_physmem_t upm1, upm2, upm3, upm4;
d502 2
a503 2
+	ATF_REQUIRE_EQ(uvm_physmem_plug(VALID_START_PFN_1, npages1, &upm1), true);
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d506 2
a507 2
+	ATF_REQUIRE_EQ(uvm_physmem_plug(VALID_START_PFN_2, npages2, &upm2), true);
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d515 2
a516 2
+	ATF_REQUIRE_EQ(uvm_physmem_plug(VALID_START_PFN_3, npages3, &upm3), true);
+	ATF_REQUIRE_EQ(3, uvm_physmem_get_entries());
d520 1
a520 1
+	pgs = uvm_physmem_get_pg(upm3, 0);
d524 1
a524 1
+	ATF_REQUIRE_EQ(uvm_physmem_plug(VALID_START_PFN_4, npages4, &upm4), true);
d526 1
a526 1
+	pgs = uvm_physmem_get_pg(upm4, 0);
d530 2
a531 2
+ATF_TC(uvm_physmem_unplug);
+ATF_TC_HEAD(uvm_physmem_unplug, tc)
d536 1
a536 1
+ATF_TC_BODY(uvm_physmem_unplug, tc)
d546 1
a546 1
+	uvm_physmem_t upm;
d552 2
a553 2
+	ATF_REQUIRE_EQ(true, uvm_physmem_plug(atop(0), atop(ONE_MEGABYTE), NULL));
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d555 1
a555 1
+	vm_physseg_find(atop(TWOFIFTYSIX_KILO), &pa);
d560 1
a560 1
+	ATF_REQUIRE_EQ(true, uvm_physmem_unplug(atop(TWOFIFTYSIX_KILO), atop(FIVEONETWO_KILO)));
d563 1
a563 1
+	vm_physseg_find(atop(TWOFIFTYSIX_KILO), &pa);
d572 1
a572 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d574 1
a574 1
+	upm = vm_physseg_find(atop(TWOFIFTYSIX_KILO + FIVEONETWO_KILO), NULL);
d576 1
a576 1
+	ATF_REQUIRE(uvm_physmem_valid(upm));
d579 1
a579 1
+	ATF_REQUIRE_EQ(true, uvm_physmem_unplug(atop(TWOFIFTYSIX_KILO + FIVEONETWO_KILO), atop(TWOFIFTYSIX_KILO)));
d582 1
a582 1
+	ATF_REQUIRE_EQ(false, uvm_physmem_valid(upm));
d585 1
a585 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d588 1
a588 1
+	ATF_REQUIRE_EQ(true, uvm_physmem_unplug(atop(ONETWENTYEIGHT_KILO), atop(ONETWENTYEIGHT_KILO)));
d590 1
a590 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d593 1
a593 1
+	ATF_REQUIRE_EQ(true, uvm_physmem_unplug(0, atop(SIXTYFOUR_KILO)));
d595 1
a595 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d599 1
a599 1
+	ATF_REQUIRE_EQ(true, uvm_physmem_unplug(atop(SIXTYFOUR_KILO), atop(SIXTYFOUR_KILO)));
d606 2
a607 2
+ATF_TC(uvm_physmem_init);
+ATF_TC_HEAD(uvm_physmem_init, tc)
d612 1
a612 1
+ATF_TC_BODY(uvm_physmem_init, tc)
d614 1
a614 1
+	uvm_physmem_init();
d616 1
a616 1
+	ATF_REQUIRE_EQ(0, uvm_physmem_get_entries());
d627 1
a627 1
+	uvm_physmem_t upm;
d638 1
a638 1
+	ATF_REQUIRE(uvm_physmem_valid(upm));
d644 1
a644 1
+	ATF_CHECK_EQ(1, uvm_physmem_get_entries());
d653 1
a653 1
+	ATF_REQUIRE(uvm_physmem_valid(upm));
d658 1
a658 1
+	ATF_CHECK_EQ(2, uvm_physmem_get_entries());
d661 2
a662 2
+ATF_TC(uvm_physmem_handle_immutable);
+ATF_TC_HEAD(uvm_physmem_handle_immutable, tc)
d664 1
a664 1
+	atf_tc_set_md_var(tc, "descr", "Tests if the uvm_physmem_t handle is \
d667 1
a667 1
+ATF_TC_BODY(uvm_physmem_handle_immutable, tc)
d669 1
a669 1
+	uvm_physmem_t upm;
d680 1
a680 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d682 1
a682 1
+	ATF_CHECK_EQ(UVM_PHYSMEM_TYPE_INVALID_EMPTY, uvm_physmem_get_prev(upm));
d689 1
a689 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d692 1
a692 1
+	upm = uvm_physmem_get_prev(upm);
d694 1
d699 5
a703 3
+	ATF_CHECK(UVM_PHYSMEM_TYPE_INVALID_EMPTY != upm);
+	ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physmem_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physmem_get_end(upm));
d706 2
a707 2
+ATF_TC(uvm_physmem_seg_chomp_slab);
+ATF_TC_HEAD(uvm_physmem_seg_chomp_slab, tc)
d712 1
a712 1
+ATF_TC_BODY(uvm_physmem_seg_chomp_slab, tc)
d717 1
a717 1
+	const size_t npages = UVM_PHYSMEM_BOOT_UNPLUG_MAX; /* Number of pages */
d724 1
a724 1
+	seg = uvm_physmem_alloc(sizeof(struct uvm_physseg));
d726 1
a726 1
+	uvm_physmem_seg_chomp_slab(PHYSMEM_NODE_TO_HANDLE(seg), slab, npages * 2);
d751 2
a752 2
+ATF_TC(uvm_physmem_seg_slab_alloc);
+ATF_TC_HEAD(uvm_physmem_seg_slab_alloc, tc)
d757 1
a757 1
+ATF_TC_BODY(uvm_physmem_seg_slab_alloc, tc)
d761 1
a761 1
+	const size_t npages = UVM_PHYSMEM_BOOT_UNPLUG_MAX; /* Number of pages */
d768 1
a768 1
+	seg = uvm_physmem_alloc(sizeof(struct uvm_physseg));
d770 1
a770 1
+	uvm_physmem_seg_chomp_slab(PHYSMEM_NODE_TO_HANDLE(seg), slab, npages * 2);
d772 1
a772 1
+	pgs = uvm_physmem_seg_slab_alloc(PHYSMEM_NODE_TO_HANDLE(seg), npages);
d779 1
a779 1
+	pgs = uvm_physmem_seg_slab_alloc(PHYSMEM_NODE_TO_HANDLE(seg), npages);
d784 1
a784 1
+	pgs = uvm_physmem_seg_slab_alloc(PHYSMEM_NODE_TO_HANDLE(seg), npages);
d788 2
a789 2
+ATF_TC(uvm_physmem_init_seg);
+ATF_TC_HEAD(uvm_physmem_init_seg, tc)
d791 1
a791 1
+	atf_tc_set_md_var(tc, "descr", "Tests if uvm_physmem_init_seg adds pages to"
d794 1
a794 1
+ATF_TC_BODY(uvm_physmem_init_seg, tc)
d798 1
a798 1
+	const size_t npages = UVM_PHYSMEM_BOOT_UNPLUG_MAX; /* Number of pages */
d805 1
a805 1
+	seg = uvm_physmem_alloc(sizeof(struct uvm_physseg));
d807 1
a807 1
+	uvm_physmem_seg_chomp_slab(PHYSMEM_NODE_TO_HANDLE(seg), slab, npages * 2);
d809 1
a809 1
+	pgs = uvm_physmem_seg_slab_alloc(PHYSMEM_NODE_TO_HANDLE(seg), npages);
d819 1
a819 1
+	uvm_physmem_init_seg(PHYSMEM_NODE_TO_HANDLE(seg), pgs);
d825 2
a826 2
+ATF_TC(uvm_physmem_init_seg);
+ATF_TC_HEAD(uvm_physmem_init_seg, tc)
d831 1
a831 1
+ATF_TC_BODY(uvm_physmem_init_seg, tc)
d833 1
a833 1
+	uvm_physmem_t upm;
d841 1
a841 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d852 1
a852 1
+	uvm_physmem_seg_chomp_slab(upm, pgs, npages);
d857 1
a857 1
+	uvm_physmem_seg_slab_alloc(upm, npages);
d860 1
a860 1
+	uvm_physmem_init_seg(upm, pgs);
d871 1
a871 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d879 2
a880 2
+ATF_TC(uvm_physmem_get_start);
+ATF_TC_HEAD(uvm_physmem_get_start, tc)
d885 1
a885 1
+ATF_TC_BODY(uvm_physmem_get_start, tc)
d892 1
a892 1
+	uvm_physmem_t upm;
d900 1
a900 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d907 1
a907 1
+	ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physmem_get_start(upm));
d912 1
a912 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d916 1
a916 1
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physmem_get_start(upm));
d919 2
a920 2
+ATF_TC(uvm_physmem_get_start_invalid);
+ATF_TC_HEAD(uvm_physmem_get_start_invalid, tc)
d923 1
a923 1
+	    correctly when uvm_physmem_get_start() is called with invalid \
d926 1
a926 1
+ATF_TC_BODY(uvm_physmem_get_start_invalid, tc)
d938 1
a938 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d942 1
a942 1
+	/* Invalid uvm_physmem_t */
d944 1
a944 1
+	    uvm_physmem_get_start(UVM_PHYSMEM_TYPE_INVALID));
d947 2
a948 2
+ATF_TC(uvm_physmem_get_end);
+ATF_TC_HEAD(uvm_physmem_get_end, tc)
d953 1
a953 1
+ATF_TC_BODY(uvm_physmem_get_end, tc)
d960 1
a960 1
+	uvm_physmem_t upm;
d968 1
a968 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d975 1
a975 1
+	ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physmem_get_end(upm));
d980 1
a980 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d984 1
a984 1
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physmem_get_end(upm));
d987 2
a988 2
+ATF_TC(uvm_physmem_get_end_invalid);
+ATF_TC_HEAD(uvm_physmem_get_end_invalid, tc)
d991 1
a991 1
+	    correctly when uvm_physmem_get_end() is called with invalid \
d994 1
a994 1
+ATF_TC_BODY(uvm_physmem_get_end_invalid, tc)
d1006 1
a1006 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1010 1
a1010 1
+	/* Invalid uvm_physmem_t */
d1012 1
a1012 1
+	    uvm_physmem_get_end(UVM_PHYSMEM_TYPE_INVALID));
d1015 2
a1016 2
+ATF_TC(uvm_physmem_get_avail_start);
+ATF_TC_HEAD(uvm_physmem_get_avail_start, tc)
d1021 1
a1021 1
+ATF_TC_BODY(uvm_physmem_get_avail_start, tc)
d1028 1
a1028 1
+	uvm_physmem_t upm;
d1036 1
a1036 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1043 1
a1043 1
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_1, uvm_physmem_get_avail_start(upm));
d1050 1
a1050 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d1052 1
a1052 1
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physmem_get_avail_start(upm));
d1055 2
a1056 2
+ATF_TC(uvm_physmem_get_avail_start_invalid);
+ATF_TC_HEAD(uvm_physmem_get_avail_start_invalid, tc)
d1059 1
a1059 1
+	    correctly when uvm_physmem_get_avail_start() is called with invalid\
d1062 1
a1062 1
+ATF_TC_BODY(uvm_physmem_get_avail_start_invalid, tc)
d1074 1
a1074 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1078 1
a1078 1
+	/* Invalid uvm_physmem_t */
d1080 1
a1080 1
+	    uvm_physmem_get_avail_start(UVM_PHYSMEM_TYPE_INVALID));
d1083 2
a1084 2
+ATF_TC(uvm_physmem_get_avail_end);
+ATF_TC_HEAD(uvm_physmem_get_avail_end, tc)
d1089 1
a1089 1
+ATF_TC_BODY(uvm_physmem_get_avail_end, tc)
d1096 1
a1096 1
+	uvm_physmem_t upm;
d1104 1
a1104 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1111 1
a1111 1
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1, uvm_physmem_get_avail_end(upm));
d1116 1
a1116 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d1120 1
a1120 1
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physmem_get_avail_end(upm));
d1123 2
a1124 2
+ATF_TC(uvm_physmem_get_avail_end_invalid);
+ATF_TC_HEAD(uvm_physmem_get_avail_end_invalid, tc)
d1127 1
a1127 1
+	    correctly when uvm_physmem_get_avail_end() is called with invalid\
d1130 1
a1130 1
+ATF_TC_BODY(uvm_physmem_get_avail_end_invalid, tc)
d1142 1
a1142 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1146 1
a1146 1
+	/* Invalid uvm_physmem_t */
d1148 1
a1148 1
+	    uvm_physmem_get_avail_end(UVM_PHYSMEM_TYPE_INVALID));
d1151 2
a1152 2
+ATF_TC(uvm_physmem_get_next);
+ATF_TC_HEAD(uvm_physmem_get_next, tc)
d1155 1
a1155 1
+            segment using the uvm_physmem_get_next() call.");
d1157 1
a1157 1
+ATF_TC_BODY(uvm_physmem_get_next, tc)
d1159 2
a1160 2
+	uvm_physmem_t upm;
+	uvm_physmem_t upm_next;
d1170 1
a1170 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1172 2
a1173 2
+	ATF_CHECK_EQ(UVM_PHYSMEM_TYPE_INVALID_OVERFLOW,
+	    uvm_physmem_get_next(upm));
d1180 1
a1180 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d1182 1
a1182 1
+	upm = uvm_physmem_get_next(upm); /* Fetch Next */
d1185 2
a1186 2
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physmem_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physmem_get_end(upm));
d1193 1
a1193 1
+	ATF_REQUIRE_EQ(3, uvm_physmem_get_entries());
d1195 1
a1195 1
+	upm = uvm_physmem_get_next(upm); /* Fetch Next */
d1198 2
a1199 2
+	ATF_CHECK_EQ(VALID_START_PFN_3, uvm_physmem_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_3, uvm_physmem_get_end(upm));
d1202 2
a1203 2
+ATF_TC(uvm_physmem_get_prev);
+ATF_TC_HEAD(uvm_physmem_get_prev, tc)
d1206 1
a1206 1
+            segment using the uvm_physmem_get_prev() call.");
d1208 1
a1208 1
+ATF_TC_BODY(uvm_physmem_get_prev, tc)
d1210 2
a1211 2
+	uvm_physmem_t upm;
+	uvm_physmem_t upm_prev;
d1219 1
a1219 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1221 2
a1222 2
+	ATF_CHECK_EQ(UVM_PHYSMEM_TYPE_INVALID_EMPTY,
+	    uvm_physmem_get_prev(upm_prev));
d1229 1
a1229 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d1232 1
a1232 1
+	upm = uvm_physmem_get_prev(upm);
d1235 2
a1236 2
+	ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physmem_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physmem_get_end(upm));
d1243 1
a1243 1
+	ATF_REQUIRE_EQ(3, uvm_physmem_get_entries());
d1246 1
a1246 1
+	 * This will return a UVM_PHYSMEM_TYPE_INVALID_EMPTY we are at the
d1249 1
a1249 1
+	upm = uvm_physmem_get_prev(upm);
d1251 1
a1251 1
+	ATF_CHECK_EQ(UVM_PHYSMEM_TYPE_INVALID_EMPTY, upm);
d1254 2
a1255 2
+ATF_TC(uvm_physmem_get_first);
+ATF_TC_HEAD(uvm_physmem_get_first, tc)
d1258 1
a1258 1
+            segment (lowest node) using the uvm_physmem_get_first() call.");
d1260 1
a1260 1
+ATF_TC_BODY(uvm_physmem_get_first, tc)
d1262 2
a1263 2
+	uvm_physmem_t upm = UVM_PHYSMEM_TYPE_INVALID_EMPTY;
+	uvm_physmem_t upm_first;
d1268 1
a1268 1
+	ATF_CHECK_EQ(upm, uvm_physmem_get_first());
d1275 1
a1275 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1278 1
a1278 1
+	upm = uvm_physmem_get_first();
d1280 4
a1283 4
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physmem_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physmem_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physmem_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physmem_get_avail_end(upm));
d1291 1
a1291 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d1294 1
a1294 1
+	upm = uvm_physmem_get_first();
d1296 4
a1299 4
+	ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physmem_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physmem_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_1, uvm_physmem_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1, uvm_physmem_get_avail_end(upm));
d1307 1
a1307 1
+	ATF_REQUIRE_EQ(3, uvm_physmem_get_entries());
d1310 1
a1310 1
+	upm = uvm_physmem_get_first();
d1312 4
a1315 4
+	ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physmem_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physmem_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_1, uvm_physmem_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1, uvm_physmem_get_avail_end(upm));
d1318 2
a1319 2
+ATF_TC(uvm_physmem_get_last);
+ATF_TC_HEAD(uvm_physmem_get_last, tc)
d1322 1
a1322 1
+            segment using the uvm_physmem_get_last() call.");
d1324 1
a1324 1
+ATF_TC_BODY(uvm_physmem_get_last, tc)
d1326 2
a1327 2
+	uvm_physmem_t upm = UVM_PHYSMEM_TYPE_INVALID_EMPTY;
+	uvm_physmem_t upm_last;
d1332 1
a1332 1
+	ATF_CHECK_EQ(upm, uvm_physmem_get_last());
d1339 1
a1339 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1342 1
a1342 1
+	upm = uvm_physmem_get_last();
d1344 4
a1347 4
+	ATF_CHECK_EQ(VALID_START_PFN_1, uvm_physmem_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_1, uvm_physmem_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_1, uvm_physmem_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1, uvm_physmem_get_avail_end(upm));
d1355 1
a1355 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d1358 1
a1358 1
+	upm = uvm_physmem_get_last();
d1360 4
a1363 4
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physmem_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physmem_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physmem_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physmem_get_avail_end(upm));
d1371 1
a1371 1
+	ATF_REQUIRE_EQ(3, uvm_physmem_get_entries());
d1374 1
a1374 1
+	upm = uvm_physmem_get_last();
d1376 4
a1379 4
+	ATF_CHECK_EQ(VALID_START_PFN_3, uvm_physmem_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_3, uvm_physmem_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_3, uvm_physmem_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_3, uvm_physmem_get_avail_end(upm));
d1382 2
a1383 2
+ATF_TC(uvm_physmem_valid);
+ATF_TC_HEAD(uvm_physmem_valid, tc)
d1386 1
a1386 1
+            segment is valid using the uvm_physmem_valid() call.");
d1388 1
a1388 1
+ATF_TC_BODY(uvm_physmem_valid, tc)
d1394 1
a1394 1
+	uvm_physmem_t upm;
d1402 1
a1402 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1404 1
a1404 1
+	uvm_physmem_init_seg(upm, pgs);
d1408 1
a1408 1
+	ATF_CHECK_EQ(true, uvm_physmem_valid(upm));
d1411 2
a1412 2
+ATF_TC(uvm_physmem_valid_invalid);
+ATF_TC_HEAD(uvm_physmem_valid_invalid, tc)
d1415 1
a1415 1
+            segment is invalid using the uvm_physmem_valid() call.");
d1417 1
a1417 1
+ATF_TC_BODY(uvm_physmem_valid_invalid, tc)
d1419 1
a1419 1
+	uvm_physmem_t upm;
d1430 2
a1431 2
+	/* Invalid uvm_physmem_t */
+	ATF_CHECK_EQ(false, uvm_physmem_valid(UVM_PHYSMEM_TYPE_INVALID));
d1437 1
a1437 1
+	ATF_CHECK_EQ(false, uvm_physmem_valid(upm));
d1440 2
a1441 2
+ATF_TC(uvm_physmem_get_highest);
+ATF_TC_HEAD(uvm_physmem_get_highest, tc)
d1446 1
a1446 1
+ATF_TC_BODY(uvm_physmem_get_highest, tc)
d1453 1
a1453 1
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1 - 1, uvm_physmem_get_highest_frame());
d1459 1
a1459 1
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_3 - 1, uvm_physmem_get_highest_frame());
d1465 1
a1465 1
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_3 - 1, uvm_physmem_get_highest_frame());
d1468 2
a1469 2
+ATF_TC(uvm_physmem_get_free_list);
+ATF_TC_HEAD(uvm_physmem_get_free_list, tc)
d1473 1
a1473 1
+            uvm_physmem_get_free_list() call.");
d1475 1
a1475 1
+ATF_TC_BODY(uvm_physmem_get_free_list, tc)
d1477 1
a1477 1
+	uvm_physmem_t upm;
d1484 1
a1484 1
+	ATF_CHECK_EQ(VM_FREELIST_DEFAULT, uvm_physmem_get_free_list(upm));
d1489 1
a1489 1
+	ATF_CHECK_EQ(VM_FREELIST_FIRST16, uvm_physmem_get_free_list(upm));
d1494 1
a1494 1
+	ATF_CHECK_EQ(VM_FREELIST_FIRST1G, uvm_physmem_get_free_list(upm));
d1497 2
a1498 2
+ATF_TC(uvm_physmem_get_start_hint);
+ATF_TC_HEAD(uvm_physmem_get_start_hint, tc)
d1502 1
a1502 1
+            uvm_physmem_get_start_hint() call.");
d1504 1
a1504 1
+ATF_TC_BODY(uvm_physmem_get_start_hint, tc)
d1506 1
a1506 1
+	uvm_physmem_t upm;
d1513 1
a1513 1
+	ATF_CHECK_EQ(0, uvm_physmem_get_start_hint(upm));
d1516 2
a1517 2
+ATF_TC(uvm_physmem_set_start_hint);
+ATF_TC_HEAD(uvm_physmem_set_start_hint, tc)
d1521 1
a1521 1
+            uvm_physmem_set_start_hint() call.");
d1523 1
a1523 1
+ATF_TC_BODY(uvm_physmem_set_start_hint, tc)
d1529 1
a1529 1
+	uvm_physmem_t upm;
d1535 1
a1535 1
+	uvm_physmem_init_seg(upm, pgs);
d1537 1
a1537 1
+	ATF_CHECK_EQ(true, uvm_physmem_set_start_hint(upm, atop(128)));
d1540 1
a1540 1
+	ATF_CHECK_EQ(atop(128), uvm_physmem_get_start_hint(upm));
d1543 2
a1544 2
+ATF_TC(uvm_physmem_set_start_hint_invalid);
+ATF_TC_HEAD(uvm_physmem_set_start_hint_invalid, tc)
d1548 1
a1548 1
+            uvm_physmem_set_start_hint() call.");
d1550 1
a1550 1
+ATF_TC_BODY(uvm_physmem_set_start_hint_invalid, tc)
d1552 1
a1552 1
+	uvm_physmem_t upm;
d1563 1
a1563 1
+	ATF_CHECK_EQ(false, uvm_physmem_set_start_hint(upm, atop(128)));
d1569 1
a1569 1
+	ATF_CHECK_EQ(0, uvm_physmem_get_start_hint(upm));
d1572 2
a1573 2
+ATF_TC(uvm_physmem_get_pg);
+ATF_TC_HEAD(uvm_physmem_get_pg, tc)
d1576 1
a1576 1
+            is correct when fetched by the uvm_physmem_get_pg() call.");
d1578 1
a1578 1
+ATF_TC_BODY(uvm_physmem_get_pg, tc)
d1586 1
a1586 1
+	uvm_physmem_t upm;
d1592 1
a1592 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1597 1
a1597 1
+	uvm_physmem_init_seg(upm, pgs);
d1604 1
a1604 1
+	extracted_pg = uvm_physmem_get_pg(upm, 5);
d1610 1
a1610 1
+	extracted_pg = uvm_physmem_get_pg(upm, 113);
d1616 2
a1617 2
+ATF_TC(uvm_physmem_get_pmseg);
+ATF_TC_HEAD(uvm_physmem_get_pmseg, tc)
d1620 1
a1620 1
+            struct is correct when fetched by the uvm_physmem_get_pmseg() call.");
d1622 1
a1622 1
+ATF_TC_BODY(uvm_physmem_get_pmseg, tc)
d1632 1
a1632 1
+	uvm_physmem_t upm;
d1638 1
a1638 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1643 1
a1643 1
+	uvm_physmem_init_seg(upm, pgs);
d1652 1
a1652 1
+	extracted_pmseg = uvm_physmem_get_pmseg(upm);
d1667 1
a1667 1
+	extracted_pmseg = uvm_physmem_get_pmseg(upm);
d1679 1
a1679 1
+            is correct when a PFN is passed into the vm_physseg_find() call. \
d1687 1
a1687 1
+	uvm_physmem_t upm_first, upm_second, result;
d1693 1
a1693 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1700 1
a1700 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d1705 1
a1705 1
+	result = vm_physseg_find(atop(ONE_MEGABYTE - 1024), NULL);
d1707 8
a1714 8
+	ATF_CHECK_EQ(uvm_physmem_get_start(upm_first),
+	    uvm_physmem_get_start(result));
+	ATF_CHECK_EQ(uvm_physmem_get_end(upm_first),
+	    uvm_physmem_get_end(result));
+	ATF_CHECK_EQ(uvm_physmem_get_avail_start(upm_first),
+	    uvm_physmem_get_avail_start(result));
+	ATF_CHECK_EQ(uvm_physmem_get_avail_end(upm_first),
+	    uvm_physmem_get_avail_end(result));
d1719 1
a1719 1
+	result = vm_physseg_find(atop(ONE_MEGABYTE + 8192), &offset);
d1721 8
a1728 8
+	ATF_CHECK_EQ(uvm_physmem_get_start(upm_second),
+	    uvm_physmem_get_start(result));
+	ATF_CHECK_EQ(uvm_physmem_get_end(upm_second),
+	    uvm_physmem_get_end(result));
+	ATF_CHECK_EQ(uvm_physmem_get_avail_start(upm_second),
+	    uvm_physmem_get_avail_start(result));
+	ATF_CHECK_EQ(uvm_physmem_get_avail_end(upm_second),
+	    uvm_physmem_get_avail_end(result));
d1740 1
a1740 1
+            vm_physseg_find() call.");
d1750 1
a1750 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1755 2
a1756 2
+	ATF_CHECK_EQ(UVM_PHYSMEM_TYPE_INVALID,
+	    vm_physseg_find(atop(ONE_MEGABYTE * 3), NULL));
d1761 2
a1762 2
+	ATF_CHECK_EQ(UVM_PHYSMEM_TYPE_INVALID,
+	    vm_physseg_find(atop(ONE_MEGABYTE * 3), &offset));
d1784 1
a1784 1
+	uvm_physmem_t upm;
d1790 1
a1790 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1794 1
a1794 1
+	uvm_physmem_init_seg(upm, pgs);
d1806 1
a1806 1
+	    uvm_physmem_get_avail_start(upm));
d1808 1
a1808 1
+	ATF_CHECK_EQ(VALID_START_PFN_2 + 1, uvm_physmem_get_start(upm));
d1811 2
a1812 2
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physmem_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physmem_get_avail_end(upm));
d1828 1
a1828 1
+	uvm_physmem_t upm;
d1843 1
a1843 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1848 1
a1848 1
+		uvm_physmem_get_avail_start(upm) != uvm_physmem_get_start(upm));
d1871 1
a1871 1
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2 - 1, uvm_physmem_get_avail_end(upm));
d1873 1
a1873 1
+	ATF_CHECK_EQ(VALID_END_PFN_2 - 1, uvm_physmem_get_end(upm));
d1877 2
a1878 2
+	    uvm_physmem_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physmem_get_start(upm));
d1897 1
a1897 1
+	uvm_physmem_t upm;
d1911 1
a1911 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1916 1
a1916 1
+		uvm_physmem_get_avail_start(upm) != uvm_physmem_get_start(upm));
d1918 1
a1918 1
+	uvm_physmem_init_seg(upm, pgs);
d1927 1
a1927 1
+	    uvm_physmem_get_avail_start(upm));
d1929 3
a1931 3
+	    uvm_physmem_get_avail_end(upm));
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physmem_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physmem_get_end(upm));
d1948 1
a1948 1
+	uvm_physmem_t upm;
d1965 1
a1965 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d1977 1
a1977 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d1983 1
a1983 1
+	ATF_CHECK_EQ(1, uvm_physmem_get_entries());
d1986 1
a1986 1
+	upm = uvm_physmem_get_first();
d1988 4
a1991 4
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physmem_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physmem_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physmem_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physmem_get_avail_end(upm));
d2009 1
a2009 1
+	uvm_physmem_t upm;
d2025 1
a2025 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d2037 1
a2037 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d2047 1
a2047 1
+	ATF_CHECK_EQ(1, uvm_physmem_get_entries());
d2050 1
a2050 1
+	upm = uvm_physmem_get_first();
d2052 4
a2055 4
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physmem_get_start(upm));
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physmem_get_end(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2, uvm_physmem_get_avail_start(upm));
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2, uvm_physmem_get_avail_end(upm));
d2072 1
a2072 1
+	uvm_physmem_t upm;
d2078 1
a2078 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d2082 1
a2082 1
+	uvm_physmem_init_seg(upm, pgs);
d2100 1
a2100 1
+	uvm_physmem_t upm;
d2110 1
a2110 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d2125 1
a2125 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d2128 1
a2128 1
+	    uvm_physmem_get_avail_start(upm));
d2137 1
a2137 1
+			ATF_CHECK_EQ(i + 1, uvm_physmem_get_avail_start(upm));
d2144 1
a2144 1
+	upm = vm_physseg_find(VALID_AVAIL_END_PFN_1 - 1, NULL);
d2149 1
a2149 1
+	ATF_CHECK_EQ(1, uvm_physmem_get_entries());
d2162 1
a2162 1
+	uvm_physmem_t upm;
d2168 1
a2168 1
+	ATF_REQUIRE_EQ(1, uvm_physmem_get_entries());
d2182 6
a2187 6
+	ATF_TP_ADD_TC(tp, uvm_physmem_alloc_atboot_mismatch);
+	ATF_TP_ADD_TC(tp, uvm_physmem_alloc_atboot_overrun);
+	ATF_TP_ADD_TC(tp, uvm_physmem_alloc_sanity);
+	ATF_TP_ADD_TC(tp, uvm_physmem_free_atboot_mismatch);
+	ATF_TP_ADD_TC(tp, uvm_physmem_free_sanity);
+	ATF_TP_ADD_TC(tp, uvm_physmem_atboot_free_leak);
d2190 2
a2191 2
+	ATF_TP_ADD_TC(tp, uvm_physmem_plug);
+	ATF_TP_ADD_TC(tp, uvm_physmem_unplug);
d2194 1
a2194 1
+	ATF_TP_ADD_TC(tp, uvm_physmem_init);
d2196 24
a2219 24
+	ATF_TP_ADD_TC(tp, uvm_physmem_handle_immutable);
+	ATF_TP_ADD_TC(tp, uvm_physmem_seg_chomp_slab);
+	ATF_TP_ADD_TC(tp, uvm_physmem_seg_slab_alloc);
+	ATF_TP_ADD_TC(tp, uvm_physmem_init_seg);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_start);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_start_invalid);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_end);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_end_invalid);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_avail_start);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_avail_start_invalid);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_avail_end);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_avail_end_invalid);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_next);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_prev);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_first);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_last);
+	ATF_TP_ADD_TC(tp, uvm_physmem_valid);
+	ATF_TP_ADD_TC(tp, uvm_physmem_valid_invalid);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_highest);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_free_list);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_start_hint);
+	ATF_TP_ADD_TC(tp, uvm_physmem_set_start_hint);
+	ATF_TP_ADD_TC(tp, uvm_physmem_set_start_hint_invalid);
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_pg);
d2222 1
a2222 1
+	ATF_TP_ADD_TC(tp, uvm_physmem_get_pmseg);
d2238 748
@


1.7
log
@Some of the changes that Chuck suggested.
@
text
@a0 6
? tests/sys/uvm/.gdbinit
? tests/sys/uvm/Atffile
? tests/sys/uvm/t_rump_uvm_physseg
? tests/sys/uvm/t_uvm_physmem
? tests/sys/uvm/t_uvm_physmem_load
? tests/sys/uvm/t_uvm_physmem_load.c
@


1.6
log
@options UVM_HOTPLUG
@
text
@d1 6
d12 1
a12 1
+++ tests/sys/uvm/Makefile	17 Nov 2016 15:42:40 -0000
d29 1
a29 1
+CPPFLAGS.t_uvm_physmem.c= -D_EXTENT_TESTING -D__POOL_EXPOSE
d34 1
a34 1
+CPPFLAGS.t_uvm_physmem_load.c= -D_EXTENT_TESTING -D__POOL_EXPOSE
d48 1
a48 1
+++ tests/sys/uvm/t_rump_uvm_physseg.c	17 Nov 2016 15:42:40 -0000
d131 2
a132 2
+++ tests/sys/uvm/t_uvm_physmem.c	17 Nov 2016 15:42:41 -0000
@@@@ -0,0 +1,2198 @@@@
d185 3
a187 1
+#define UVM_HOTPLUG // Enable hotplug with rbtree.
d197 1
a197 1
+ * If this line is commented out, tests related to uvm_physmem_get_pmseg()
d219 4
d225 1
d238 1
a238 1
+#ifndef VM_PHYSSEG_MAX
a361 3
+#define DEBUG
+
+#ifdef DEBUG
d368 1
a378 1
+}
a379 6
+/*
+ * Dummy debug function to prevent compile errors.
+ */
+static inline void
+uvm_physmem_dump_seg(uvm_physmem_t upm)
+{
d381 1
a382 1
+#endif
d385 1
a385 1
+ * Private accessor that gets the value of vm_physmem.nentries
d391 1
a391 1
+	return vm_physmem.nentries;
d434 1
a434 1
+	uvm_physmem_alloc(sizeof(struct vm_physseg) - 1);
d450 1
a450 1
+	uvm_physmem_alloc((VM_PHYSSEG_MAX + 1) * sizeof(struct vm_physseg));
d467 1
a467 1
+	ATF_REQUIRE(uvm_physmem_alloc(VM_PHYSSEG_MAX * sizeof(struct vm_physseg)));
d471 2
a472 2
+	ATF_REQUIRE(uvm_physmem_alloc(sizeof(struct vm_physseg) - 1));
+	ATF_REQUIRE(uvm_physmem_alloc(2 * VM_PHYSSEG_MAX * sizeof(struct vm_physseg)));
d488 1
a488 1
+	uvm_physmem_free(&uvm_physseg[0], sizeof(struct vm_physseg) - 1);
d508 1
a508 1
+	struct vm_physseg *seg;
d510 1
a510 1
+	uvm_physmem_free(seg, (VM_PHYSSEG_MAX - 1) * sizeof(struct vm_physseg));
d515 2
a516 2
+	seg = uvm_physmem_alloc(sizeof(struct vm_physseg) - 1);
+	uvm_physmem_free(seg, sizeof(struct vm_physseg) - 1);
d518 1
a518 1
+	seg = uvm_physmem_alloc(2 * VM_PHYSSEG_MAX * sizeof(struct vm_physseg));
d520 1
a520 1
+	uvm_physmem_free(seg, 2 * VM_PHYSSEG_MAX * sizeof(struct vm_physseg));
d536 1
a536 1
+	struct vm_physseg *seg;
d543 1
a543 1
+	ATF_REQUIRE(uvm_physmem_alloc(sizeof(struct vm_physseg)));
d805 1
a805 1
+	struct vm_physseg *seg;
d815 1
a815 1
+	seg = uvm_physmem_alloc(sizeof(struct vm_physseg));
d850 1
a850 1
+	struct vm_physseg *seg;
d859 1
a859 1
+	seg = uvm_physmem_alloc(sizeof(struct vm_physseg));
d887 1
a887 1
+	struct vm_physseg *seg;
d896 1
a896 1
+	seg = uvm_physmem_alloc(sizeof(struct vm_physseg));
@


1.5
log
@Hotplug with balloon(4) implementation.
Includes skrll@@ patches.
@
text
@d6 2
a7 2
+++ tests/sys/uvm/Makefile	13 Nov 2016 11:06:33 -0000
@@@@ -0,0 +1,28 @@@@
d23 1
a23 1
+CPPFLAGS.t_uvm_physmem.c= -D_EXTENT_TESTING -D__POOL_EXPOSE -D_KERNTYPES
d26 3
a28 2
+
+
d42 1
a42 1
+++ tests/sys/uvm/t_rump_uvm_physseg.c	13 Nov 2016 11:06:33 -0000
d125 2
a126 2
+++ tests/sys/uvm/t_uvm_physmem.c	13 Nov 2016 11:06:59 -0000
@@@@ -0,0 +1,2150 @@@@
d172 9
d196 2
d301 1
d307 1
a311 1
+#include <uvm/uvm_physmem.h>
d386 1
d388 3
d393 8
d416 1
d463 1
a463 1
+	ATF_REQUIRE(uvm_physmem_alloc(VM_PHYSSEG_MAX * sizeof(struct vm_physseg)));	
d503 1
a503 1
+	 
d507 1
a507 1
+	
d513 1
a513 1
+	
d515 1
a515 1
+	
d541 1
d554 1
a554 1
+	
d563 1
a563 1
+		
d568 1
a568 1
+	    
d586 1
a586 1
+	
d588 1
a588 1
+	
d603 1
a603 1
+	
d612 1
a612 1
+	
d636 2
d640 1
a640 1
+	
d664 4
d671 3
d720 1
a720 1
+	
d722 1
a722 1
+	
d738 1
a738 1
+	
d805 1
a805 1
+	
d813 1
a813 1
+	uvm_physmem_seg_chomp_slab(seg, slab, npages * 2);
d824 1
a824 1
+		    
d827 1
a827 1
+	
d829 1
a829 1
+	   
d857 1
a857 1
+	uvm_physmem_seg_chomp_slab(seg, slab, npages * 2);
d859 1
a859 1
+	pgs = uvm_physmem_seg_slab_alloc(seg, npages);
d866 1
a866 1
+	pgs = uvm_physmem_seg_slab_alloc(seg, npages);
d871 1
a871 1
+	pgs = uvm_physmem_seg_slab_alloc(seg, npages);
a892 2
+	
+	uvm_physmem_seg_chomp_slab(seg, slab, npages * 2);
d894 3
a896 1
+	pgs = uvm_physmem_seg_slab_alloc(seg, npages);
d906 1
a906 1
+	uvm_physmem_init_seg(seg, pgs);
d952 1
a952 1
+	
d955 1
a955 1
+	
d976 1
a976 1
+	
d1044 1
a1044 1
+	
d1112 1
a1112 1
+	
d1180 1
a1180 1
+	
d2101 1
a2101 1
+	
d2222 3
a2224 1
+		ATF_CHECK_EQ(i + 1, uvm_physmem_get_avail_start(upm));
d2227 9
d2267 1
d2275 2
a2276 1
+
@


1.4
log
@Move to the rbtree implementation. Tests + MD stuff.
@
text
@d6 2
a7 2
+++ tests/sys/uvm/Makefile	30 Oct 2016 17:25:52 -0000
@@@@ -0,0 +1,19 @@@@
d10 1
d18 10
a27 1
+DPSRCS=${NETBSDSRCDIR}/sys/uvm/uvm_physmem.[ch]
a28 1
+TESTS_C=	t_uvm_physmem
d31 1
a31 1
+ADD_TO_LD=      -lrumpvfs -lrump -lrumpuser -lrump -lpthread
d41 1
a41 1
+++ tests/sys/uvm/t_rump_uvm_physseg.c	30 Oct 2016 17:25:52 -0000
d124 2
a125 2
+++ tests/sys/uvm/t_uvm_physmem.c	30 Oct 2016 17:25:52 -0000
@@@@ -0,0 +1,1791 @@@@
d200 1
a216 1
+#define PAGE_SIZE               4096
d218 2
d221 1
d223 2
a224 5
+#define	_SYS_KMEM_H_ /* Disallow the real kmem API (see below) */
+/* free(p) XXX: pgs management needs more thought */
+#define kmem_alloc(size, flags) malloc(size)
+#define kmem_zalloc(size, flags) calloc(1, size)
+#define kmem_free(p, size) free(p)
d244 25
d297 1
a298 1
+
d303 6
a308 1
+#define ONE_MEGABYTE (1024 * 1024)
d326 5
d389 2
d516 140
d657 3
d681 6
d688 1
a688 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
d691 3
d700 4
a703 1
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
d706 4
a709 1
+	ATF_REQUIRE_EQ(0, uvmexp.npages);
d728 1
d757 119
d884 1
a885 1
+
a887 2
+	uvm_physmem_t upm;
+
d896 14
d913 4
d919 1
a919 1
+
a921 1
+
d926 1
d928 1
d938 4
a941 3
+	psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
+
+	struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
d953 2
a954 1
+	uvm_physmem_init_seg(upm, pgs);
d1006 4
a1009 3
+	psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
+
+	struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
d1021 2
a1022 1
+	uvm_physmem_init_seg(upm, pgs);
d1074 4
a1077 3
+	psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
+
+	struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
d1089 2
a1090 1
+	uvm_physmem_init_seg(upm, pgs);
d1142 4
a1145 3
+	psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
+
+	struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
d1157 2
a1158 1
+	uvm_physmem_init_seg(upm, pgs);
d1495 1
a1495 1
+            the highest PFN using the uvm_physmem_get_highest() call.");
d1504 1
a1504 1
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_1, uvm_physmem_get_highest());
d1510 1
a1510 1
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_3, uvm_physmem_get_highest());
d1516 1
a1516 1
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_3, uvm_physmem_get_highest());
d1877 1
a1877 1
+	psize_t npages = (VALID_END_PFN_2 - VALID_START_PFN_2);
d1879 3
a1881 1
+	struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
d1883 1
a1883 1
+	paddr_t p = 0;
a1884 1
+	uvm_physmem_t upm;
d1901 2
a1902 1
+	uvm_physmem_init_seg(upm, pgs);
a1996 2
+	struct vm_page *pgs_delete = malloc(sizeof(struct vm_page));
+
d2000 5
d2020 2
a2021 1
+	uvm_physmem_init_seg(upm, pgs_delete);
a2057 2
+	struct vm_page *pgs_delete = malloc(sizeof(struct vm_page));
+
d2061 2
d2064 2
d2080 2
a2081 1
+	uvm_physmem_init_seg(upm, pgs_delete);
a2148 4
+	psize_t npages = (VALID_END_PFN_1 - VALID_START_PFN_1);
+
+	struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);
+
d2152 4
d2165 2
a2166 1
+	uvm_physmem_init_seg(upm, pgs);
d2220 1
d2227 5
a2231 1
+	    
d2235 2
d2258 1
d2264 1
@


1.3
log
@Chop off unknown file listings in the diff.
@
text
@a0 15
Index: tests/sys/uvm/Atffile
===================================================================
RCS file: tests/sys/uvm/Atffile
diff -N tests/sys/uvm/Atffile
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ tests/sys/uvm/Atffile	28 Oct 2016 08:24:12 -0000
@@@@ -0,0 +1,8 @@@@
+Content-Type: application/X-atf-atffile; version="1"
+
+# Automatically generated by bsd.test.mk.
+
+prop: test-suite = "NetBSD"
+
+tp: t_uvm_physmem
+tp: t_rump_uvm_physseg
d6 1
a6 1
+++ tests/sys/uvm/Makefile	28 Oct 2016 08:24:12 -0000
d32 1
a32 1
+++ tests/sys/uvm/t_rump_uvm_physseg.c	28 Oct 2016 08:24:12 -0000
d115 2
a116 2
+++ tests/sys/uvm/t_uvm_physmem.c	28 Oct 2016 08:24:12 -0000
@@@@ -0,0 +1,1658 @@@@
d329 1
a329 1
+	return vm_nphysmem;
d345 126
d1861 7
@


1.2
log
@Immutable uvm_physmem_t test, shakedown of tests to make
sure that the semantics are clear.
Tested on amd64
@
text
@a0 3
? tests/sys/uvm/.gdbinit
? tests/sys/uvm/t_rump_uvm_physseg
? tests/sys/uvm/t_uvm_physmem
@


1.1
log
@Initial revision
@
text
@d1 3
d9 2
a10 2
+++ tests/sys/uvm/Atffile	20 Oct 2016 13:45:19 -0000
@@@@ -0,0 +1,7 @@@@
d17 1
d24 1
a24 1
+++ tests/sys/uvm/Makefile	20 Oct 2016 13:45:19 -0000
d50 1
a50 1
+++ tests/sys/uvm/t_rump_uvm_physseg.c	20 Oct 2016 13:45:19 -0000
d52 1
a52 1
+/*	$NetBSD: t_vm.c,v 1.3 2012/03/17 18:00:28 hannken Exp $	*/
d133 2
a134 2
+++ tests/sys/uvm/t_uvm_physmem.c	20 Oct 2016 13:45:21 -0000
@@@@ -0,0 +1,1520 @@@@
a202 1
+
d210 1
a210 1
+#define VM_PHYSSEG_STRAT VM_PSTRAT_RANDOM
d305 6
a310 1
+/* Page Counts for 1MB of memory */
d312 1
d314 1
a314 3
+ * Total number of pages (of 4K size each) should be 256, But 3 Pages (i386) are
+ * reserved by PAGE_SHIFT so you get 253 pages when you init it for the first
+ * time. This happens when Pages are created by uvm_physmem_init_seg() call.
d316 11
a326 1
+#define INIT_PAGE_COUNT_1M      253
d328 12
a339 2
+/* Rest of the inserts will have exactly 256 */
+#define PAGE_COUNT_1M           256
d345 2
a346 1
+uvm_physmem_get_entries(void) {
d403 41
d459 1
a459 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
a463 3
+	/* There is only one segment. */
+	upm = uvm_physmem_get_first();
+
d470 1
a470 3
+	ATF_CHECK_EQ(INIT_PAGE_COUNT_1M, uvmexp.npages);
+
+	printf("%lu npages, %d uvmexp.npages\n", npages, uvmexp.npages);
d472 1
a472 1
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
d477 2
a478 2
+	/* We added another 1MB so INIT_PAGE_COUNT_1M + PAGE_COUNT_1M */
+	ATF_CHECK_EQ(INIT_PAGE_COUNT_1M + PAGE_COUNT_1M, uvmexp.npages);
d496 1
a496 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
a502 3
+	/* There is only one segment. */
+	upm = uvm_physmem_get_first();
+
d505 1
a505 1
+	ATF_REQUIRE_EQ(INIT_PAGE_COUNT_1M, uvmexp.npages);
d509 1
a509 1
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
d514 1
a514 4
+	ATF_REQUIRE_EQ(INIT_PAGE_COUNT_1M + PAGE_COUNT_1M, uvmexp.npages);
+
+	/* There are only two, and the 2nd node is higher than the first */
+	upm = uvm_physmem_get_next(upm);
d533 1
a533 1
+	// Force other check conditions
d543 2
a544 1
+	ATF_CHECK_EQ((paddr_t) -1, uvm_physmem_get_start(UVM_PHYSMEM_TYPE_INVALID));
d562 1
a562 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
a568 3
+	/* There is only one segment. */
+	upm = uvm_physmem_get_first();
+
d571 1
a571 1
+	ATF_REQUIRE_EQ(INIT_PAGE_COUNT_1M, uvmexp.npages);
d575 1
a575 1
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
d580 1
a580 4
+	ATF_REQUIRE_EQ(INIT_PAGE_COUNT_1M + PAGE_COUNT_1M, uvmexp.npages);
+
+	/* There are only two, and the 2nd node is higher than the first */
+	upm = uvm_physmem_get_next(upm);
d599 1
a599 1
+	// Force other check conditions
d609 2
a610 1
+	ATF_CHECK_EQ((paddr_t) -1, uvm_physmem_get_end(UVM_PHYSMEM_TYPE_INVALID));
d628 1
a628 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
a634 3
+	/* There is only one segment. */
+	upm = uvm_physmem_get_first();
+
d637 1
a637 1
+	ATF_REQUIRE_EQ(INIT_PAGE_COUNT_1M, uvmexp.npages);
d641 1
a641 1
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
d644 1
a644 1
+	ATF_REQUIRE_EQ(INIT_PAGE_COUNT_1M + PAGE_COUNT_1M, uvmexp.npages);
a647 3
+	/* There are only two, and the 2nd node is higher than the first */
+	upm = uvm_physmem_get_next(upm);
+
d665 1
a665 1
+	// Force other check conditions
d694 1
a694 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
a700 3
+	/* There is only one segment. */
+	upm = uvm_physmem_get_first();
+
d703 1
a703 1
+	ATF_REQUIRE_EQ(INIT_PAGE_COUNT_1M, uvmexp.npages);
d707 1
a707 1
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
d712 1
a712 4
+	ATF_REQUIRE_EQ(INIT_PAGE_COUNT_1M + PAGE_COUNT_1M, uvmexp.npages);
+
+	/* There are only two, and the 2nd node is higher than the first */
+	upm = uvm_physmem_get_next(upm);
d731 1
a731 1
+	// Force other check conditions
d754 1
d759 1
a759 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
d766 2
a767 4
+	/* There is only one segment. */
+	upm = uvm_physmem_get_first();
+
+	ATF_CHECK_EQ(UVM_PHYSMEM_TYPE_INVALID, uvm_physmem_get_next(upm));
d769 1
a769 1
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
d778 1
a778 1
+	ATF_CHECK(UVM_PHYSMEM_TYPE_INVALID != upm);
d782 1
a782 1
+	uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
d791 1
a791 1
+	ATF_CHECK(UVM_PHYSMEM_TYPE_INVALID != upm);
d805 1
a805 2
+
+	/* We insert the segments out of order */
d808 2
a809 2
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
+	    VALID_AVAIL_START_PFN_2, VALID_AVAIL_END_PFN_2, VM_FREELIST_DEFAULT);
d815 2
a816 2
+	/* There is only one segment. */
+	upm = uvm_physmem_get_first();
d818 2
a819 4
+	ATF_CHECK_EQ(UVM_PHYSMEM_TYPE_INVALID, uvm_physmem_get_prev(upm));
+
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
+	    VALID_AVAIL_START_PFN_1, VALID_AVAIL_END_PFN_1, VM_FREELIST_DEFAULT);
d828 1
a828 1
+	ATF_CHECK(UVM_PHYSMEM_TYPE_INVALID != upm);
d839 4
a842 1
+	/* This will return UVM_PHYSMEM_TYPE_INVALID since we are at the lowest */
d845 1
a845 1
+	ATF_CHECK_EQ(UVM_PHYSMEM_TYPE_INVALID, upm);
d856 2
a857 1
+	uvm_physmem_t upm;
d862 1
a862 1
+	ATF_CHECK_EQ(UVM_PHYSMEM_TYPE_INVALID, uvm_physmem_get_first());
d864 1
a864 1
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
d873 1
d880 1
a880 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
d889 1
d896 1
a896 1
+	uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
d905 1
d920 2
a921 1
+	uvm_physmem_t upm;
d926 1
a926 1
+	ATF_CHECK_EQ(UVM_PHYSMEM_TYPE_INVALID, uvm_physmem_get_last());
d928 1
a928 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
d937 1
d944 1
a944 1
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
d953 1
d960 1
a960 1
+	uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
d969 1
d991 1
a991 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
a997 3
+	/* There is only one segment. */
+	upm = uvm_physmem_get_first();
+
d1000 1
a1000 1
+	ATF_REQUIRE_EQ(INIT_PAGE_COUNT_1M, uvmexp.npages);
d1016 1
a1016 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
d1019 1
a1019 1
+	// Force other check conditions
d1024 1
a1024 4
+	/* There is only one segment. */
+	upm = uvm_physmem_get_first();
+
+	// Invalid uvm_physmem_t
d1027 4
a1030 1
+	// Without any pages initialized for the segment, it is considered invalid
d1075 1
a1075 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
a1077 2
+	upm = uvm_physmem_get_first();
+
d1080 1
a1080 1
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
a1082 2
+	upm = uvm_physmem_get_next(upm);
+
d1085 1
a1085 1
+	uvm_page_physload(VALID_START_PFN_3, VALID_END_PFN_3,
a1087 2
+	upm = uvm_physmem_get_next(upm);
+
d1103 1
a1103 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
a1105 2
+	upm = uvm_physmem_get_first();
+
d1126 1
a1126 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
a1128 2
+	upm = uvm_physmem_get_first();
+
d1149 1
a1149 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
d1152 1
a1152 3
+	upm = uvm_physmem_get_first();
+
+	// Force other check conditions
d1183 1
a1183 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
a1185 2
+	upm = uvm_physmem_get_first();
+
d1193 1
a1193 1
+	ATF_REQUIRE_EQ(INIT_PAGE_COUNT_1M, uvmexp.npages);
d1229 1
a1229 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
a1231 2
+	upm = uvm_physmem_get_first();
+
d1239 1
a1239 1
+	ATF_REQUIRE_EQ(INIT_PAGE_COUNT_1M, uvmexp.npages);
d1256 2
a1257 1
+	pmseg.dummy_variable = false; /* Invert value to ensure test integrity */
d1284 1
a1284 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
d1291 1
a1291 3
+	upm_first = uvm_physmem_get_first();
+
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
a1293 2
+	upm_second = uvm_physmem_get_next(upm_first);
+
d1361 2
a1362 2
+ATF_TC(uvm_page_physunload);
+ATF_TC_HEAD(uvm_page_physunload, tc)
d1365 1
a1365 1
+	    call works without a panic().");
d1367 1
a1367 1
+ATF_TC_BODY(uvm_page_physunload, tc)
d1369 3
d1381 1
a1381 1
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
a1387 2
+	upm = uvm_physmem_get_first();
+
d1402 2
a1404 1
+	ATF_CHECK_EQ(VALID_START_PFN_2, uvm_physmem_get_start(upm));
d1407 26
d1436 9
d1448 1
a1448 1
+	 * When called for the second time, uvm_page_physunload() removes the last PFN
d1450 2
a1451 1
+	 * New end will be VALID_AVAIL_END_PFN_2 + 1
d1454 2
d1457 4
a1460 2
+	 * Remember, VALID_END_PFN_2 is only the upper limit; the actual valid pointer
+	 * is VALID_END_PFN_2 - 1
a1461 1
+	ATF_CHECK_EQ(VALID_END_PFN_2 - 1, atop(p));
a1462 2
+	ATF_CHECK_EQ(VALID_AVAIL_START_PFN_2 + 1,
+	    uvm_physmem_get_avail_start(upm));
d1465 2
d1468 2
d1471 29
a1499 1
+	ATF_CHECK_EQ(VALID_END_PFN_2, uvm_physmem_get_end(upm));
d1503 9
d1520 2
a1521 1
+	ATF_CHECK_EQ(VALID_AVAIL_END_PFN_2 - 1, uvm_physmem_get_avail_end(upm));
d1526 2
a1527 2
+ATF_TC(uvm_page_physunload_delete);
+ATF_TC_HEAD(uvm_page_physunload_delete, tc)
d1530 2
a1531 1
+	    works when the segment gets small enough to be deleted.");
d1533 1
a1533 1
+ATF_TC_BODY(uvm_page_physunload_delete, tc)
d1535 3
d1545 1
d1549 2
a1550 1
+	uvm_page_physload(VALID_END_PFN_1 - 1, VALID_END_PFN_1,
a1557 2
+	upm = uvm_physmem_get_first();
+
d1580 22
d1607 2
a1608 2
+	uvm_page_physload(VALID_START_PFN_1, VALID_START_PFN_1 + 1,
+	    VALID_AVAIL_START_PFN_1 + 1, VALID_AVAIL_START_PFN_1 + 1,
d1611 5
a1615 1
+	ATF_REQUIRE_EQ(2, uvm_physmem_get_entries());
d1617 1
a1617 1
+//	ATF_REQUIRE_EQ(0, uvmexp.npages);
d1619 4
a1622 1
+	upm = uvm_physmem_get_first();
d1624 1
a1624 1
+//	ATF_REQUIRE_EQ(1, uvmexp.npages);
d1660 1
a1660 1
+	uvm_page_physload(VALID_START_PFN_2, VALID_END_PFN_2,
a1666 2
+	upm = uvm_physmem_get_first();
+
d1680 3
d1692 1
a1692 1
+	uvm_page_physload(VALID_START_PFN_1, VALID_END_PFN_1,
a1698 2
+	upm = uvm_physmem_get_first();
+
d1738 1
a1738 1
+	uvm_page_physload(VALID_START_PFN_2, VALID_START_PFN_2+ 1,
a1744 2
+	upm = uvm_physmem_get_first();
+
d1755 1
d1782 5
a1786 2
+	ATF_TP_ADD_TC(tp, uvm_page_physunload);
+	ATF_TP_ADD_TC(tp, uvm_page_physunload_delete);
@