[XFS] Add a greedy allocation interface, allocating within a min/max size range.

SGI-PV: 955302
SGI-Modid: xfs-linux-melb:xfs-kern:26803a

Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
commit 77e4635ae1
parent 572d95f49f
@@ -68,6 +68,22 @@ kmem_zalloc(size_t size, unsigned int __nocast flags)
 	return ptr;
 }
 
+void *
+kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize,
+		   unsigned int __nocast flags)
+{
+	void		*ptr;
+
+	while (!(ptr = kmem_zalloc(maxsize, flags))) {
+		if ((maxsize >>= 1) <= minsize) {
+			maxsize = minsize;
+			flags = KM_SLEEP;
+		}
+	}
+	*size = maxsize;
+	return ptr;
+}
+
 void
 kmem_free(void *ptr, size_t size)
 {
@@ -55,8 +55,9 @@ kmem_flags_convert(unsigned int __nocast flags)
 }
 
 extern void *kmem_alloc(size_t, unsigned int __nocast);
-extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
 extern void *kmem_zalloc(size_t, unsigned int __nocast);
+extern void *kmem_zalloc_greedy(size_t *, size_t, size_t, unsigned int __nocast);
+extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
 extern void  kmem_free(void *, size_t);
 
 /*
@@ -112,17 +112,16 @@ xfs_Gqm_init(void)
 {
 	xfs_dqhash_t	*udqhash, *gdqhash;
 	xfs_qm_t	*xqm;
-	uint		i, hsize, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE;
+	uint		i, hsize;
 
 	/*
 	 * Initialize the dquot hash tables.
 	 */
-	hsize = XFS_QM_HASHSIZE_HIGH;
-	while (!(udqhash = kmem_zalloc(hsize * sizeof(*udqhash), flags))) {
-		if ((hsize >>= 1) <= XFS_QM_HASHSIZE_LOW)
-			flags = KM_SLEEP;
-	}
-	gdqhash = kmem_zalloc(hsize * sizeof(*gdqhash), KM_SLEEP | KM_LARGE);
+	udqhash = kmem_zalloc_greedy(&hsize,
+				     XFS_QM_HASHSIZE_LOW, XFS_QM_HASHSIZE_HIGH,
+				     KM_SLEEP | KM_MAYFAIL | KM_LARGE);
+	gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
+	hsize /= sizeof(xfs_dqhash_t);
 	ndquot = hsize << 8;
 
 	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
@@ -50,7 +50,7 @@ void
 xfs_ihash_init(xfs_mount_t *mp)
 {
	__uint64_t	icount;
-	uint		i, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE;
+	uint		i;
 
 	if (!mp->m_ihsize) {
 		icount = mp->m_maxicount ? mp->m_maxicount :
@@ -61,14 +61,13 @@ xfs_ihash_init(xfs_mount_t *mp)
 			(64 * NBPP) / sizeof(xfs_ihash_t));
 	}
 
-	while (!(mp->m_ihash = (xfs_ihash_t *)kmem_zalloc(mp->m_ihsize *
-						sizeof(xfs_ihash_t), flags))) {
-		if ((mp->m_ihsize >>= 1) <= NBPP)
-			flags = KM_SLEEP;
-	}
-	for (i = 0; i < mp->m_ihsize; i++) {
+	mp->m_ihash = kmem_zalloc_greedy(&mp->m_ihsize,
+					 NBPC * sizeof(xfs_ihash_t),
+					 mp->m_ihsize * sizeof(xfs_ihash_t),
+					 KM_SLEEP | KM_MAYFAIL | KM_LARGE);
+	mp->m_ihsize /= sizeof(xfs_ihash_t);
+	for (i = 0; i < mp->m_ihsize; i++)
 		rwlock_init(&(mp->m_ihash[i].ih_lock));
-	}
 }
 
 /*
@@ -77,7 +76,7 @@ xfs_ihash_init(xfs_mount_t *mp)
 void
 xfs_ihash_free(xfs_mount_t *mp)
 {
-	kmem_free(mp->m_ihash, mp->m_ihsize*sizeof(xfs_ihash_t));
+	kmem_free(mp->m_ihash, mp->m_ihsize * sizeof(xfs_ihash_t));
 	mp->m_ihash = NULL;
 }
 
@@ -326,7 +326,6 @@ xfs_bulkstat(
 	int			i;	/* loop index */
 	int			icount;	/* count of inodes good in irbuf */
 	int			irbsize; /* size of irec buffer in bytes */
-	unsigned int		kmflags; /* flags for allocating irec buffer */
 	xfs_ino_t		ino;	/* inode number (filesystem) */
 	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
 	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
@@ -371,19 +370,8 @@ xfs_bulkstat(
 			(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
 	nimask = ~(nicluster - 1);
 	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
-	/*
-	 * Allocate a local buffer for inode cluster btree records.
-	 * This caps our maximum readahead window (so don't be stingy)
-	 * but we must handle the case where we can't get a contiguous
-	 * multi-page buffer, so we drop back toward pagesize; the end
-	 * case we ensure succeeds, via appropriate allocation flags.
-	 */
-	irbsize = NBPP * 4;
-	kmflags = KM_SLEEP | KM_MAYFAIL;
-	while (!(irbuf = kmem_alloc(irbsize, kmflags))) {
-		if ((irbsize >>= 1) <= NBPP)
-			kmflags = KM_SLEEP;
-	}
+	irbuf = kmem_zalloc_greedy(&irbsize, NBPC, NBPC * 4,
+				   KM_SLEEP | KM_MAYFAIL | KM_LARGE);
 	nirbuf = irbsize / sizeof(*irbuf);
 
 	/*