x86/resctrl: Move per RDT domain initialization to a separate function
Carve out per rdt_domain initialization code from rdtgroup_init_alloc()
into a separate function.

No functional change, make the code more readable and save us at least
two indentation levels.

Signed-off-by: Xiaochen Shen <xiaochen.shen@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: pei.p.jia@intel.com
Cc: Reinette Chatre <reinette.chatre@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/1555499329-1170-2-git-send-email-xiaochen.shen@intel.com
parent 1bddcc645f
commit 7390619ab9
@@ -2516,37 +2516,23 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
         bitmap_clear(val, zero_bit, cbm_len - zero_bit);
 }
 
-/**
- * rdtgroup_init_alloc - Initialize the new RDT group's allocations
+/*
+ * Initialize cache resources per RDT domain
  *
- * A new RDT group is being created on an allocation capable (CAT)
- * supporting system. Set this group up to start off with all usable
- * allocations. That is, all shareable and unused bits.
- *
- * All-zero CBM is invalid. If there are no more shareable bits available
- * on any domain then the entire allocation will fail.
+ * Set the RDT domain up to start off with all usable allocations. That is,
+ * all shareable and unused bits. All-zero CBM is invalid.
  */
-static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
+                                 u32 closid)
 {
         struct rdt_resource *r_cdp = NULL;
         struct rdt_domain *d_cdp = NULL;
         u32 used_b = 0, unused_b = 0;
-        u32 closid = rdtgrp->closid;
-        struct rdt_resource *r;
         unsigned long tmp_cbm;
         enum rdtgrp_mode mode;
-        struct rdt_domain *d;
         u32 peer_ctl, *ctrl;
-        int i, ret;
+        int i;
 
-        for_each_alloc_enabled_rdt_resource(r) {
-                /*
-                 * Only initialize default allocations for CBM cache
-                 * resources
-                 */
-                if (r->rid == RDT_RESOURCE_MBA)
-                        continue;
-                list_for_each_entry(d, &r->domains, list) {
         rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
         d->have_new_ctrl = false;
         d->new_ctrl = r->cache.shareable_bits;
@@ -2558,10 +2544,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
                         if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
                                 break;
                         /*
-                         * If CDP is active include peer
-                         * domain's usage to ensure there
-                         * is no overlap with an exclusive
-                         * group.
+                         * If CDP is active include peer domain's
+                         * usage to ensure there is no overlap
+                         * with an exclusive group.
                          */
                         if (d_cdp)
                                 peer_ctl = d_cdp->ctrl_val[i];
@@ -2583,18 +2568,46 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
          */
         cbm_ensure_valid(&d->new_ctrl, r);
         /*
-         * Assign the u32 CBM to an unsigned long to ensure
-         * that bitmap_weight() does not access out-of-bound
-         * memory.
+         * Assign the u32 CBM to an unsigned long to ensure that
+         * bitmap_weight() does not access out-of-bound memory.
          */
         tmp_cbm = d->new_ctrl;
-        if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
-            r->cache.min_cbm_bits) {
-                rdt_last_cmd_printf("No space on %s:%d\n",
-                                    r->name, d->id);
+        if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
+                rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
                 return -ENOSPC;
         }
         d->have_new_ctrl = true;
+
+        return 0;
+}
+
+/**
+ * rdtgroup_init_alloc - Initialize the new RDT group's allocations
+ *
+ * A new RDT group is being created on an allocation capable (CAT)
+ * supporting system. Set this group up to start off with all usable
+ * allocations.
+ *
+ * If there are no more shareable bits available on any domain then
+ * the entire allocation will fail.
+ */
+static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+{
+        struct rdt_resource *r;
+        struct rdt_domain *d;
+        int ret;
+
+        for_each_alloc_enabled_rdt_resource(r) {
+                /*
+                 * Only initialize default allocations for CBM cache
+                 * resources
+                 */
+                if (r->rid == RDT_RESOURCE_MBA)
+                        continue;
+                list_for_each_entry(d, &r->domains, list) {
+                        ret = __init_one_rdt_domain(d, r, rdtgrp->closid);
+                        if (ret < 0)
+                                return ret;
                 }
         }
 
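For readers skimming the change, here is a minimal, self-contained C sketch (not kernel code) of the refactoring pattern the patch applies: the per-domain body moves into a helper that returns an error code, and the caller keeps only the resource/domain iteration and error propagation. The types and names below (struct domain, struct resource, init_one_domain, init_alloc) are simplified stand-ins for rdt_domain, rdt_resource, __init_one_rdt_domain() and rdtgroup_init_alloc(), and the per-domain "initialization" is reduced to a placeholder.

/*
 * Illustrative sketch only -- not the kernel implementation. The structures
 * and helpers are simplified stand-ins; only the shape of the refactoring
 * (helper per domain, thin iterating caller) mirrors the patch above.
 */
#include <stdio.h>

struct domain {
        int id;
        unsigned int cbm;
};

struct resource {
        const char *name;
        struct domain domains[2];
        int ndomains;
};

/* Per-domain setup lives at one indentation level and reports its own error. */
static int init_one_domain(struct domain *d, const struct resource *r)
{
        d->cbm = 0xff;          /* placeholder for "all shareable and unused bits" */
        if (d->cbm == 0) {      /* an all-zero mask would be invalid */
                fprintf(stderr, "No space on %s:%d\n", r->name, d->id);
                return -1;
        }
        return 0;
}

/* The caller only iterates resources and domains and propagates the first error. */
static int init_alloc(struct resource *res, int nres)
{
        int i, j, ret;

        for (i = 0; i < nres; i++) {
                for (j = 0; j < res[i].ndomains; j++) {
                        ret = init_one_domain(&res[i].domains[j], &res[i]);
                        if (ret < 0)
                                return ret;
                }
        }
        return 0;
}

int main(void)
{
        struct resource res[] = { { "L3", { { 0, 0 }, { 1, 0 } }, 2 } };

        return init_alloc(res, 1) ? 1 : 0;
}

The kernel patch follows the same shape: __init_one_rdt_domain() returns -ENOSPC when a domain has no usable bits left, and rdtgroup_init_alloc() simply stops iterating and returns that error for the whole group.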