x86: PAT: make pat_x_mtrr_type() more readable

Clean up the over-complicated logic in pat_x_mtrr_type(): return the
effective memory type directly instead of filling a *ret_prot argument.
And since reserve_memtype() ignores stray req_type bits when PAT is
enabled, it is better to mask them off when PAT is disabled as well.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Hugh Dickins, 2008-06-16 18:42:43 +01:00 (committed by Ingo Molnar)
parent faeca31d06
commit 6cf514fce1

@@ -159,47 +159,31 @@ static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */
  * The intersection is based on "Effective Memory Type" tables in IA-32
  * SDM vol 3a
  */
-static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
-                                unsigned long *ret_prot)
+static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 {
-        unsigned long pat_type;
         u8 mtrr_type;
 
-        pat_type = prot & _PAGE_CACHE_MASK;
-        prot &= (~_PAGE_CACHE_MASK);
-
         /*
          * We return the PAT request directly for types where PAT takes
          * precedence with respect to MTRR and for UC_MINUS.
          * Consistency checks with other PAT requests is done later
          * while going through memtype list.
          */
-        if (pat_type == _PAGE_CACHE_WC) {
-                *ret_prot = prot | _PAGE_CACHE_WC;
-                return 0;
-        } else if (pat_type == _PAGE_CACHE_UC_MINUS) {
-                *ret_prot = prot | _PAGE_CACHE_UC_MINUS;
-                return 0;
-        } else if (pat_type == _PAGE_CACHE_UC) {
-                *ret_prot = prot | _PAGE_CACHE_UC;
-                return 0;
-        }
+        if (req_type == _PAGE_CACHE_WC ||
+            req_type == _PAGE_CACHE_UC_MINUS ||
+            req_type == _PAGE_CACHE_UC)
+                return req_type;
 
         /*
          * Look for MTRR hint to get the effective type in case where PAT
          * request is for WB.
          */
         mtrr_type = mtrr_type_lookup(start, end);
-
-        if (mtrr_type == MTRR_TYPE_UNCACHABLE) {
-                *ret_prot = prot | _PAGE_CACHE_UC;
-        } else if (mtrr_type == MTRR_TYPE_WRCOMB) {
-                *ret_prot = prot | _PAGE_CACHE_WC;
-        } else {
-                *ret_prot = prot | _PAGE_CACHE_WB;
-        }
-
-        return 0;
+        if (mtrr_type == MTRR_TYPE_UNCACHABLE)
+                return _PAGE_CACHE_UC;
+        if (mtrr_type == MTRR_TYPE_WRCOMB)
+                return _PAGE_CACHE_WC;
+        return _PAGE_CACHE_WB;
 }
 
 /*
@@ -232,7 +216,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                         if (req_type == -1) {
                                 *ret_type = _PAGE_CACHE_WB;
                         } else {
-                                *ret_type = req_type;
+                                *ret_type = req_type & _PAGE_CACHE_MASK;
                         }
                 }
                 return 0;
@@ -264,14 +248,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                 }
         } else {
                 req_type &= _PAGE_CACHE_MASK;
-                err = pat_x_mtrr_type(start, end, req_type, &actual_type);
-        }
-
-        if (err) {
-                if (ret_type)
-                        *ret_type = actual_type;
-
-                return -EINVAL;
+                actual_type = pat_x_mtrr_type(start, end, req_type);
         }
 
         new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
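
As a footnote on the req_type masking: below is a minimal standalone
sketch (not kernel code) of what the added "& _PAGE_CACHE_MASK" buys on
the !pat_enabled path — a caller that passes stray bits alongside the
cache attribute now gets only the cache-attribute bits back. The defines
are local to the example and are assumed to mirror the x86
_PAGE_PWT/_PAGE_PCD bit layout; they are not taken from the kernel
headers.

/*
 * Standalone illustration only -- not kernel code.  The defines below
 * are assumptions mirroring the x86 cache-attribute bits, kept local
 * so the example compiles on its own.
 */
#include <stdio.h>

#define _PAGE_PWT               (1UL << 3)
#define _PAGE_PCD               (1UL << 4)
#define _PAGE_CACHE_MASK        (_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WC          (_PAGE_PWT)

int main(void)
{
        /* A WC request that carries a stray, non-cache bit. */
        unsigned long req_type = _PAGE_CACHE_WC | (1UL << 8);

        /* Without masking, the stray bit would leak into *ret_type. */
        printf("raw request:    %#lx\n", req_type);

        /* With the fix, only the cache-attribute bits survive. */
        printf("masked request: %#lx\n", req_type & _PAGE_CACHE_MASK);

        return 0;
}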