linux/lib/vsprintf.c

// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/lib/vsprintf.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
/*
* Wirzenius wrote this portably, Torvalds fucked it up :-)
*/
/*
* Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
* - changed to provide snprintf and vsnprintf functions
* So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de>
* - scnprintf and vscnprintf
*/
#include <linux/stdarg.h>
#include <linux/build_bug.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/errname.h>
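/*
 * Editor's note: <linux/errname.h> provides errname(), which backs the
 * %pe extension for printing symbolic error names (e.g. "ENOSPC") from
 * ERR_PTR values. Usage sketch from the changelog that introduced it:
 *
 *	if (IS_ERR(foo)) {
 *		pr_err("Sorry, can't do that: %pe\n", foo);
 *		return PTR_ERR(foo);
 *	}
 *
 * Values unknown to errname() are printed in decimal; non-ERR_PTR values
 * fall back to the ordinary hashed %p output.
 */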
#include <linux/module.h> /* for KSYM_SYMBOL_LEN */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/ioport.h>
#include <linux/dcache.h>
#include <linux/cred.h>
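/*
 * Editor's note: <linux/cred.h> is used by the %pK handling, which (under
 * kptr_restrict) hides kernel pointers unless the reader has CAP_SYSLOG
 * *and* its effective user/group ids match the real ids, so setuid
 * programs reading a %pK file for an unprivileged real user see NULL
 * instead of pointer values.
 */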
#include <linux/rtc.h>
#include <linux/time.h>
#include <linux/uuid.h>
#include <linux/of.h>
#include <net/addrconf.h>
#include <linux/siphash.h>
#include <linux/compiler.h>
#include <linux/property.h>
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
#endif
#include "../mm/internal.h" /* For the trace_print_flags arrays */
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/byteorder.h> /* cpu_to_le16 */
#include <linux/string_helpers.h>
#include "kstrtox.h"
static noinline unsigned long long simple_strntoull(const char *startp, size_t max_chars, char **endp, unsigned int base)
{
const char *cp;
unsigned long long result = 0ULL;
size_t prefix_chars;
unsigned int rv;
cp = _parse_integer_fixup_radix(startp, &base);
prefix_chars = cp - startp;
if (prefix_chars < max_chars) {
rv = _parse_integer_limit(cp, base, &result, max_chars - prefix_chars);
/* FIXME: the KSTRTOX_OVERFLOW bit is masked off below, so overflow is silently ignored */
cp += (rv & ~KSTRTOX_OVERFLOW);
} else {
/* Field too short for prefix + digit, skip over without converting */
cp = startp + max_chars;
}
if (endp)
*endp = (char *)cp;
return result;
}
/**
* simple_strtoull - convert a string to an unsigned long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
* This function has caveats. Please use kstrtoull instead.
*/
noinline
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
return simple_strntoull(cp, INT_MAX, endp, base);
}
EXPORT_SYMBOL(simple_strtoull);
/**
* simple_strtoul - convert a string to an unsigned long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
* This function has caveats. Please use kstrtoul instead.
*/
unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
{
return simple_strtoull(cp, endp, base);
}
EXPORT_SYMBOL(simple_strtoul);
/**
* simple_strtol - convert a string to a signed long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
* This function has caveats. Please use kstrtol instead.
*/
long simple_strtol(const char *cp, char **endp, unsigned int base)
{
if (*cp == '-')
return -simple_strtoul(cp + 1, endp, base);
return simple_strtoul(cp, endp, base);
}
EXPORT_SYMBOL(simple_strtol);
static long long simple_strntoll(const char *cp, size_t max_chars, char **endp,
unsigned int base)
{
/*
* simple_strntoull() safely handles receiving max_chars==0 in the
* case cp[0] == '-' && max_chars == 1.
* If max_chars == 0 we can drop through and pass it to simple_strntoull()
* and the content of *cp is irrelevant.
*/
if (*cp == '-' && max_chars > 0)
return -simple_strntoull(cp + 1, max_chars - 1, endp, base);
return simple_strntoull(cp, max_chars, endp, base);
}
/**
* simple_strtoll - convert a string to a signed long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
* This function has caveats. Please use kstrtoll instead.
*/
long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
return simple_strntoll(cp, INT_MAX, endp, base);
}
EXPORT_SYMBOL(simple_strtoll);
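/*
 * Editor's note: the do-while below consumes the first character
 * unconditionally, so callers are expected to have checked isdigit(**s)
 * before calling skip_atoi().
 */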
static noinline_for_stack
int skip_atoi(const char **s)
{
int i = 0;
do {
i = i*10 + *((*s)++) - '0';
} while (isdigit(**s));
return i;
}
/*
* Decimal conversion is by far the most typical, and is used for
* /proc and /sys data. This directly impacts e.g. top performance
* with many processes running. We optimize it for speed by emitting
* two characters at a time, using a 200 byte lookup table. This
* roughly halves the number of multiplications compared to computing
* the digits one at a time. Implementation strongly inspired by the
* previous version, which in turn used ideas described at
* <http://www.cs.uiowa.edu/~jones/bcd/divide.html> (with permission
* from the author, Douglas W. Jones).
*
* It turns out there is precisely one 26 bit fixed-point
* approximation a of 64/100 for which x/100 == (x * (u64)a) >> 32
* holds for all x in [0, 10^8-1], namely a = 0x28f5c29. The actual
* range happens to be somewhat larger (x <= 1073741898), but that's
* irrelevant for our purpose.
*
* For dividing a number in the range [10^4, 10^6-1] by 100, we still
* need a 32x32->64 bit multiply, so we simply use the same constant.
*
* For dividing a number in the range [100, 10^4-1] by 100, there are
* several options. The simplest is (x * 0x147b) >> 19, which is valid
* for all x <= 43698.
*/
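/*
 * Editor's illustration, checking the constants above:
 *
 *	0x28f5c29 = 42949673 = ceil(2^32 / 100), and e.g. for x = 99999999:
 *	(99999999 * 0x28f5c29) >> 32 = 4294967257050327 >> 32 = 999999.
 *
 *	0x147b = 5243, and 5243 / 2^19 ~= 0.0100002, so for x = 43698:
 *	(43698 * 0x147b) >> 19 = 229108614 >> 19 = 436, while x = 43699
 *	would already yield 437 and is just outside the valid range.
 */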
static const u16 decpair[100] = {
#define _(x) (__force u16) cpu_to_le16(((x % 10) | ((x / 10) << 8)) + 0x3030)
_( 0), _( 1), _( 2), _( 3), _( 4), _( 5), _( 6), _( 7), _( 8), _( 9),
_(10), _(11), _(12), _(13), _(14), _(15), _(16), _(17), _(18), _(19),
_(20), _(21), _(22), _(23), _(24), _(25), _(26), _(27), _(28), _(29),
_(30), _(31), _(32), _(33), _(34), _(35), _(36), _(37), _(38), _(39),
_(40), _(41), _(42), _(43), _(44), _(45), _(46), _(47), _(48), _(49),
_(50), _(51), _(52), _(53), _(54), _(55), _(56), _(57), _(58), _(59),
_(60), _(61), _(62), _(63), _(64), _(65), _(66), _(67), _(68), _(69),
_(70), _(71), _(72), _(73), _(74), _(75), _(76), _(77), _(78), _(79),
_(80), _(81), _(82), _(83), _(84), _(85), _(86), _(87), _(88), _(89),
_(90), _(91), _(92), _(93), _(94), _(95), _(96), _(97), _(98), _(99),
#undef _
};
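/*
 * Editor's example: _(37) == cpu_to_le16(0x3337), stored in memory as the
 * bytes '7' '3', i.e. ones digit first. The put_dec helpers below emit the
 * least significant pair first, so the whole number ends up with its
 * digits reversed; the callers later in the file reverse the buffer.
 */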
/*
* This will print a single '0' even if r == 0, since we would
* immediately jump to out_r where two 0s would be written but only
* one of them accounted for in buf. This is needed by ip4_string
* below. All other callers pass a non-zero value of r.
*/
static noinline_for_stack
char *put_dec_trunc8(char *buf, unsigned r)
{
unsigned q;
/* 1 <= r < 10^8 */
if (r < 100)
goto out_r;
/* 100 <= r < 10^8 */
q = (r * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
/* 1 <= q < 10^6 */
if (q < 100)
goto out_q;
/* 100 <= q < 10^6 */
r = (q * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[q - 100*r];
buf += 2;
/* 1 <= r < 10^4 */
if (r < 100)
goto out_r;
/* 100 <= r < 10^4 */
q = (r * 0x147b) >> 19;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
out_q:
/* 1 <= q < 100 */
r = q;
out_r:
/* 1 <= r < 100 */
*((u16 *)buf) = decpair[r];
buf += r < 10 ? 1 : 2;
return buf;
}
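/*
 * Editor's trace of put_dec_trunc8() for r = 12345:
 *	q = 12345 / 100 = 123, emit decpair[45] -> "54"
 *	r = 123 / 100 = 1,     emit decpair[23] -> "32"
 *	out_r:                 emit decpair[1]  -> "1" (single digit)
 * The buffer now holds "54321", i.e. the digits of 12345 in reverse; the
 * caller is responsible for producing the final ordering.
 */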
#if BITS_PER_LONG == 64 && BITS_PER_LONG_LONG == 64
static noinline_for_stack
char *put_dec_full8(char *buf, unsigned r)
{
unsigned q;
/* 0 <= r < 10^8 */
q = (r * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
/* 0 <= q < 10^6 */
r = (q * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[q - 100*r];
buf += 2;
/* 0 <= r < 10^4 */
q = (r * 0x147b) >> 19;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
/* 0 <= q < 100 */
*((u16 *)buf) = decpair[q];
buf += 2;
return buf;
}
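/*
 * A minimal userspace sketch (not kernel code, assuming a C99 compiler) of
 * how the two reciprocal constants above can be checked:
 * (q * 0x28f5c29) >> 32 equals q / 100 for all q < 10^6, and
 * (r * 0x147b) >> 19 equals r / 100 for all r < 10^4, which is why each
 * step above peels off two decimal digits with one multiply and one shift
 * instead of two divisions by 10.
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	static void check_div100_constants(void)
 *	{
 *		for (uint32_t q = 0; q < 1000000; q++)
 *			assert(((q * (uint64_t)0x28f5c29) >> 32) == q / 100);
 *		for (uint32_t r = 0; r < 10000; r++)
 *			assert(((r * 0x147bu) >> 19) == r / 100);
 *	}
 */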
static noinline_for_stack
char *put_dec(char *buf, unsigned long long n)
{
if (n >= 100*1000*1000)
buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
	/* 1 <= n < 1.9e11 */
if (n >= 100*1000*1000)
buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
/* 1 <= n < 1e8 */
return put_dec_trunc8(buf, n);
}
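/*
 * Worked example (a sketch, not kernel code): put_dec() stores digits
 * least-significant first and returns a pointer just past the last digit
 * written.  For n = 1234567890123456789, the first do_div() returns
 * 23456789 and leaves n = 12345678901, the second returns 45678901 and
 * leaves n = 123, and put_dec_trunc8() then writes '3', '2', '1'.  Reading
 * the buffer backwards yields the decimal string, which is what
 * num_to_str() below does.  A hypothetical userspace caller, assuming C11
 * and that put_dec() were callable there, might look like:
 *
 *	#include <stdio.h>
 *
 *	_Alignas(2) char tmp[20];	// put_dec() stores u16 digit pairs
 *	int len = put_dec(tmp, 1234567890123456789ULL) - tmp;
 *
 *	while (len--)
 *		putchar(tmp[len]);	// prints "1234567890123456789"
 */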
#elif BITS_PER_LONG == 32 && BITS_PER_LONG_LONG == 64
static void
put_dec_full4(char *buf, unsigned r)
{
unsigned q;
/* 0 <= r < 10^4 */
q = (r * 0x147b) >> 19;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
/* 0 <= q < 100 */
*((u16 *)buf) = decpair[q];
}
/*
* Call put_dec_full4 on x % 10000, return x / 10000.
* The approximation x/10000 == (x * 0x346DC5D7) >> 43
* holds for all x < 1,128,869,999. The largest value this
* helper will ever be asked to convert is 1,125,520,955.
* (second call in the put_dec code, assuming n is all-ones).
*/
static noinline_for_stack
unsigned put_dec_helper4(char *buf, unsigned x)
{
uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43;
put_dec_full4(buf, x - q * 10000);
return q;
}
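/*
 * A userspace sketch (not kernel code, assuming C99) of how the constant
 * above can be checked: the reciprocal multiply-and-shift agrees with true
 * division by 10000 for every x below the documented 1,128,869,999 bound,
 * comfortably covering the largest value put_dec() below ever passes in.
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	static void check_div10000_constant(void)
 *	{
 *		for (uint32_t x = 0; x < 1128869999u; x++)
 *			assert(((x * (uint64_t)0x346DC5D7) >> 43) == x / 10000);
 *	}
 */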
/* Based on code by Douglas W. Jones found at
* <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour>
* (with permission from the author).
* Performs no 64-bit division and hence should be fast on 32-bit machines.
*/
static
char *put_dec(char *buf, unsigned long long n)
{
uint32_t d3, d2, d1, q, h;
if (n < 100*1000*1000)
return put_dec_trunc8(buf, n);
d1 = ((uint32_t)n >> 16); /* implicit "& 0xffff" */
h = (n >> 32);
d2 = (h ) & 0xffff;
d3 = (h >> 16); /* implicit "& 0xffff" */
/* n = 2^48 d3 + 2^32 d2 + 2^16 d1 + d0
= 281_4749_7671_0656 d3 + 42_9496_7296 d2 + 6_5536 d1 + d0 */
q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
q = put_dec_helper4(buf, q);
q += 7671 * d3 + 9496 * d2 + 6 * d1;
q = put_dec_helper4(buf+4, q);
q += 4749 * d3 + 42 * d2;
q = put_dec_helper4(buf+8, q);
q += 281 * d3;
buf += 12;
if (q)
buf = put_dec_trunc8(buf, q);
else while (buf[-1] == '0')
--buf;
return buf;
}
#endif
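/*
 * A standalone sketch (not kernel code, assuming C99) of the base-10000
 * decomposition the 32-bit put_dec() above relies on.  Writing 2^16, 2^32
 * and 2^48 in groups of four decimal digits gives 6_5536, 42_9496_7296 and
 * 281_4749_7671_0656, so n can be rebuilt one group at a time while
 * put_dec_helper4() carries the overflow of each group into the next:
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	static void check_base10000_split(uint64_t n)
 *	{
 *		uint32_t d0 = n & 0xffff, d1 = (n >> 16) & 0xffff;
 *		uint32_t d2 = (n >> 32) & 0xffff, d3 = n >> 48;
 *		uint64_t g0 = 656ull * d3 + 7296 * d2 + 5536 * d1 + d0;
 *		uint64_t g1 = 7671ull * d3 + 9496 * d2 + 6 * d1;
 *		uint64_t g2 = 4749ull * d3 + 42 * d2;
 *		uint64_t g3 = 281ull * d3;
 *
 *		assert(g0 + 10000 * g1 + 100000000 * g2
 *			     + 1000000000000ull * g3 == n);
 *	}
 */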
/*
 * Convert the passed number to a decimal string.
 * Returns the length of the string.  On buffer overflow, returns 0.
 *
 * If speed is not important, use snprintf(); its code is easier to read.
*/
int num_to_str(char *buf, int size, unsigned long long num, unsigned int width)
{
/* put_dec requires 2-byte alignment of the buffer. */
char tmp[sizeof(num) * 3] __aligned(2);
int idx, len;
/* put_dec() may work incorrectly for num = 0 (generate "", not "0") */
if (num <= 9) {
tmp[0] = '0' + num;
len = 1;
} else {
len = put_dec(tmp, num) - tmp;
}
if (len > size || width > size)
return 0;
if (width > len) {
width = width - len;
for (idx = 0; idx < width; idx++)
buf[idx] = ' ';
} else {
width = 0;
}
for (idx = 0; idx < len; ++idx)
buf[idx + width] = tmp[len - idx - 1];
return len + width;
}
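
/*
 * Illustrative use of num_to_str() above (not part of the original file):
 * the caller passes a destination buffer, its size, the value and a minimum
 * field width, e.g.
 *
 *	char buf[16];
 *	int n = num_to_str(buf, sizeof(buf), 42, 5);
 *
 * writes the five bytes "   42" (space padded, not NUL-terminated) and
 * returns 5; it returns 0 if either the digits or the requested width do
 * not fit into the buffer.
 */
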
#define SIGN 1 /* unsigned/signed, must be 1 */
#define LEFT 2 /* left justified */
#define PLUS 4 /* show plus */
#define SPACE 8 /* space if plus */
#define ZEROPAD 16 /* pad with zero, must be 16 == '0' - ' ' */
#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */
#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */
static_assert(SIGN == 1);
static_assert(ZEROPAD == ('0' - ' '));
static_assert(SMALL == ('a' ^ 'A'));
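
/*
 * Note on the flag values (illustrative, not from the original file): the
 * "must be" constraints checked by the static_asserts above are relied on
 * below. ZEROPAD == '0' - ' ' lets number() pick the pad character with
 * ' ' + (spec.flags & ZEROPAD); SMALL == 0x20 == 'a' ^ 'A' lets hex letters
 * be lowercased by ORing with 'locase' while digits pass through unchanged;
 * and SIGN == 1 lets the format decoder later in this file select the
 * signed variant of an integer type by adding (spec.flags & SIGN) to the
 * unsigned enum value, which is why each FORMAT_TYPE_U* entry below is
 * immediately followed by its signed twin.
 */
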
enum format_type {
FORMAT_TYPE_NONE, /* Just a string part */
FORMAT_TYPE_WIDTH,
FORMAT_TYPE_PRECISION,
FORMAT_TYPE_CHAR,
FORMAT_TYPE_STR,
FORMAT_TYPE_PTR,
FORMAT_TYPE_PERCENT_CHAR,
FORMAT_TYPE_INVALID,
FORMAT_TYPE_LONG_LONG,
FORMAT_TYPE_ULONG,
FORMAT_TYPE_LONG,
FORMAT_TYPE_UBYTE,
FORMAT_TYPE_BYTE,
FORMAT_TYPE_USHORT,
FORMAT_TYPE_SHORT,
FORMAT_TYPE_UINT,
FORMAT_TYPE_INT,
FORMAT_TYPE_SIZE_T,
FORMAT_TYPE_PTRDIFF
};
struct printf_spec {
unsigned int type:8; /* format_type enum */
signed int field_width:24; /* width of output field */
unsigned int flags:8; /* flags to number() */
unsigned int base:8; /* number base, 8, 10 or 16 only */
signed int precision:16; /* # of digits/chars */
} __packed;
static_assert(sizeof(struct printf_spec) == 8);
#define FIELD_WIDTH_MAX ((1 << 23) - 1)
#define PRECISION_MAX ((1 << 15) - 1)
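
/*
 * Note (not from the original file): the bit-field widths above add up to
 * exactly 64 bits (8 + 24 + 8 + 8 + 16), which is what the static_assert
 * on sizeof(struct printf_spec) checks; keeping the spec at 8 bytes means
 * it can be passed around cheaply by value. FIELD_WIDTH_MAX and
 * PRECISION_MAX are simply the largest values representable in the signed
 * 24-bit and 16-bit fields.
 */
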
static noinline_for_stack
char *number(char *buf, char *end, unsigned long long num,
struct printf_spec spec)
{
/* put_dec requires 2-byte alignment of the buffer. */
char tmp[3 * sizeof(num)] __aligned(2);
char sign;
char locase;
int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
int i;
bool is_zero = num == 0LL;
int field_width = spec.field_width;
int precision = spec.precision;
/* locase = 0 or 0x20. ORing digits or letters with 'locase'
 * produces the same digits or (possibly lowercased) letters */
locase = (spec.flags & SMALL);
if (spec.flags & LEFT)
spec.flags &= ~ZEROPAD;
sign = 0;
if (spec.flags & SIGN) {
if ((signed long long)num < 0) {
sign = '-';
num = -(signed long long)num;
field_width--;
} else if (spec.flags & PLUS) {
sign = '+';
field_width--;
} else if (spec.flags & SPACE) {
sign = ' ';
field_width--;
}
}
if (need_pfx) {
if (spec.base == 16)
field_width -= 2;
else if (!is_zero)
field_width--;
}
/* generate full string in tmp[], in reverse order */
i = 0;
if (num < spec.base)
tmp[i++] = hex_asc_upper[num] | locase;
else if (spec.base != 10) { /* 8 or 16 */
int mask = spec.base - 1;
int shift = 3;
if (spec.base == 16)
shift = 4;
do {
tmp[i++] = (hex_asc_upper[((unsigned char)num) & mask] | locase);
num >>= shift;
} while (num);
} else { /* base 10 */
i = put_dec(tmp, num) - tmp;
}
/* printing 100 using %2d gives "100", not "00" */
if (i > precision)
precision = i;
/* leading space padding */
field_width -= precision;
if (!(spec.flags & (ZEROPAD | LEFT))) {
while (--field_width >= 0) {
if (buf < end)
*buf = ' ';
++buf;
}
}
/* sign */
if (sign) {
if (buf < end)
*buf = sign;
++buf;
}
/* "0x" / "0" prefix */
if (need_pfx) {
if (spec.base == 16 || !is_zero) {
if (buf < end)
*buf = '0';
++buf;
}
if (spec.base == 16) {
if (buf < end)
*buf = ('X' | locase);
++buf;
}
}
/* zero or space padding */
if (!(spec.flags & LEFT)) {
char c = ' ' + (spec.flags & ZEROPAD);
while (--field_width >= 0) {
if (buf < end)
*buf = c;
++buf;
}
}
/* hmm even more zero padding? */
while (i <= --precision) {
if (buf < end)
*buf = '0';
++buf;
}
/* actual digits of result */
while (--i >= 0) {
if (buf < end)
*buf = tmp[i];
++buf;
}
/* trailing space padding */
while (--field_width >= 0) {
if (buf < end)
*buf = ' ';
++buf;
}
return buf;
}
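
/*
 * Worked example (illustrative, not from the original file): for a "%#08x"
 * conversion of 0x1234 the decoder hands number() a spec with base == 16,
 * field_width == 8, precision == -1 and flags == SPECIAL | SMALL | ZEROPAD;
 * the steps above then emit the "0x" prefix, two '0' pad characters and the
 * digits, producing "0x001234".
 */
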
static noinline_for_stack
char *special_hex_number(char *buf, char *end, unsigned long long num, int size)
{
struct printf_spec spec;
spec.type = FORMAT_TYPE_PTR;
spec.field_width = 2 + 2 * size; /* 0x + hex */
spec.flags = SPECIAL | SMALL | ZEROPAD;
spec.base = 16;
spec.precision = -1;
return number(buf, end, num, spec);
}
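
/*
 * Example (illustrative): special_hex_number(buf, end, 0xdead, 4) prints
 * "0x0000dead", i.e. the "0x" prefix followed by 2 * size zero-padded
 * lowercase hex digits.
 */

/*
 * move_right() shifts the first @len bytes at @buf right by @spaces
 * positions and fills the gap with spaces, dropping anything that would
 * end up beyond @end.
 */
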
static void move_right(char *buf, char *end, unsigned len, unsigned spaces)
{
size_t size;
if (buf >= end) /* nowhere to put anything */
return;
size = end - buf;
if (size <= spaces) {
memset(buf, ' ', size);
return;
}
if (len) {
if (len > size - spaces)
len = size - spaces;
memmove(buf + spaces, buf, len);
}
memset(buf, ' ', spaces);
}
/*
* Handle field width padding for a string.
* @buf: current buffer position
* @n: length of string
* @end: end of output buffer
* @spec: for field width and flags
* Returns: new buffer position after padding.
*/
static noinline_for_stack
char *widen_string(char *buf, int n, char *end, struct printf_spec spec)
{
unsigned spaces;
if (likely(n >= spec.field_width))
return buf;
/* we want to pad the sucker */
spaces = spec.field_width - n;
if (!(spec.flags & LEFT)) {
move_right(buf - n, end, n, spaces);
return buf + spaces;
}
while (spaces--) {
if (buf < end)
*buf = ' ';
++buf;
}
return buf;
}
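
/*
 * Example (illustrative): after three characters "abc" have been written,
 * widen_string(buf, 3, end, spec) with spec.field_width == 8 and LEFT not
 * set uses move_right() to turn the output into "     abc" and returns the
 * position after the padding; with LEFT set it appends five trailing
 * spaces instead.
 */
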
/* Handle string from a well-known address. */
static char *string_nocheck(char *buf, char *end, const char *s,
struct printf_spec spec)
{
int len = 0;
int lim = spec.precision;
while (lim--) {
char c = *s++;
if (!c)
break;
if (buf < end)
*buf = c;
++buf;
++len;
}
return widen_string(buf, len, end, spec);
}
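
/*
 * Example (illustrative): with spec.precision == 3 (as produced by "%.3s")
 * at most three bytes of the source are copied, so "abcdef" is emitted as
 * "abc" before widen_string() applies any field-width padding.
 */
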
static char *err_ptr(char *buf, char *end, void *ptr,
struct printf_spec spec)
{
int err = PTR_ERR(ptr);
const char *sym = errname(err);
if (sym)
return string_nocheck(buf, end, sym, spec);
/*
* Somebody passed ERR_PTR(-1234) or some other non-existing
* Efoo - or perhaps CONFIG_SYMBOLIC_ERRNAME=n. Fall back to
* printing it as its decimal representation.
*/
spec.flags |= SIGN;
spec.base = 10;
return number(buf, end, err, spec);
}
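
/*
 * Example (illustrative): for ERR_PTR(-ENOSPC) the symbolic name returned
 * by errname() is printed when CONFIG_SYMBOLIC_ERRNAME is enabled; if the
 * name is not known (or the option is disabled) the code above falls back
 * to the signed decimal value, here "-28".
 */
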
/* Be careful: error messages must fit into the given buffer. */
static char *error_string(char *buf, char *end, const char *s,
struct printf_spec spec)
{
/*
* Hard limit to avoid completely insane messages. It actually
* works pretty well because most error messages come from
* the many pointer format modifiers.
*/
if (spec.precision == -1)
spec.precision = 2 * sizeof(void *);
return string_nocheck(buf, end, s, spec);
}
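
/*
 * Note (illustrative): the default cap of 2 * sizeof(void *) characters
 * (16 on 64-bit) is enough for the short strings used here, such as
 * "(null)", while still truncating anything unreasonably long via the
 * precision handling in string_nocheck().
 */
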
/*
* Do not call any complex external code here. Nested printk()/vsprintf()
* might cause infinite loops. Failures might break printk() and would
* be hard to debug.
*/
static const char *check_pointer_msg(const void *ptr)
{
if (!ptr)
return "(null)";
if ((unsigned long)ptr < PAGE_SIZE || IS_ERR_VALUE(ptr))
return "(efault)";
return NULL;
}
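/*
 * Illustrative note: check_pointer() is the common guard used by the
 * %p<x> handlers below. On a NULL or obviously invalid pointer it emits
 * "(null)" or "(efault)" into the output and returns -EFAULT, so a
 * caller can simply bail out with the already-advanced buffer position:
 *
 *	if (check_pointer(&buf, end, ptr, spec))
 *		return buf;
 */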
static int check_pointer(char **buf, char *end, const void *ptr,
struct printf_spec spec)
{
const char *err_msg;
err_msg = check_pointer_msg(ptr);
if (err_msg) {
*buf = error_string(*buf, end, err_msg, spec);
return -EFAULT;
}
return 0;
}
static noinline_for_stack
char *string(char *buf, char *end, const char *s,
struct printf_spec spec)
{
if (check_pointer(&buf, end, s, spec))
return buf;
return string_nocheck(buf, end, s, spec);
}
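/*
 * Note: pointer_string() prints the raw pointer value in lowercase hex,
 * zero-padded to 2 * sizeof(void *) digits when no explicit field width
 * was given (16 digits on 64-bit). It is used, for example, by %px and
 * by the hashed/NULL/error fallbacks in ptr_to_id() below.
 */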
static char *pointer_string(char *buf, char *end,
const void *ptr,
struct printf_spec spec)
{
spec.base = 16;
spec.flags |= SMALL;
if (spec.field_width == -1) {
spec.field_width = 2 * sizeof(ptr);
spec.flags |= ZEROPAD;
}
return number(buf, end, (unsigned long int)ptr, spec);
}
/* Make pointers available for printing early in the boot sequence. */
static int debug_boot_weak_hash __ro_after_init;
static int __init debug_boot_weak_hash_enable(char *str)
{
debug_boot_weak_hash = 1;
pr_info("debug_boot_weak_hash enabled\n");
return 0;
}
early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
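/*
 * Example (illustrative): booting with "debug_boot_weak_hash" on the
 * kernel command line makes hashed %p output use hash_long() instead of
 * siphash, so pointers can be printed before the random key is ready.
 */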
static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
static siphash_key_t ptr_key __read_mostly;
static void enable_ptr_key_workfn(struct work_struct *work)
{
get_random_bytes(&ptr_key, sizeof(ptr_key));
/* Needs to run from preemptible context */
static_branch_disable(&not_filled_random_ptr_key);
}
static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
static void fill_random_ptr_key(struct random_ready_callback *unused)
{
/* This may be in an interrupt handler. */
queue_work(system_unbound_wq, &enable_ptr_key_work);
}
static struct random_ready_callback random_ready = {
.func = fill_random_ptr_key
};
static int __init initialize_ptr_random(void)
{
int key_size = sizeof(ptr_key);
int ret;
/* Use hw RNG if available. */
if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
static_branch_disable(&not_filled_random_ptr_key);
return 0;
}
ret = add_random_ready_callback(&random_ready);
if (!ret) {
return 0;
} else if (ret == -EALREADY) {
/* This is in preemptible context */
enable_ptr_key_workfn(&enable_ptr_key_work);
return 0;
}
return ret;
}
early_initcall(initialize_ptr_random);
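/*
 * Summary (illustrative) of the key setup above: if the architecture RNG
 * can supply the key at early_initcall() time it is used immediately;
 * otherwise the random-ready callback, which may fire in interrupt
 * context, queues the work item so that the key is filled and the static
 * branch is disabled from a preemptible context.
 */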
/* Maps a pointer to a 32 bit unique identifier. */
static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
{
unsigned long hashval;
if (static_branch_unlikely(&not_filled_random_ptr_key))
return -EAGAIN;
#ifdef CONFIG_64BIT
hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
/*
* Mask off the first 32 bits, this makes explicit that we have
* modified the address (and 32 bits is plenty for a unique ID).
*/
hashval = hashval & 0xffffffff;
#else
hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
#endif
*hashval_out = hashval;
return 0;
}
int ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
{
return __ptr_to_hashval(ptr, hashval_out);
}
static char *ptr_to_id(char *buf, char *end, const void *ptr,
struct printf_spec spec)
{
const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
unsigned long hashval;
int ret;
/*
* Print the real pointer value for NULL and error pointers,
* as they are not actual addresses.
*/
if (IS_ERR_OR_NULL(ptr))
return pointer_string(buf, end, ptr, spec);
/* When debugging early boot use non-cryptographically secure hash. */
if (unlikely(debug_boot_weak_hash)) {
hashval = hash_long((unsigned long)ptr, 32);
return pointer_string(buf, end, (const void *)hashval, spec);
}
ret = __ptr_to_hashval(ptr, &hashval);
if (ret) {
spec.field_width = 2 * sizeof(ptr);
/* string length must be less than default_width */
return error_string(buf, end, str, spec);
}
return pointer_string(buf, end, (const void *)hashval, spec);
}
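/*
 * Illustrative behaviour of ptr_to_id() above: NULL and ERR_PTR values
 * are printed as-is, early boot (or a failed hash) yields the
 * "(____ptrval____)" placeholder, and otherwise a 32-bit siphash-derived
 * id is printed zero-padded to pointer width.
 */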
int kptr_restrict __read_mostly;
static noinline_for_stack
char *restricted_pointer(char *buf, char *end, const void *ptr,
struct printf_spec spec)
{
switch (kptr_restrict) {
case 0:
/* Handle as %p, hash and do _not_ leak addresses. */
return ptr_to_id(buf, end, ptr, spec);
case 1: {
const struct cred *cred;
/*
* kptr_restrict==1 cannot be used in IRQ context
* because its test for CAP_SYSLOG would be meaningless.
*/
if (in_irq() || in_serving_softirq() || in_nmi()) {
if (spec.field_width == -1)
spec.field_width = 2 * sizeof(ptr);
return error_string(buf, end, "pK-error", spec);
}
/*
* Only print the real pointer value if the current
* process has CAP_SYSLOG and is running with the
* same credentials it started with. This is because
* access to files is checked at open() time, but %pK
* checks permission at read() time. We don't want to
* leak pointer values if a binary opens a file using
* %pK and then elevates privileges before reading it.
*/
cred = current_cred();
if (!has_capability_noaudit(current, CAP_SYSLOG) ||
!uid_eq(cred->euid, cred->uid) ||
!gid_eq(cred->egid, cred->gid))
ptr = NULL;
break;
}
case 2:
default:
/* Always print 0's for %pK */
ptr = NULL;
break;
}
return pointer_string(buf, end, ptr, spec);
}
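/*
 * Illustrative summary of %pK above: kptr_restrict == 0 hashes the value
 * like %p, == 1 shows the real address only to CAP_SYSLOG holders whose
 * effective and real ids match, and == 2 (or any other value) always
 * prints zeros.
 */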
static noinline_for_stack
char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec,
const char *fmt)
{
const char *array[4], *s;
const struct dentry *p;
int depth;
int i, n;
switch (fmt[1]) {
case '2': case '3': case '4':
depth = fmt[1] - '0';
break;
default:
depth = 1;
}
rcu_read_lock();
for (i = 0; i < depth; i++, d = p) {
if (check_pointer(&buf, end, d, spec)) {
rcu_read_unlock();
return buf;
}
p = READ_ONCE(d->d_parent);
array[i] = READ_ONCE(d->d_name.name);
if (p == d) {
if (i)
array[i] = "";
i++;
break;
}
}
s = array[--i];
for (n = 0; n != spec.precision; n++, buf++) {
char c = *s++;
if (!c) {
if (!i)
break;
c = '/';
s = array[--i];
}
if (buf < end)
*buf = c;
}
rcu_read_unlock();
return widen_string(buf, n, end, spec);
}
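/*
 * Example (illustrative): %pd prints the last component of a dentry's
 * name, while %pd2..%pd4 print up to that many trailing components
 * separated by '/'. %pD below does the same for a struct file.
 */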
static noinline_for_stack
char *file_dentry_name(char *buf, char *end, const struct file *f,
struct printf_spec spec, const char *fmt)
{
if (check_pointer(&buf, end, f, spec))
return buf;
return dentry_name(buf, end, f->f_path.dentry, spec, fmt);
}
#ifdef CONFIG_BLOCK
static noinline_for_stack
char *bdev_name(char *buf, char *end, struct block_device *bdev,
struct printf_spec spec, const char *fmt)
{
struct gendisk *hd;
if (check_pointer(&buf, end, bdev, spec))
return buf;
hd = bdev->bd_disk;
buf = string(buf, end, hd->disk_name, spec);
if (bdev->bd_partno) {
if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) {
if (buf < end)
*buf = 'p';
buf++;
}
buf = number(buf, end, bdev->bd_partno, spec);
}
return buf;
}
#endif
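/*
 * Example (illustrative): %pg prints a block device name such as "sda2"
 * or "mmcblk0p1"; bdev_name() above inserts a 'p' before the partition
 * number when the disk name already ends in a digit.
 */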
static noinline_for_stack
char *symbol_string(char *buf, char *end, void *ptr,
struct printf_spec spec, const char *fmt)
{
unsigned long value;
#ifdef CONFIG_KALLSYMS
char sym[KSYM_SYMBOL_LEN];
#endif
if (fmt[1] == 'R')
ptr = __builtin_extract_return_addr(ptr);
value = (unsigned long)ptr;
#ifdef CONFIG_KALLSYMS
if (*fmt == 'B' && fmt[1] == 'b')
sprint_backtrace_build_id(sym, value);
else if (*fmt == 'B')
sprint_backtrace(sym, value);
else if (*fmt == 'S' && (fmt[1] == 'b' || (fmt[1] == 'R' && fmt[2] == 'b')))
sprint_symbol_build_id(sym, value);
else if (*fmt != 's')
sprint_symbol(sym, value);
else
sprint_symbol_no_offset(sym, value);
return string_nocheck(buf, end, sym, spec);
#else
return special_hex_number(buf, end, value, sizeof(void *));
#endif
}
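/*
 * Illustrative summary of symbol_string(): %pS prints symbol+offset,
 * %ps the bare symbol name, %pB a backtrace-adjusted symbol, and a 'b'
 * suffix appends the module build ID. Without CONFIG_KALLSYMS the
 * address is printed as a plain hex number instead.
 */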
static const struct printf_spec default_str_spec = {
.field_width = -1,
.precision = -1,
};
static const struct printf_spec default_flag_spec = {
.base = 16,
.precision = -1,
.flags = SPECIAL | SMALL,
};
static const struct printf_spec default_dec_spec = {
.base = 10,
.precision = -1,
};
static const struct printf_spec default_dec02_spec = {
.base = 10,
.field_width = 2,
.precision = -1,
.flags = ZEROPAD,
};
static const struct printf_spec default_dec04_spec = {
.base = 10,
.field_width = 4,
.precision = -1,
.flags = ZEROPAD,
};
static noinline_for_stack
char *resource_string(char *buf, char *end, struct resource *res,
struct printf_spec spec, const char *fmt)
{
#ifndef IO_RSRC_PRINTK_SIZE
#define IO_RSRC_PRINTK_SIZE 6
#endif
#ifndef MEM_RSRC_PRINTK_SIZE
#define MEM_RSRC_PRINTK_SIZE 10
#endif
static const struct printf_spec io_spec = {
.base = 16,
.field_width = IO_RSRC_PRINTK_SIZE,
.precision = -1,
.flags = SPECIAL | SMALL | ZEROPAD,
};
static const struct printf_spec mem_spec = {
.base = 16,
.field_width = MEM_RSRC_PRINTK_SIZE,
.precision = -1,
.flags = SPECIAL | SMALL | ZEROPAD,
};
static const struct printf_spec bus_spec = {
.base = 16,
.field_width = 2,
.precision = -1,
.flags = SMALL | ZEROPAD,
};
static const struct printf_spec str_spec = {
.field_width = -1,
.precision = 10,
.flags = LEFT,
};
/* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8)
* 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
#define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4)
#define FLAG_BUF_SIZE (2 * sizeof(res->flags))
#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]")
#define RAW_BUF_SIZE sizeof("[mem - flags 0x]")
char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
char *p = sym, *pend = sym + sizeof(sym);
int decode = (fmt[0] == 'R') ? 1 : 0;
const struct printf_spec *specp;
if (check_pointer(&buf, end, res, spec))
return buf;
*p++ = '[';
if (res->flags & IORESOURCE_IO) {
p = string_nocheck(p, pend, "io ", str_spec);
specp = &io_spec;
} else if (res->flags & IORESOURCE_MEM) {
p = string_nocheck(p, pend, "mem ", str_spec);
specp = &mem_spec;
} else if (res->flags & IORESOURCE_IRQ) {
p = string_nocheck(p, pend, "irq ", str_spec);
specp = &default_dec_spec;
} else if (res->flags & IORESOURCE_DMA) {
p = string_nocheck(p, pend, "dma ", str_spec);
specp = &default_dec_spec;
} else if (res->flags & IORESOURCE_BUS) {
p = string_nocheck(p, pend, "bus ", str_spec);
specp = &bus_spec;
} else {
p = string_nocheck(p, pend, "??? ", str_spec);
specp = &mem_spec;
decode = 0;
}
if (decode && res->flags & IORESOURCE_UNSET) {
p = string_nocheck(p, pend, "size ", str_spec);
p = number(p, pend, resource_size(res), *specp);
} else {
p = number(p, pend, res->start, *specp);
if (res->start != res->end) {
*p++ = '-';
p = number(p, pend, res->end, *specp);
}
}
if (decode) {
if (res->flags & IORESOURCE_MEM_64)
p = string_nocheck(p, pend, " 64bit", str_spec);
if (res->flags & IORESOURCE_PREFETCH)
p = string_nocheck(p, pend, " pref", str_spec);
if (res->flags & IORESOURCE_WINDOW)
p = string_nocheck(p, pend, " window", str_spec);
if (res->flags & IORESOURCE_DISABLED)
p = string_nocheck(p, pend, " disabled", str_spec);
} else {
p = string_nocheck(p, pend, " flags ", str_spec);
p = number(p, pend, res->flags, default_flag_spec);
}
*p++ = ']';
*p = '\0';
return string_nocheck(buf, end, sym, spec);
}
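/*
 * Example (illustrative) output of the %pR/%pr handler above:
 *
 *	%pR: [mem 0x60000000-0x6fffffff pref]
 *	%pr: [mem 0x60000000-0x6fffffff flags 0x2200]
 *
 * %pR decodes the flags into "64bit"/"pref"/"window"/"disabled" suffixes,
 * while %pr prints the raw flags value.
 */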
static noinline_for_stack
char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
const char *fmt)
{
int i, len = 1; /* if we pass '%ph[CDN]', field width remains
negative value, fallback to the default */
char separator;
if (spec.field_width == 0)
/* nothing to print */
return buf;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (fmt[1]) {
case 'C':
separator = ':';
break;
case 'D':
separator = '-';
break;
case 'N':
separator = 0;
break;
default:
separator = ' ';
break;
}
if (spec.field_width > 0)
len = min_t(int, spec.field_width, 64);
for (i = 0; i < len; ++i) {
if (buf < end)
*buf = hex_asc_hi(addr[i]);
++buf;
if (buf < end)
*buf = hex_asc_lo(addr[i]);
++buf;
if (separator && i != len - 1) {
if (buf < end)
*buf = separator;
++buf;
}
}
return buf;
}
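/*
 * Example (illustrative) for the %*ph handler above: the field width
 * (capped at 64) selects how many bytes are dumped, e.g.
 *
 *	%4ph:  00 01 02 03
 *	%4phC: 00:01:02:03
 *
 * 'D' uses '-' as the separator and 'N' uses none.
 */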
static noinline_for_stack
char *bitmap_string(char *buf, char *end, unsigned long *bitmap,
struct printf_spec spec, const char *fmt)
{
const int CHUNKSZ = 32;
int nr_bits = max_t(int, spec.field_width, 0);
int i, chunksz;
bool first = true;
if (check_pointer(&buf, end, bitmap, spec))
return buf;
/* reused to print numbers */
spec = (struct printf_spec){ .flags = SMALL | ZEROPAD, .base = 16 };
chunksz = nr_bits & (CHUNKSZ - 1);
if (chunksz == 0)
chunksz = CHUNKSZ;
i = ALIGN(nr_bits, CHUNKSZ) - CHUNKSZ;
for (; i >= 0; i -= CHUNKSZ) {
u32 chunkmask, val;
int word, bit;
chunkmask = ((1ULL << chunksz) - 1);
word = i / BITS_PER_LONG;
bit = i % BITS_PER_LONG;
val = (bitmap[word] >> bit) & chunkmask;
if (!first) {
if (buf < end)
*buf = ',';
buf++;
}
first = false;
spec.field_width = DIV_ROUND_UP(chunksz, 4);
buf = number(buf, end, val, spec);
chunksz = CHUNKSZ;
}
return buf;
}
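
/*
 * %*pbl takes the same arguments as %*pb but prints the set bits as a
 * list of ranges, e.g. the 16-bit mask 0x30f5 comes out as "0,2,4-7,12-13".
 */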
static noinline_for_stack
char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
struct printf_spec spec, const char *fmt)
{
int nr_bits = max_t(int, spec.field_width, 0);
/* current bit is 'cur', most recently seen range is [rbot, rtop] */
int cur, rbot, rtop;
bool first = true;
if (check_pointer(&buf, end, bitmap, spec))
return buf;
lib/vsprintf: implement bitmap printing through '%*pb[l]' bitmap and its derivatives such as cpumask and nodemask currently only provide formatting functions which put the output string into the provided buffer; however, how long this buffer should be isn't defined anywhere and given that some of these bitmaps can be too large to be formatted into an on-stack buffer it users sometimes are unnecessarily forced to come up with creative solutions and compromises for the buffer just to printk these bitmaps. There have been a couple different attempts at making this easier. 1. Way back, PeterZ tried printk '%pb' extension with the precision for bit width - '%.*pb'. This was intuitive and made sense but unfortunately triggered a compile warning about using precision for a pointer. http://lkml.kernel.org/g/1336577562.2527.58.camel@twins 2. I implemented bitmap_pr_cont[_list]() and its wrappers for cpumask and nodemask. This works but PeterZ pointed out that pr_cont's tendency to produce broken lines when multiple CPUs are printing is bothering considering the usages. http://lkml.kernel.org/g/1418226774-30215-3-git-send-email-tj@kernel.org So, this patch is another attempt at teaching printk and friends how to print bitmaps. It's almost identical to what PeterZ tried with precision but it uses the field width for the number of bits instead of precision. The format used is '%*pb[l]', with the optional trailing 'l' specifying list format instead of hex masks. This is a valid format string and doesn't trigger compiler warnings; however, it does make it impossible to specify output field width when printing bitmaps. I think this is an acceptable trade-off given how much easier it makes printing bitmaps and that we don't have any in-kernel user which is using the field width specification. If any future user wants to use field width with a bitmap, it'd have to format the bitmap into a string buffer and then print that buffer with width spec, which isn't different from how it should be done now. This patch implements bitmap[_list]_string() which are called from the vsprintf pointer() formatting function. The implementation is mostly identical to bitmap_scn[list]printf() except that the output is performed in the vsprintf way. These functions handle formatting into too small buffers and sprintf() family of functions report the correct overrun output length. bitmap_scn[list]printf() are now thin wrappers around scnprintf(). Signed-off-by: Tejun Heo <tj@kernel.org> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: "David S. Miller" <davem@davemloft.net> Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com> Cc: "John W. Linville" <linville@tuxdriver.com> Cc: "Paul E. 
McKenney" <paulmck@linux.vnet.ibm.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Chris Metcalf <cmetcalf@tilera.com> Cc: Chris Zankel <chris@zankel.net> Cc: Christoph Lameter <cl@linux.com> Cc: Dmitry Torokhov <dmitry.torokhov@gmail.com> Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Li Zefan <lizefan@huawei.com> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Mike Travis <travis@sgi.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: Russell King <linux@arm.linux.org.uk> Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Steffen Klassert <steffen.klassert@secunet.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2015-02-14 01:36:53 +03:00
rbot = cur = find_first_bit(bitmap, nr_bits);
while (cur < nr_bits) {
rtop = cur;
cur = find_next_bit(bitmap, nr_bits, cur + 1);
if (cur < nr_bits && cur <= rtop + 1)
continue;
if (!first) {
if (buf < end)
*buf = ',';
buf++;
}
first = false;
buf = number(buf, end, rbot, default_dec_spec);
if (rbot < rtop) {
if (buf < end)
*buf = '-';
buf++;
buf = number(buf, end, rtop, default_dec_spec);
}
rbot = cur;
}
return buf;
}
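
/*
 * MAC address specifiers, illustrated for the address 00:1b:21:3c:4d:5e:
 *
 *	%pM  -> 00:1b:21:3c:4d:5e	(colon separated)
 *	%pMF -> 00-1b-21-3c-4d-5e	(dash separated, FDDI)
 *	%pMR -> 5e:4d:3c:21:1b:00	(reversed byte order, Bluetooth)
 *	%pm  -> 001b213c4d5e		(no separator)
 */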
static noinline_for_stack
char *mac_address_string(char *buf, char *end, u8 *addr,
struct printf_spec spec, const char *fmt)
{
char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
char *p = mac_addr;
int i;
char separator;
bool reversed = false;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (fmt[1]) {
case 'F':
separator = '-';
break;
case 'R':
reversed = true;
fallthrough;
default:
separator = ':';
break;
}
for (i = 0; i < 6; i++) {
if (reversed)
p = hex_byte_pack(p, addr[5 - i]);
else
p = hex_byte_pack(p, addr[i]);
if (fmt[0] == 'M' && i != 5)
*p++ = separator;
}
*p = '\0';
return string_nocheck(buf, end, mac_addr, spec);
}
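
/*
 * Helper for %pI4/%pi4: 'I' prints trimmed quads (1.2.3.4), 'i' keeps
 * leading zeros (001.002.003.004).  An optional trailing 'h', 'n', 'b'
 * or 'l' selects host, network, big or little endian interpretation of
 * the passed address; network/big endian is the default.
 */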
static noinline_for_stack
char *ip4_string(char *p, const u8 *addr, const char *fmt)
{
int i;
bool leading_zeros = (fmt[0] == 'i');
int index;
int step;
switch (fmt[2]) {
case 'h':
#ifdef __BIG_ENDIAN
index = 0;
step = 1;
#else
index = 3;
step = -1;
#endif
break;
case 'l':
index = 3;
step = -1;
break;
case 'n':
case 'b':
default:
index = 0;
step = 1;
break;
}
for (i = 0; i < 4; i++) {
char temp[4] __aligned(2); /* hold each IP quad in reverse order */
int digits = put_dec_trunc8(temp, addr[index]) - temp;
if (leading_zeros) {
if (digits < 3)
*p++ = '0';
if (digits < 2)
*p++ = '0';
}
/* reverse the digits in the quad */
while (digits--)
*p++ = temp[digits];
if (i < 3)
*p++ = '.';
index += step;
}
*p = '\0';
return p;
}
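
/*
 * Helper for %pI6c: RFC 5952 style output.  The longest run of zero
 * 16-bit groups (longer than one group) is compressed to "::", hex
 * digits are lower case, and IPv4-mapped or ISATAP addresses keep a
 * dotted-quad tail, e.g. "::ffff:192.168.0.1".
 */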
static noinline_for_stack
char *ip6_compressed_string(char *p, const char *addr)
{
int i, j, range;
unsigned char zerolength[8];
int longest = 1;
int colonpos = -1;
u16 word;
u8 hi, lo;
bool needcolon = false;
bool useIPv4;
struct in6_addr in6;
memcpy(&in6, addr, sizeof(struct in6_addr));
useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);
memset(zerolength, 0, sizeof(zerolength));
if (useIPv4)
range = 6;
else
range = 8;
/* find position of longest 0 run */
for (i = 0; i < range; i++) {
for (j = i; j < range; j++) {
if (in6.s6_addr16[j] != 0)
break;
zerolength[i]++;
}
}
for (i = 0; i < range; i++) {
if (zerolength[i] > longest) {
longest = zerolength[i];
colonpos = i;
}
}
if (longest == 1) /* don't compress a single 0 */
colonpos = -1;
/* emit address */
for (i = 0; i < range; i++) {
if (i == colonpos) {
if (needcolon || i == 0)
*p++ = ':';
*p++ = ':';
needcolon = false;
i += longest - 1;
continue;
}
if (needcolon) {
*p++ = ':';
needcolon = false;
}
/* hex u16 without leading 0s */
word = ntohs(in6.s6_addr16[i]);
hi = word >> 8;
lo = word & 0xff;
if (hi) {
if (hi > 0x0f)
p = hex_byte_pack(p, hi);
else
*p++ = hex_asc_lo(hi);
p = hex_byte_pack(p, lo);
} else if (lo > 0x0f)
p = hex_byte_pack(p, lo);
else
*p++ = hex_asc_lo(lo);
needcolon = true;
}
if (useIPv4) {
if (needcolon)
*p++ = ':';
p = ip4_string(p, &in6.s6_addr[12], "I4");
}
*p = '\0';
return p;
}
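
/*
 * Helper for the uncompressed forms: %pI6 prints all eight 16-bit
 * groups separated by colons, %pi6 prints them back to back as 32
 * contiguous hex digits.
 */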
static noinline_for_stack
char *ip6_string(char *p, const char *addr, const char *fmt)
{
int i;
for (i = 0; i < 8; i++) {
p = hex_byte_pack(p, *addr++);
p = hex_byte_pack(p, *addr++);
if (fmt[0] == 'I' && i != 7)
*p++ = ':';
}
*p = '\0';
return p;
}
static noinline_for_stack
char *ip6_addr_string(char *buf, char *end, const u8 *addr,
struct printf_spec spec, const char *fmt)
{
char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
if (fmt[0] == 'I' && fmt[2] == 'c')
ip6_compressed_string(ip6_addr, addr);
else
ip6_string(ip6_addr, addr, fmt);
return string_nocheck(buf, end, ip6_addr, spec);
}
static noinline_for_stack
char *ip4_addr_string(char *buf, char *end, const u8 *addr,
struct printf_spec spec, const char *fmt)
{
char ip4_addr[sizeof("255.255.255.255")];
ip4_string(ip4_addr, addr, fmt);
return string_nocheck(buf, end, ip4_addr, spec);
}
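
/*
 * %pIS for an IPv6 struct sockaddr_in6.  The flag characters parsed
 * below extend the plain address, e.g. (illustrative):
 *
 *	%pISc  -> 2001:db8::1			(compressed address)
 *	%pISpc -> [2001:db8::1]:12345		(with port)
 *	%pISsc -> [2001:db8::1]%4		(with scope id)
 *
 * 'f' appends "/flowinfo" in the same bracketed form.
 */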
static noinline_for_stack
char *ip6_addr_string_sa(char *buf, char *end, const struct sockaddr_in6 *sa,
struct printf_spec spec, const char *fmt)
{
bool have_p = false, have_s = false, have_f = false, have_c = false;
char ip6_addr[sizeof("[xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255]") +
sizeof(":12345") + sizeof("/123456789") +
sizeof("%1234567890")];
char *p = ip6_addr, *pend = ip6_addr + sizeof(ip6_addr);
const u8 *addr = (const u8 *) &sa->sin6_addr;
char fmt6[2] = { fmt[0], '6' };
u8 off = 0;
fmt++;
while (isalpha(*++fmt)) {
switch (*fmt) {
case 'p':
have_p = true;
break;
case 'f':
have_f = true;
break;
case 's':
have_s = true;
break;
case 'c':
have_c = true;
break;
}
}
if (have_p || have_s || have_f) {
*p = '[';
off = 1;
}
if (fmt6[0] == 'I' && have_c)
p = ip6_compressed_string(ip6_addr + off, addr);
else
p = ip6_string(ip6_addr + off, addr, fmt6);
if (have_p || have_s || have_f)
*p++ = ']';
if (have_p) {
*p++ = ':';
p = number(p, pend, ntohs(sa->sin6_port), spec);
}
if (have_f) {
*p++ = '/';
p = number(p, pend, ntohl(sa->sin6_flowinfo &
IPV6_FLOWINFO_MASK), spec);
}
if (have_s) {
*p++ = '%';
p = number(p, pend, sa->sin6_scope_id, spec);
}
*p = '\0';
return string_nocheck(buf, end, ip6_addr, spec);
}
static noinline_for_stack
char *ip4_addr_string_sa(char *buf, char *end, const struct sockaddr_in *sa,
struct printf_spec spec, const char *fmt)
{
bool have_p = false;
char *p, ip4_addr[sizeof("255.255.255.255") + sizeof(":12345")];
char *pend = ip4_addr + sizeof(ip4_addr);
const u8 *addr = (const u8 *) &sa->sin_addr.s_addr;
char fmt4[3] = { fmt[0], '4', 0 };
fmt++;
while (isalpha(*++fmt)) {
switch (*fmt) {
case 'p':
have_p = true;
break;
case 'h':
case 'l':
case 'n':
case 'b':
fmt4[2] = *fmt;
break;
}
}
p = ip4_string(ip4_addr, addr, fmt4);
if (have_p) {
*p++ = ':';
p = number(p, pend, ntohs(sa->sin_port), spec);
}
*p = '\0';
return string_nocheck(buf, end, ip4_addr, spec);
}
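
/*
 * Top-level dispatcher for %pI4, %pI6, %pi4, %pi6 and the generic
 * sockaddr forms %pIS/%piS, which pick the IPv4 or IPv6 printer based
 * on sa_family.  Unknown families yield "(einval)", unknown format
 * suffixes "(%pI?)" or "(%pi?)".
 */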
static noinline_for_stack
char *ip_addr_string(char *buf, char *end, const void *ptr,
struct printf_spec spec, const char *fmt)
{
char *err_fmt_msg;
if (check_pointer(&buf, end, ptr, spec))
return buf;
switch (fmt[1]) {
case '6':
return ip6_addr_string(buf, end, ptr, spec, fmt);
case '4':
return ip4_addr_string(buf, end, ptr, spec, fmt);
case 'S': {
const union {
struct sockaddr raw;
struct sockaddr_in v4;
struct sockaddr_in6 v6;
} *sa = ptr;
switch (sa->raw.sa_family) {
case AF_INET:
return ip4_addr_string_sa(buf, end, &sa->v4, spec, fmt);
case AF_INET6:
return ip6_addr_string_sa(buf, end, &sa->v6, spec, fmt);
default:
return error_string(buf, end, "(einval)", spec);
}}
}
err_fmt_msg = fmt[0] == 'i' ? "(%pi?)" : "(%pI?)";
return error_string(buf, end, err_fmt_msg, spec);
}
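
/*
 * %*pE[achnops]: print a memory buffer with special characters escaped;
 * the field width is the buffer length and each trailing letter adds
 * one ESCAPE_* flag (a=ANY, c=SPECIAL, h=HEX, n=NULL, o=OCTAL, p=NP,
 * s=SPACE), ESCAPE_ANY_NP being the default.  Illustrative use:
 *
 *	printk("%*pE\n", len, buf);
 */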
static noinline_for_stack
char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
const char *fmt)
{
bool found = true;
int count = 1;
unsigned int flags = 0;
int len;
if (spec.field_width == 0)
return buf; /* nothing to print */
if (check_pointer(&buf, end, addr, spec))
return buf;
do {
switch (fmt[count++]) {
case 'a':
flags |= ESCAPE_ANY;
break;
case 'c':
flags |= ESCAPE_SPECIAL;
break;
case 'h':
flags |= ESCAPE_HEX;
break;
case 'n':
flags |= ESCAPE_NULL;
break;
case 'o':
flags |= ESCAPE_OCTAL;
break;
case 'p':
flags |= ESCAPE_NP;
break;
case 's':
flags |= ESCAPE_SPACE;
break;
default:
found = false;
break;
}
} while (found);
if (!flags)
flags = ESCAPE_ANY_NP;
len = spec.field_width < 0 ? 1 : spec.field_width;
/*
* string_escape_mem() writes as many characters as it can to
* the given buffer, and returns the total size of the output
* had the buffer been big enough.
*/
buf += string_escape_mem(addr, len, buf, buf < end ? end - buf : 0, flags, NULL);
return buf;
}
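
/*
 * %pV: print a nested format string and va_list, e.g. (illustrative):
 *
 *	struct va_format vaf = { .fmt = fmt, .va = &args };
 *	printk("%s: %pV", prefix, &vaf);
 *
 * The va_list is va_copy()'d below so the caller's copy is not consumed.
 */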
static char *va_format(char *buf, char *end, struct va_format *va_fmt,
struct printf_spec spec, const char *fmt)
{
va_list va;
if (check_pointer(&buf, end, va_fmt, spec))
return buf;
va_copy(va, *va_fmt->va);
buf += vsnprintf(buf, end > buf ? end - buf : 0, va_fmt->fmt, va);
va_end(va);
return buf;
}
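
/*
 * %pU[bBlL]: print a 16-byte UUID.  'b'/'B' keep the stored byte order
 * (the default), 'l'/'L' swap the first three fields into little
 * endian (GUID) order; the upper case letters select upper case hex
 * digits.
 */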
static noinline_for_stack
char *uuid_string(char *buf, char *end, const u8 *addr,
struct printf_spec spec, const char *fmt)
{
char uuid[UUID_STRING_LEN + 1];
char *p = uuid;
int i;
const u8 *index = uuid_index;
bool uc = false;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (*(++fmt)) {
case 'L':
uc = true;
fallthrough;
case 'l':
index = guid_index;
break;
case 'B':
uc = true;
break;
}
for (i = 0; i < 16; i++) {
if (uc)
p = hex_byte_pack_upper(p, addr[index[i]]);
else
p = hex_byte_pack(p, addr[index[i]]);
switch (i) {
case 3:
case 5:
case 7:
case 9:
*p++ = '-';
break;
}
}
*p = 0;
return string_nocheck(buf, end, uuid, spec);
}
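
/*
 * %pNF: print a netdev_features_t passed by reference as 0x-prefixed,
 * zero-padded hex, e.g. printk("%pNF\n", &dev->features).
 */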
static noinline_for_stack
char *netdev_bits(char *buf, char *end, const void *addr,
struct printf_spec spec, const char *fmt)
{
unsigned long long num;
int size;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (fmt[1]) {
case 'F':
num = *(const netdev_features_t *)addr;
size = sizeof(netdev_features_t);
break;
default:
return error_string(buf, end, "(%pN?)", spec);
}
return special_hex_number(buf, end, num, size);
}
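
/*
 * %p4cc: print a V4L2/DRM fourcc code passed by reference; bit 31
 * flags the big-endian variant.  Illustratively, DRM_FORMAT_XRGB8888
 * prints as "XR24 little-endian (0x34325258)".
 */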
static noinline_for_stack
char *fourcc_string(char *buf, char *end, const u32 *fourcc,
struct printf_spec spec, const char *fmt)
{
char output[sizeof("0123 little-endian (0x01234567)")];
char *p = output;
unsigned int i;
u32 val;
if (fmt[1] != 'c' || fmt[2] != 'c')
return error_string(buf, end, "(%p4?)", spec);
if (check_pointer(&buf, end, fourcc, spec))
return buf;
val = *fourcc & ~BIT(31);
for (i = 0; i < sizeof(*fourcc); i++) {
unsigned char c = val >> (i * 8);
/* Print non-control ASCII characters as-is, dot otherwise */
*p++ = isascii(c) && isprint(c) ? c : '.';
}
strcpy(p, *fourcc & BIT(31) ? " big-endian" : " little-endian");
p += strlen(p);
*p++ = ' ';
*p++ = '(';
p = special_hex_number(p, output + sizeof(output) - 2, *fourcc, sizeof(u32));
*p++ = ')';
*p = '\0';
return string(buf, end, output, spec);
}
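
/*
 * %pa[pd]: print a phys_addr_t ('p' or no suffix) or dma_addr_t ('d')
 * passed by reference as 0x-prefixed hex sized to the type, e.g.
 * (illustrative):
 *
 *	phys_addr_t start;
 *
 *	printk("region at %pa\n", &start);
 */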
static noinline_for_stack
char *address_val(char *buf, char *end, const void *addr,
struct printf_spec spec, const char *fmt)
{
unsigned long long num;
int size;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (fmt[1]) {
case 'd':
num = *(const dma_addr_t *)addr;
size = sizeof(dma_addr_t);
break;
case 'p':
default:
num = *(const phys_addr_t *)addr;
size = sizeof(phys_addr_t);
break;
}
return special_hex_number(buf, end, num, size);
}
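/*
 * Usage sketch added for illustration (not part of the original source):
 * physical and DMA addresses are passed by reference so the correct
 * width is used on every architecture, e.g.
 *
 *	phys_addr_t paddr = 0x40000000;
 *	dma_addr_t daddr = 0x80000000;
 *
 *	pr_info("phys %pa, dma %pad\n", &paddr, &daddr);
 */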
static noinline_for_stack
char *date_str(char *buf, char *end, const struct rtc_time *tm, bool r)
{
int year = tm->tm_year + (r ? 0 : 1900);
int mon = tm->tm_mon + (r ? 0 : 1);
buf = number(buf, end, year, default_dec04_spec);
if (buf < end)
*buf = '-';
buf++;
buf = number(buf, end, mon, default_dec02_spec);
if (buf < end)
*buf = '-';
buf++;
return number(buf, end, tm->tm_mday, default_dec02_spec);
}
static noinline_for_stack
char *time_str(char *buf, char *end, const struct rtc_time *tm, bool r)
{
buf = number(buf, end, tm->tm_hour, default_dec02_spec);
if (buf < end)
*buf = ':';
buf++;
buf = number(buf, end, tm->tm_min, default_dec02_spec);
if (buf < end)
*buf = ':';
buf++;
return number(buf, end, tm->tm_sec, default_dec02_spec);
}
static noinline_for_stack
char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
struct printf_spec spec, const char *fmt)
{
bool have_t = true, have_d = true;
bool raw = false, iso8601_separator = true;
bool found = true;
int count = 2;
if (check_pointer(&buf, end, tm, spec))
return buf;
switch (fmt[count]) {
case 'd':
have_t = false;
count++;
break;
case 't':
have_d = false;
count++;
break;
}
do {
switch (fmt[count++]) {
case 'r':
raw = true;
break;
case 's':
iso8601_separator = false;
break;
default:
found = false;
break;
}
} while (found);
if (have_d)
buf = date_str(buf, end, tm, raw);
if (have_d && have_t) {
if (buf < end)
*buf = iso8601_separator ? 'T' : ' ';
buf++;
}
if (have_t)
buf = time_str(buf, end, tm, raw);
return buf;
}
static noinline_for_stack
char *time64_str(char *buf, char *end, const time64_t time,
struct printf_spec spec, const char *fmt)
{
struct rtc_time rtc_time;
struct tm tm;
time64_to_tm(time, 0, &tm);
rtc_time.tm_sec = tm.tm_sec;
rtc_time.tm_min = tm.tm_min;
rtc_time.tm_hour = tm.tm_hour;
rtc_time.tm_mday = tm.tm_mday;
rtc_time.tm_mon = tm.tm_mon;
rtc_time.tm_year = tm.tm_year;
rtc_time.tm_wday = tm.tm_wday;
rtc_time.tm_yday = tm.tm_yday;
rtc_time.tm_isdst = 0;
return rtc_str(buf, end, &rtc_time, spec, fmt);
}
static noinline_for_stack
char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec,
const char *fmt)
{
switch (fmt[1]) {
case 'R':
return rtc_str(buf, end, (const struct rtc_time *)ptr, spec, fmt);
case 'T':
return time64_str(buf, end, *(const time64_t *)ptr, spec, fmt);
default:
return error_string(buf, end, "(%pt?)", spec);
}
}
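/*
 * Usage sketch added for illustration (not part of the original source),
 * assuming the usual %pt dispatch from pointer():
 *
 *	struct rtc_time tm;
 *	time64_t t = ktime_get_real_seconds();
 *
 *	pr_info("rtc: %ptR\n", &tm);	e.g. "2021-03-19T13:12:46"
 *	pr_info("date: %ptRd\n", &tm);	date only, "2021-03-19"
 *	pr_info("epoch: %ptT\n", &t);	time64_t rendered the same way
 *
 * The 's' flag replaces the ISO 8601 'T' separator with a space and the
 * 'r' flag prints the raw, unadjusted year and month fields.
 */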
static noinline_for_stack
char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
const char *fmt)
{
if (!IS_ENABLED(CONFIG_HAVE_CLK))
return error_string(buf, end, "(%pC?)", spec);
if (check_pointer(&buf, end, clk, spec))
return buf;
switch (fmt[1]) {
case 'n':
default:
#ifdef CONFIG_COMMON_CLK
return string(buf, end, __clk_get_name(clk), spec);
#else
return ptr_to_id(buf, end, clk, spec);
#endif
}
}
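/*
 * Usage sketch added for illustration (not part of the original source):
 * with CONFIG_COMMON_CLK the clock's name is printed, otherwise only a
 * hashed pointer id, e.g.
 *
 *	pr_info("using clock %pC\n", clk);
 *
 * where "clk" is assumed to be a struct clk pointer obtained elsewhere.
 */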
static
char *format_flags(char *buf, char *end, unsigned long flags,
const struct trace_print_flags *names)
{
unsigned long mask;
for ( ; flags && names->name; names++) {
mask = names->mask;
if ((flags & mask) != mask)
continue;
buf = string(buf, end, names->name, default_str_spec);
flags &= ~mask;
if (flags) {
if (buf < end)
*buf = '|';
buf++;
}
}
if (flags)
buf = number(buf, end, flags, default_flag_spec);
return buf;
}
struct page_flags_fields {
int width;
int shift;
int mask;
const struct printf_spec *spec;
const char *name;
};
static const struct page_flags_fields pff[] = {
{SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK,
&default_dec_spec, "section"},
{NODES_WIDTH, NODES_PGSHIFT, NODES_MASK,
&default_dec_spec, "node"},
{ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK,
&default_dec_spec, "zone"},
{LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK,
&default_flag_spec, "lastcpupid"},
{KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK,
&default_flag_spec, "kasantag"},
};
static
char *format_page_flags(char *buf, char *end, unsigned long flags)
{
unsigned long main_flags = flags & PAGEFLAGS_MASK;
bool append = false;
int i;
buf = number(buf, end, flags, default_flag_spec);
if (buf < end)
*buf = '(';
buf++;
/* Page flags from the main area. */
if (main_flags) {
buf = format_flags(buf, end, main_flags, pageflag_names);
append = true;
}
/* Page flags from the fields area */
for (i = 0; i < ARRAY_SIZE(pff); i++) {
/* Skip undefined fields. */
if (!pff[i].width)
continue;
/* Format: Flag Name + '=' (equals sign) + Number + '|' (separator) */
if (append) {
if (buf < end)
*buf = '|';
buf++;
}
buf = string(buf, end, pff[i].name, default_str_spec);
if (buf < end)
*buf = '=';
buf++;
buf = number(buf, end, (flags >> pff[i].shift) & pff[i].mask,
*pff[i].spec);
append = true;
}
if (buf < end)
*buf = ')';
buf++;
return buf;
}
static noinline_for_stack
char *flags_string(char *buf, char *end, void *flags_ptr,
struct printf_spec spec, const char *fmt)
{
unsigned long flags;
const struct trace_print_flags *names;
if (check_pointer(&buf, end, flags_ptr, spec))
return buf;
switch (fmt[1]) {
case 'p':
return format_page_flags(buf, end, *(unsigned long *)flags_ptr);
case 'v':
flags = *(unsigned long *)flags_ptr;
names = vmaflag_names;
break;
case 'g':
flags = (__force unsigned long)(*(gfp_t *)flags_ptr);
names = gfpflag_names;
break;
default:
return error_string(buf, end, "(%pG?)", spec);
}
return format_flags(buf, end, flags, names);
}
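/*
 * Usage sketch added for illustration (not part of the original source):
 * the flag word is passed by reference in all three variants, e.g.
 *
 *	gfp_t gfp = GFP_KERNEL | __GFP_ZERO;
 *
 *	pr_info("page flags: %pGp\n", &page->flags);
 *	pr_info("gfp flags: %pGg\n", &gfp);
 *	pr_info("vma flags: %pGv\n", &vma->vm_flags);
 *
 * where "page" and "vma" are assumed to be valid struct page and
 * struct vm_area_struct pointers.
 */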
static noinline_for_stack
char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf,
char *end)
{
int depth;
/* Walk from the root node down to the current node. */
for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
struct fwnode_handle *__fwnode =
fwnode_get_nth_parent(fwnode, depth);
buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
default_str_spec);
buf = string(buf, end, fwnode_get_name(__fwnode),
default_str_spec);
fwnode_handle_put(__fwnode);
}
return buf;
}
static noinline_for_stack
char *device_node_string(char *buf, char *end, struct device_node *dn,
struct printf_spec spec, const char *fmt)
{
char tbuf[sizeof("xxxx") + 1];
const char *p;
int ret;
char *buf_start = buf;
struct property *prop;
bool has_mult, pass;
struct printf_spec str_spec = spec;
str_spec.field_width = -1;
if (fmt[0] != 'F')
return error_string(buf, end, "(%pO?)", spec);
if (!IS_ENABLED(CONFIG_OF))
return error_string(buf, end, "(%pOF?)", spec);
if (check_pointer(&buf, end, dn, spec))
return buf;
/* Simple case: no further format specifiers, default to the full name ('f') */
fmt++;
if (fmt[0] == '\0' || strcspn(fmt, "fnpPFcC") > 0)
fmt = "f";
for (pass = false; strspn(fmt, "fnpPFcC"); fmt++, pass = true) {
int precision;
if (pass) {
if (buf < end)
*buf = ':';
buf++;
}
switch (*fmt) {
case 'f': /* full_name */
buf = fwnode_full_name_string(of_fwnode_handle(dn), buf,
end);
break;
case 'n': /* name */
p = fwnode_get_name(of_fwnode_handle(dn));
precision = str_spec.precision;
str_spec.precision = strchrnul(p, '@') - p;
buf = string(buf, end, p, str_spec);
str_spec.precision = precision;
break;
case 'p': /* phandle */
buf = number(buf, end, (unsigned int)dn->phandle, default_dec_spec);
break;
case 'P': /* path-spec */
p = fwnode_get_name(of_fwnode_handle(dn));
if (!p[1])
p = "/";
buf = string(buf, end, p, str_spec);
break;
case 'F': /* flags */
tbuf[0] = of_node_check_flag(dn, OF_DYNAMIC) ? 'D' : '-';
tbuf[1] = of_node_check_flag(dn, OF_DETACHED) ? 'd' : '-';
tbuf[2] = of_node_check_flag(dn, OF_POPULATED) ? 'P' : '-';
tbuf[3] = of_node_check_flag(dn, OF_POPULATED_BUS) ? 'B' : '-';
tbuf[4] = 0;
buf = string_nocheck(buf, end, tbuf, str_spec);
break;
case 'c': /* major compatible string */
ret = of_property_read_string(dn, "compatible", &p);
if (!ret)
buf = string(buf, end, p, str_spec);
break;
case 'C': /* full compatible string */
has_mult = false;
of_property_for_each_string(dn, "compatible", prop, p) {
if (has_mult)
buf = string_nocheck(buf, end, ",", str_spec);
buf = string_nocheck(buf, end, "\"", str_spec);
buf = string(buf, end, p, str_spec);
buf = string_nocheck(buf, end, "\"", str_spec);
has_mult = true;
}
break;
default:
break;
}
}
return widen_string(buf, buf - buf_start, end, spec);
}
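/*
 * Usage sketch added for illustration (not part of the original source):
 * several modifiers can be chained and their outputs are separated by
 * colons, e.g. for an assumed struct device_node pointer "np":
 *
 *	pr_info("node: %pOF\n", np);	full path, same as %pOFf
 *	pr_info("node: %pOFnc\n", np);	node name, then first compatible
 */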
static noinline_for_stack
char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
struct printf_spec spec, const char *fmt)
{
struct printf_spec str_spec = spec;
char *buf_start = buf;
str_spec.field_width = -1;
if (*fmt != 'w')
return error_string(buf, end, "(%pf?)", spec);
if (check_pointer(&buf, end, fwnode, spec))
return buf;
fmt++;
switch (*fmt) {
case 'P': /* name */
buf = string(buf, end, fwnode_get_name(fwnode), str_spec);
break;
case 'f': /* full_name */
default:
buf = fwnode_full_name_string(fwnode, buf, end);
break;
}
return widen_string(buf, buf - buf_start, end, spec);
}
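/*
 * Usage sketch added for illustration (not part of the original source),
 * for an assumed struct fwnode_handle pointer "fwnode":
 *
 *	pr_info("fwnode: %pfw\n", fwnode);	full path (default / 'f')
 *	pr_info("fwnode: %pfwP\n", fwnode);	node name only
 */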
/* Disable pointer hashing if requested */
bool no_hash_pointers __ro_after_init;
EXPORT_SYMBOL_GPL(no_hash_pointers);
int __init no_hash_pointers_enable(char *str)
{
if (no_hash_pointers)
return 0;
no_hash_pointers = true;
pr_warn("**********************************************************\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("** **\n");
pr_warn("** This system shows unhashed kernel memory addresses **\n");
pr_warn("** via the console, logs, and other interfaces. This **\n");
pr_warn("** might reduce the security of your system. **\n");
pr_warn("** **\n");
pr_warn("** If you see this message and you are not debugging **\n");
pr_warn("** the kernel, report this immediately to your system **\n");
pr_warn("** administrator! **\n");
pr_warn("** **\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("**********************************************************\n");
return 0;
}
early_param("no_hash_pointers", no_hash_pointers_enable);
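/*
 * Usage sketch added for illustration (not part of the original source):
 * hashing of plain %p output can be disabled for debugging by booting
 * with the parameter on the kernel command line, e.g.
 *
 *	console=ttyS0 no_hash_pointers
 *
 * which triggers the warning banner above and makes %p print real
 * addresses.
 */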
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
* specifiers.
*
* Please update scripts/checkpatch.pl when adding/removing conversion
* characters. (Search for "check for vsprintf extension").
*
* Right now we handle:
*
* - 'S' For symbolic direct pointers (or function descriptors) with offset
* - 's' For symbolic direct pointers (or function descriptors) without offset
* - '[Ss]R' as above with __builtin_extract_return_addr() translation
* - 'S[R]b' as above with module build ID (for use in backtraces)
* - '[Ff]' %pf and %pF were obsoleted and later removed in favor of
* %ps and %pS. Be careful when re-using these specifiers.
* - 'B' For backtraced symbolic direct pointers with offset
* - 'Bb' as above with module build ID (for use in backtraces)
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
* - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
* - 'b[l]' For a bitmap, the number of bits is determined by the field
* width which must be explicitly specified either as part of the
* format string '%32b[l]' or through '%*b[l]', [l] selects
* range-list format instead of hex format
* - 'M' For a 6-byte MAC address, it prints the address in the
* usual colon-separated hex notation
* - 'm' For a 6-byte MAC address, it prints the hex address without colons
* - 'MF' For a 6-byte MAC FDDI address, it prints the address
* with a dash-separated hex notation
* - '[mM]R' For a 6-byte MAC address, Reverse order (Bluetooth)
* - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
* IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
* IPv6 uses colon separated network-order 16 bit hex with leading 0's
* [S][pfs]
* Generic IPv4/IPv6 address (struct sockaddr *) that falls back to
* [4] or [6] and is able to print port [p], flowinfo [f], scope [s]
* - 'i' [46] for 'raw' IPv4/IPv6 addresses
* IPv6 omits the colons (01020304...0f)
* IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
* [S][pfs]
* Generic IPv4/IPv6 address (struct sockaddr *) that falls back to
* [4] or [6] and is able to print port [p], flowinfo [f], scope [s]
* - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order
* - 'I[6S]c' for IPv6 addresses printed as specified by
* https://tools.ietf.org/html/rfc5952
* - 'E[achnops]' For an escaped buffer, where rules are defined by a combination
* of the following flags (see string_escape_mem() for the
* details):
* a - ESCAPE_ANY
* c - ESCAPE_SPECIAL
* h - ESCAPE_HEX
* n - ESCAPE_NULL
* o - ESCAPE_OCTAL
* p - ESCAPE_NP
* s - ESCAPE_SPACE
* By default ESCAPE_ANY_NP is used.
* - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
* "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
* Options for %pU are:
* b big endian lower case hex (default)
* B big endian UPPER case hex
* l little endian lower case hex
* L little endian UPPER case hex
* big endian output byte order is:
* [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15]
* little endian output byte order is:
* [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15]
* - 'V' For a struct va_format which contains a format string * and va_list *,
* call vsnprintf(->format, *->va_list).
* Implements a "recursive vsnprintf".
* Do not use this feature without some mechanism to verify the
* correctness of the format string and va_list arguments.
* - 'K' For a kernel pointer that should be hidden from unprivileged users.
* Use only for procfs, sysfs and similar files, not printk(); please
* read the documentation (path below) first.
* - 'NF' For a netdev_features_t
* - '4cc' V4L2 or DRM FourCC code, with endianness and raw numerical value.
* - 'h[CDN]' For a variable-length buffer, prints it as a hex string with
* a certain separator (' ' by default):
* C colon
* D dash
* N no separator
* The maximum supported length is 64 bytes of the input. Consider
* using print_hex_dump() for larger inputs.
* - 'a[pd]' For address types [p] phys_addr_t, [d] dma_addr_t and derivatives
* (default assumed to be phys_addr_t, passed by reference)
* - 'd[234]' For a dentry name (optionally 2-4 last components)
* - 'D[234]' Same as 'd' but for a struct file
* - 'g' For block_device name (gendisk + partition number)
* - 't[RT][dt][r][s]' For time and date as represented by:
* R struct rtc_time
* T time64_t
* - 'C' For a clock, it prints the name (Common Clock Framework) or address
* (legacy clock framework) of the clock
* - 'Cn' For a clock, it prints the name (Common Clock Framework) or address
* (legacy clock framework) of the clock
* - 'G' For flags to be printed as a collection of symbolic strings that would
* construct the specific value. Supported flags given by option:
* p page flags (see struct page) given as pointer to unsigned long
* g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t
* v vma flags (VM_*) given as pointer to unsigned long
* - 'OF[fnpPcCF]' For a device tree object
* Without any optional arguments prints the full_name
* f device node full_name
* n device node name
* p device node phandle
* P device node path spec (name + @unit)
* F device node flags
* c major compatible string
* C full compatible string
* - 'fw[fP]' For a firmware node (struct fwnode_handle) pointer
* Without an option prints the full name of the node
* f full name
* P node name, including a possible unit address
* - 'x' For printing the address unmodified. Equivalent to "%lx".
* Please read the documentation (path below) before using!
* - '[ku]s' For a BPF/tracing related format specifier, e.g. used out of
* bpf_trace_printk() where [ku] prefix specifies either kernel (k)
* or user (u) memory to probe, and:
* s a string, equivalent to "%s" on direct vsnprintf() use
*
* ** When making changes please also update:
* Documentation/core-api/printk-formats.rst
*
* Note: The default behaviour (unadorned %p) is to hash the address,
* rendering it useful as a unique identifier.
*/
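/*
* Illustrative usage of a few of the extensions documented above.  This is a
* sketch, not part of the build; 'bits' stands for a suitably sized bitmap,
* 'uuid' for a uuid_t, and the output shown is approximate:
*
*	u8 mac[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
*	__be32 ip = cpu_to_be32(0x01020304);
*
*	printk("%pM\n", mac);			-> "00:01:02:03:04:05"
*	printk("%pI4\n", &ip);			-> "1.2.3.4"
*	printk("%*pbl\n", 16, bits);		-> e.g. "0-3,9"
*	printk("%pUb\n", &uuid);		-> lower case hex UUID
*	printk("%pe\n", ERR_PTR(-ENOSPC));	-> "-ENOSPC"
*
* Most extensions take a pointer to the data being printed; %pe takes the
* ERR_PTR value itself.  See Documentation/core-api/printk-formats.rst for
* the authoritative list.
*/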
static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
struct printf_spec spec)
{
switch (*fmt) {
case 'S':
case 's':
ptr = dereference_symbol_descriptor(ptr);
fallthrough;
case 'B':
return symbol_string(buf, end, ptr, spec, fmt);
case 'R':
case 'r':
return resource_string(buf, end, ptr, spec, fmt);
case 'h':
return hex_string(buf, end, ptr, spec, fmt);
case 'b':
switch (fmt[1]) {
case 'l':
return bitmap_list_string(buf, end, ptr, spec, fmt);
default:
return bitmap_string(buf, end, ptr, spec, fmt);
}
case 'M': /* Colon separated: 00:01:02:03:04:05 */
case 'm': /* Contiguous: 000102030405 */
/* [mM]F (FDDI) */
/* [mM]R (Reverse order; Bluetooth) */
return mac_address_string(buf, end, ptr, spec, fmt);
case 'I': /* Formatted IP supported
* 4: 1.2.3.4
* 6: 0001:0203:...:0708
* 6c: 1::708 or 1::1.2.3.4
*/
case 'i': /* Contiguous:
* 4: 001.002.003.004
* 6: 000102...0f
*/
return ip_addr_string(buf, end, ptr, spec, fmt);
case 'E':
return escaped_string(buf, end, ptr, spec, fmt);
case 'U':
return uuid_string(buf, end, ptr, spec, fmt);
case 'V':
return va_format(buf, end, ptr, spec, fmt);
case 'K':	/* restricted kernel pointer, obeys kptr_restrict */
return restricted_pointer(buf, end, ptr, spec);
case 'N':	/* %pNF: netdev_features_t */
return netdev_bits(buf, end, ptr, spec, fmt);
case '4':	/* %p4cc: V4L2/DRM fourcc code */
return fourcc_string(buf, end, ptr, spec, fmt);
case 'a':
return address_val(buf, end, ptr, spec, fmt);
case 'd':
return dentry_name(buf, end, ptr, spec, fmt);
case 't':
return time_and_date(buf, end, ptr, spec, fmt);
case 'C':
return clock(buf, end, ptr, spec, fmt);
case 'D':
return file_dentry_name(buf, end, ptr, spec, fmt);
#ifdef CONFIG_BLOCK
case 'g':
return bdev_name(buf, end, ptr, spec, fmt);
#endif
case 'G':
return flags_string(buf, end, ptr, spec, fmt);
case 'O':
return device_node_string(buf, end, ptr, spec, fmt + 1);
case 'f':
return fwnode_string(buf, end, ptr, spec, fmt + 1);
case 'x':	/* raw address, no hashing -- use with care */
return pointer_string(buf, end, ptr, spec);
case 'e':
/* %pe with a non-ERR_PTR gets treated as plain %p */
if (!IS_ERR(ptr))
break;
return err_ptr(buf, end, ptr, spec);
case 'u':
case 'k':
switch (fmt[1]) {
case 's':
return string(buf, end, ptr, spec);
default:
return error_string(buf, end, "(einval)", spec);
}
}
/*
* default is to _not_ leak addresses, so hash before printing,
* unless no_hash_pointers is specified on the command line.
*/
if (unlikely(no_hash_pointers))
return pointer_string(buf, end, ptr, spec);
else
return ptr_to_id(buf, end, ptr, spec);
}
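/*
* Illustrative behaviour of the default case above (a sketch, the values are
* made up): with pointer hashing in effect,
*
*	printk("%p %px\n", ptr, ptr);
*
* might print "000000006acbd6d4 ffff8880411fc000" -- a per-boot hashed id
* followed by the raw address (%px bypasses hashing, use it sparingly).
* Booting with the "no_hash_pointers" parameter makes plain %p print the
* raw address as well.
*/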
/*
* Helper function to decode printf style format.
* Each call decodes a token from the format and returns the
* number of characters read (i.e. how far the caller should advance
* the format string before the next call).
* The decoded token is returned through @spec.
*
* 'h', 'l', or 'L' for integer fields
* 'z' support added 23/7/1999 S.H.
* 'z' changed to 'Z' --davidm 1/25/99
* 'Z' changed to 'z' --adobriyan 2017-01-25
* 't' added for ptrdiff_t
*
* @fmt: the format string
* @spec: filled in with the decoded token: type, flags (+, -, #, 0, space),
*        field width, base of the number (octal, hex, ...) and precision.
*        The length qualifier ('h', 'l', 'll', 'z', 't', ...) is parsed here
*        as well and only affects the resulting type.
*/
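/*
* Example walk-through (illustrative): for the format "value=%08lx", the
* first call consumes the literal "value=" and returns 6 with spec->type
* still FORMAT_TYPE_NONE; the second call consumes "%08lx", setting ZEROPAD,
* field_width = 8, base = 16 and an unsigned long type (via the 'l'
* qualifier), and returns 5.  A '*' width or precision instead makes the
* function return early with FORMAT_TYPE_WIDTH or FORMAT_TYPE_PRECISION so
* that the caller can fetch the value from the argument list and call
* format_decode() again.
*/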
static noinline_for_stack
int format_decode(const char *fmt, struct printf_spec *spec)
{
const char *start = fmt;
char qualifier;
/* we finished early by reading the field width */
if (spec->type == FORMAT_TYPE_WIDTH) {
if (spec->field_width < 0) {
spec->field_width = -spec->field_width;
spec->flags |= LEFT;
}
spec->type = FORMAT_TYPE_NONE;
goto precision;
}
/* we finished early by reading the precision */
if (spec->type == FORMAT_TYPE_PRECISION) {
if (spec->precision < 0)
spec->precision = 0;
spec->type = FORMAT_TYPE_NONE;
goto qualifier;
}
/* By default */
spec->type = FORMAT_TYPE_NONE;
for (; *fmt ; ++fmt) {
if (*fmt == '%')
break;
}
/* Return the current non-format string */
if (fmt != start || !*fmt)
return fmt - start;
/* Process flags */
spec->flags = 0;
while (1) { /* this also skips first '%' */
bool found = true;
++fmt;
switch (*fmt) {
case '-': spec->flags |= LEFT; break;
case '+': spec->flags |= PLUS; break;
case ' ': spec->flags |= SPACE; break;
case '#': spec->flags |= SPECIAL; break;
case '0': spec->flags |= ZEROPAD; break;
default: found = false;
}
if (!found)
break;
}
/* get field width */
spec->field_width = -1;
if (isdigit(*fmt))
spec->field_width = skip_atoi(&fmt);
else if (*fmt == '*') {
/* it's the next argument */
spec->type = FORMAT_TYPE_WIDTH;
return ++fmt - start;
}
precision:
/* get the precision */
spec->precision = -1;
if (*fmt == '.') {
++fmt;
if (isdigit(*fmt)) {
spec->precision = skip_atoi(&fmt);
if (spec->precision < 0)
spec->precision = 0;
} else if (*fmt == '*') {
/* it's the next argument */
spec->type = FORMAT_TYPE_PRECISION;
return ++fmt - start;
}
}
qualifier:
/* get the conversion qualifier */
qualifier = 0;
if (*fmt == 'h' || _tolower(*fmt) == 'l' ||
*fmt == 'z' || *fmt == 't') {
qualifier = *fmt++;
if (unlikely(qualifier == *fmt)) {
if (qualifier == 'l') {
qualifier = 'L';
++fmt;
} else if (qualifier == 'h') {
qualifier = 'H';
++fmt;
}
}
}
/* default base */
spec->base = 10;
switch (*fmt) {
case 'c':
spec->type = FORMAT_TYPE_CHAR;
return ++fmt - start;
case 's':
spec->type = FORMAT_TYPE_STR;
return ++fmt - start;
case 'p':
spec->type = FORMAT_TYPE_PTR;
return ++fmt - start;
case '%':
spec->type = FORMAT_TYPE_PERCENT_CHAR;
return ++fmt - start;
/* integer number formats - set up the flags and "break" */
case 'o':
spec->base = 8;
break;
case 'x':
spec->flags |= SMALL;
fallthrough;
case 'X':
spec->base = 16;
break;
case 'd':
case 'i':
spec->flags |= SIGN;
break;
case 'u':
break;
case 'n':
/*
* Since %n poses a greater security risk than
* utility, treat it as any other invalid or
* unsupported format specifier.
*/
fallthrough;
default:
WARN_ONCE(1, "Please remove unsupported %%%c in format string\n", *fmt);
spec->type = FORMAT_TYPE_INVALID;
return fmt - start;
}
if (qualifier == 'L')
spec->type = FORMAT_TYPE_LONG_LONG;
else if (qualifier == 'l') {
BUILD_BUG_ON(FORMAT_TYPE_ULONG + SIGN != FORMAT_TYPE_LONG);
spec->type = FORMAT_TYPE_ULONG + (spec->flags & SIGN);
} else if (qualifier == 'z') {
spec->type = FORMAT_TYPE_SIZE_T;
} else if (qualifier == 't') {
spec->type = FORMAT_TYPE_PTRDIFF;
} else if (qualifier == 'H') {
BUILD_BUG_ON(FORMAT_TYPE_UBYTE + SIGN != FORMAT_TYPE_BYTE);
spec->type = FORMAT_TYPE_UBYTE + (spec->flags & SIGN);
} else if (qualifier == 'h') {
BUILD_BUG_ON(FORMAT_TYPE_USHORT + SIGN != FORMAT_TYPE_SHORT);
spec->type = FORMAT_TYPE_USHORT + (spec->flags & SIGN);
} else {
BUILD_BUG_ON(FORMAT_TYPE_UINT + SIGN != FORMAT_TYPE_INT);
spec->type = FORMAT_TYPE_UINT + (spec->flags & SIGN);
}
return ++fmt - start;
}
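/*
 * struct printf_spec keeps field_width and precision in narrow bit-fields,
 * so the assign-then-compare in the helpers below catches values that do
 * not fit and clamps them to the representable range.
 */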
static void
set_field_width(struct printf_spec *spec, int width)
{
spec->field_width = width;
if (WARN_ONCE(spec->field_width != width, "field width %d too large", width)) {
spec->field_width = clamp(width, -FIELD_WIDTH_MAX, FIELD_WIDTH_MAX);
}
}
static void
set_precision(struct printf_spec *spec, int prec)
{
spec->precision = prec;
if (WARN_ONCE(spec->precision != prec, "precision %d too large", prec)) {
spec->precision = clamp(prec, 0, PRECISION_MAX);
}
}
/**
* vsnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @args: Arguments for the format string
*
* This function generally follows C99 vsnprintf, but has some
* extensions and a few limitations:
*
* - ``%n`` is unsupported
* - ``%p*`` is handled by pointer()
*
* See pointer() or Documentation/core-api/printk-formats.rst for more
* extensive description.
*
* **Please update the documentation in both places when making changes**
*
* The return value is the number of characters which would
* be generated for the given input, excluding the trailing
* '\0', as per ISO C99. If you want to have the exact
* number of characters written into @buf as return value
* (not including the trailing '\0'), use vscnprintf(). If the
* return is greater than or equal to @size, the resulting
* string is truncated.
*
* If you're not already dealing with a va_list consider using snprintf().
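*
* A rough illustration of the return value (a sketch only, using the
* snprintf() wrapper mentioned above):
*
*     char buf[8];
*     int len = snprintf(buf, sizeof(buf), "%s", "hello, world");
*
* Here len is 12, the length of the untruncated result, while buf holds
* the truncated, '\0'-terminated "hello, "; use scnprintf() or
* vscnprintf() to get the number of characters actually written.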
*/
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
unsigned long long num;
char *str, *end;
struct printf_spec spec = {0};
/* Reject out-of-range values early. Large positive sizes are
used for unknown buffer sizes. */
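/* (callers like vsprintf() pass INT_MAX here, having no real size bound) */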
if (WARN_ON_ONCE(size > INT_MAX))
return 0;
str = buf;
end = buf + size;
/* Make sure end is always >= buf */
if (end < buf) {
end = ((void *)-1);
size = end - buf;
}
while (*fmt) {
const char *old_fmt = fmt;
int read = format_decode(fmt, &spec);
fmt += read;
switch (spec.type) {
case FORMAT_TYPE_NONE: {
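			/*
			 * Literal text between conversions: copy as much as
			 * fits, but advance @str by the full length so the
			 * return value still counts the untruncated output.
			 */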
int copy = read;
if (str < end) {
if (copy > end - str)
copy = end - str;
memcpy(str, old_fmt, copy);
}
str += read;
break;
}
case FORMAT_TYPE_WIDTH:
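			/* '*' field width: fetch the width from the next argument */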
set_field_width(&spec, va_arg(args, int));
break;
case FORMAT_TYPE_PRECISION:
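			/* '.*' precision: fetch the precision from the next argument */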
set_precision(&spec, va_arg(args, int));
break;
case FORMAT_TYPE_CHAR: {
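			/*
			 * %c: pad with spaces on the left unless the '-' (LEFT)
			 * flag was given, emit the character, then pad on the
			 * right for a left-justified field.
			 */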
char c;
if (!(spec.flags & LEFT)) {
while (--spec.field_width > 0) {
if (str < end)
*str = ' ';
++str;
}
}
c = (unsigned char) va_arg(args, int);
if (str < end)
*str = c;
++str;
while (--spec.field_width > 0) {
if (str < end)
*str = ' ';
++str;
}
break;
}
case FORMAT_TYPE_STR:
str = string(str, end, va_arg(args, char *), spec);
break;
case FORMAT_TYPE_PTR:
str = pointer(fmt, str, end, va_arg(args, void *),
spec);
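			/*
			 * pointer() has already interpreted any %p extension
			 * characters (e.g. %pS, %pI4); skip over them here.
			 */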
while (isalnum(*fmt))
fmt++;
break;
case FORMAT_TYPE_PERCENT_CHAR:
if (str < end)
*str = '%';
++str;
break;
case FORMAT_TYPE_INVALID:
/*
* Presumably the arguments passed gcc's type
* checking, but there is no safe or sane way
* for us to continue parsing the format and
* fetching from the va_list; the remaining
* specifiers and arguments would be out of
* sync.
*/
goto out;
default:
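			/*
			 * Everything else is an integer conversion: fetch the
			 * argument with its (promoted) type and format it via
			 * number().
			 */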
switch (spec.type) {
case FORMAT_TYPE_LONG_LONG:
num = va_arg(args, long long);
break;
case FORMAT_TYPE_ULONG:
num = va_arg(args, unsigned long);
break;
case FORMAT_TYPE_LONG:
num = va_arg(args, long);
break;
case FORMAT_TYPE_SIZE_T:
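				/* %zd is signed (ssize_t), %zu/%zx/%zo are not */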
if (spec.flags & SIGN)
num = va_arg(args, ssize_t);
else
num = va_arg(args, size_t);
break;
case FORMAT_TYPE_PTRDIFF:
num = va_arg(args, ptrdiff_t);
break;
case FORMAT_TYPE_UBYTE:
num = (unsigned char) va_arg(args, int);
break;
case FORMAT_TYPE_BYTE:
num = (signed char) va_arg(args, int);
break;
case FORMAT_TYPE_USHORT:
num = (unsigned short) va_arg(args, int);
break;
case FORMAT_TYPE_SHORT:
num = (short) va_arg(args, int);
break;
case FORMAT_TYPE_INT:
num = (int) va_arg(args, int);
break;
default:
num = va_arg(args, unsigned int);
}
str = number(str, end, num, spec);
}
}
out:
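	/*
	 * NUL-terminate whenever the buffer has any room at all: right after
	 * the last character written, or in the final byte if the output was
	 * truncated.
	 */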
if (size > 0) {
if (str < end)
*str = '\0';
else
end[-1] = '\0';
}
/* the trailing null byte doesn't count towards the total */
return str-buf;
}
EXPORT_SYMBOL(vsnprintf);
/**
* vscnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @args: Arguments for the format string
*
* The return value is the number of characters which have been written into
* @buf, not including the trailing '\0'. If @size is 0, the function
* returns 0.
*
* If you're not already dealing with a va_list, consider using scnprintf().
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int i;
i = vsnprintf(buf, size, fmt, args);
if (likely(i < size))
return i;
if (size != 0)
return size - 1;
return 0;
}
EXPORT_SYMBOL(vscnprintf);
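/*
 * Usage sketch (illustrative only, not part of this file): a varargs helper
 * that prefixes its output can only pass its own arguments on as a va_list,
 * so it uses vscnprintf() for the formatted part. The helper name my_log()
 * and the "my_driver: " prefix are hypothetical.
 *
 *	static int my_log(char *buf, size_t size, const char *fmt, ...)
 *	{
 *		va_list args;
 *		int len;
 *
 *		len = scnprintf(buf, size, "my_driver: ");
 *		va_start(args, fmt);
 *		len += vscnprintf(buf + len, size - len, fmt, args);
 *		va_end(args);
 *
 *		return len;
 *	}
 */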
/**
* snprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @...: Arguments for the format string
*
* The return value is the number of characters which would be
* generated for the given input, excluding the trailing null,
* as per ISO C99. If the return is greater than or equal to
* @size, the resulting string is truncated.
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
int snprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vsnprintf(buf, size, fmt, args);
va_end(args);
return i;
}
EXPORT_SYMBOL(snprintf);
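/*
 * Usage sketch (hypothetical caller code): snprintf() returns the length the
 * complete result would have had, so truncation is detected by comparing the
 * return value with the buffer size. The variables dir and name are
 * assumptions for the example.
 *
 *	char path[32];
 *	int len;
 *
 *	len = snprintf(path, sizeof(path), "%s/%s", dir, name);
 *	if (len >= sizeof(path))
 *		pr_warn("path truncated, needed %d bytes\n", len + 1);
 *
 * A return value greater than or equal to sizeof(path) means the output was
 * truncated; the return value is the length the full string would have
 * needed, excluding the trailing '\0'.
 */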
/**
* scnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @...: Arguments for the format string
*
* The return value is the number of characters written into @buf, not
* including the trailing '\0'. If @size is 0, the function returns 0.
*/
int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vscnprintf(buf, size, fmt, args);
va_end(args);
return i;
}
EXPORT_SYMBOL(scnprintf);
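/*
 * Usage sketch (hypothetical caller code): because scnprintf() returns the
 * number of characters actually stored, it composes safely when appending
 * several pieces into one fixed-size buffer. The variables cpu and
 * nr_events are assumptions for the example.
 *
 *	char buf[64];
 *	size_t pos = 0;
 *
 *	pos += scnprintf(buf + pos, sizeof(buf) - pos, "cpu%d: ", cpu);
 *	pos += scnprintf(buf + pos, sizeof(buf) - pos, "%lu events\n", nr_events);
 *
 * Even when the buffer fills up, pos never exceeds sizeof(buf) - 1, so the
 * pointer and remaining-size arguments stay valid on every call.
 */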
/**
* vsprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @fmt: The format string to use
* @args: Arguments for the format string
*
* The function returns the number of characters written
* into @buf. Use vsnprintf() or vscnprintf() in order to avoid
* buffer overflows.
*
* If you're not already dealing with a va_list, consider using sprintf().
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
int vsprintf(char *buf, const char *fmt, va_list args)
{
return vsnprintf(buf, INT_MAX, fmt, args);
}
EXPORT_SYMBOL(vsprintf);
/**
* sprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @fmt: The format string to use
* @...: Arguments for the format string
*
* The function returns the number of characters written
* into @buf. Use snprintf() or scnprintf() in order to avoid
* buffer overflows.
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
int sprintf(char *buf, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vsnprintf(buf, INT_MAX, fmt, args);
va_end(args);
return i;
}
EXPORT_SYMBOL(sprintf);
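/*
 * Usage sketch (hypothetical caller code): sprintf() is only reasonable when
 * the maximum output length is bounded by construction; anything sized by
 * external input should use snprintf()/scnprintf() instead.
 *
 *	char name[sizeof("irq-4294967295")];
 *
 *	sprintf(name, "irq-%u", irq);
 *
 * The array is sized for the worst-case u32 ("irq-" plus ten digits plus the
 * trailing '\0'), so the output cannot overflow it. The variable irq is an
 * assumption for the example.
 */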
#ifdef CONFIG_BINARY_PRINTF
/*
* bprintf service:
* vbin_printf() - VA arguments to binary data
* bstr_printf() - Binary data to text string
*/
/**
* vbin_printf - Parse a format string and place args' binary value in a buffer
* @bin_buf: The buffer to place args' binary value
* @size: The size of the buffer, in 32-bit words (not characters)
* @fmt: The format string to use
* @args: Arguments for the format string
*
* The format follows C99 vsnprintf, except %n is ignored, and its argument
* is skipped.
*
* The return value is the number of 32-bit words which would be generated for
* the given input.
*
* NOTE:
* If the return value is greater than @size, the resulting bin_buf is NOT
* valid for bstr_printf().
*/
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
{
struct printf_spec spec = {0};
char *str, *end;
int width;
str = (char *)bin_buf;
end = (char *)(bin_buf + size);
#define save_arg(type) \
({ \
unsigned long long value; \
if (sizeof(type) == 8) { \
unsigned long long val8; \
str = PTR_ALIGN(str, sizeof(u32)); \
val8 = va_arg(args, unsigned long long); \
if (str + sizeof(type) <= end) { \
*(u32 *)str = *(u32 *)&val8; \
*(u32 *)(str + 4) = *((u32 *)&val8 + 1); \
} \
value = val8; \
} else { \
unsigned int val4; \
str = PTR_ALIGN(str, sizeof(type)); \
val4 = va_arg(args, int); \
if (str + sizeof(type) <= end) \
*(typeof(type) *)str = (type)(long)val4; \
value = (unsigned long long)val4; \
} \
str += sizeof(type); \
value; \
})
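/*
 * Buffer layout sketch (describing the convention used by save_arg() above,
 * which bstr_printf() reads back later): each argument is aligned to its own
 * size, except that 64-bit values are 4-byte aligned and stored as two
 * consecutive 32-bit words; strings are copied inline. For example, for
 * trace_printk("%d %llu %s", i, ll, s) the buffer holds:
 *
 *	word 0:		value of i (int)
 *	words 1-2:	value of ll (split into two u32 words)
 *	words 3..:	the characters of s, including the trailing '\0'
 */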
while (*fmt) {
int read = format_decode(fmt, &spec);
fmt += read;
switch (spec.type) {
case FORMAT_TYPE_NONE:
case FORMAT_TYPE_PERCENT_CHAR:
break;
case FORMAT_TYPE_INVALID:
goto out;
case FORMAT_TYPE_WIDTH:
case FORMAT_TYPE_PRECISION:
width = (int)save_arg(int);
/* Pointers may require the width */
if (*fmt == 'p')
set_field_width(&spec, width);
break;
case FORMAT_TYPE_CHAR:
save_arg(char);
break;
case FORMAT_TYPE_STR: {
const char *save_str = va_arg(args, char *);
const char *err_msg;
size_t len;
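/*
 * Substitute the error message ("(null)", "(efault)") for obviously
 * invalid string pointers instead of crashing while copying the string.
 */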
err_msg = check_pointer_msg(save_str);
if (err_msg)
save_str = err_msg;
len = strlen(save_str) + 1;
if (str + len < end)
memcpy(str, save_str, len);
str += len;
break;
}
case FORMAT_TYPE_PTR:
/*
 * %p extensions that dereference the pointed-to data must be
 * formatted now; the data may be gone by the time bstr_printf() runs.
 */
switch (*fmt) {
/*
 * Deferring these is still OK: they either print the pointer value
 * itself (%px, %pK, %pe) or resolve it via kallsyms (%pS, %ps),
 * which the output side can do as well.
 */
case 'S':
case 's':
case 'x':
case 'K':
case 'e':
save_arg(void *);
break;
default:
if (!isalnum(*fmt)) {
save_arg(void *);
break;
}
str = pointer(fmt, str, end, va_arg(args, void *),
spec);
if (str + 1 < end)
*str++ = '\0';
else
end[-1] = '\0'; /* Must be nul terminated */
}
/* skip all alphanumeric pointer suffixes */
while (isalnum(*fmt))
fmt++;
break;
default:
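/*
 * The remaining conversions are numeric: store each argument at the
 * width implied by the format so that bstr_printf() can read it back
 * with a matching get_arg().
 */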
switch (spec.type) {
case FORMAT_TYPE_LONG_LONG:
save_arg(long long);
break;
case FORMAT_TYPE_ULONG:
case FORMAT_TYPE_LONG:
save_arg(unsigned long);
break;
case FORMAT_TYPE_SIZE_T:
save_arg(size_t);
break;
case FORMAT_TYPE_PTRDIFF:
save_arg(ptrdiff_t);
break;
case FORMAT_TYPE_UBYTE:
case FORMAT_TYPE_BYTE:
save_arg(char);
break;
case FORMAT_TYPE_USHORT:
case FORMAT_TYPE_SHORT:
save_arg(short);
break;
default:
save_arg(int);
}
}
}
out:
return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
#undef save_arg
}
EXPORT_SYMBOL_GPL(vbin_printf);
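/*
 * Minimal usage sketch (illustration only, not part of the original file):
 * how a caller such as trace_printk() is expected to pair vbin_printf() with
 * bstr_printf().  The varargs are packed into a u32 word buffer first and
 * expanded with the very same format string later.  The helper name
 * bin_format_demo() is hypothetical.
 */
static __maybe_unused int bin_format_demo(char *out, size_t out_size,
					  const char *fmt, ...)
{
	u32 bin[64];	/* word buffer filled by vbin_printf() */
	va_list args;

	va_start(args, fmt);
	/* Pack the arguments; the return value is the number of u32 words used. */
	vbin_printf(bin, ARRAY_SIZE(bin), fmt, args);
	va_end(args);

	/* Possibly much later, in a different context: expand them. */
	return bstr_printf(out, out_size, fmt, bin);
}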
/**
* bstr_printf - Format a string from binary arguments and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @bin_buf: Binary arguments for the format string
*
 * This function is like C99 vsnprintf, but the difference is that vsnprintf
 * gets its arguments from the stack, and bstr_printf gets its arguments from
 * @bin_buf, a binary buffer generated by vbin_printf().
*
* The format follows C99 vsnprintf, but has some extensions:
* see vsnprintf comment for details.
*
* The return value is the number of characters which would
* be generated for the given input, excluding the trailing
* '\0', as per ISO C99. If you want to have the exact
* number of characters written into @buf as return value
* (not including the trailing '\0'), use vscnprintf(). If the
* return is greater than or equal to @size, the resulting
* string is truncated.
*/
int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
{
struct printf_spec spec = {0};
char *str, *end;
const char *args = (const char *)bin_buf;
if (WARN_ON_ONCE(size > INT_MAX))
return 0;
str = buf;
end = buf + size;
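/*
 * Fetch the next argument from the packed word buffer produced by
 * vbin_printf().  Each value sits on its natural alignment boundary except
 * 64-bit values, which are only u32-aligned and are therefore read back as
 * two 32-bit halves.
 */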
#define get_arg(type) \
({ \
typeof(type) value; \
if (sizeof(type) == 8) { \
args = PTR_ALIGN(args, sizeof(u32)); \
*(u32 *)&value = *(u32 *)args; \
*((u32 *)&value + 1) = *(u32 *)(args + 4); \
} else { \
args = PTR_ALIGN(args, sizeof(type)); \
value = *(typeof(type) *)args; \
} \
args += sizeof(type); \
value; \
})
/* Make sure end is always >= buf */
if (end < buf) {
end = ((void *)-1);
size = end - buf;
}
while (*fmt) {
const char *old_fmt = fmt;
int read = format_decode(fmt, &spec);
fmt += read;
switch (spec.type) {
case FORMAT_TYPE_NONE: {
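/* No conversion: copy the literal format text as-is. */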
int copy = read;
if (str < end) {
if (copy > end - str)
copy = end - str;
memcpy(str, old_fmt, copy);
}
str += read;
break;
}
case FORMAT_TYPE_WIDTH:
set_field_width(&spec, get_arg(int));
break;
case FORMAT_TYPE_PRECISION:
set_precision(&spec, get_arg(int));
break;
case FORMAT_TYPE_CHAR: {
char c;
if (!(spec.flags & LEFT)) {
while (--spec.field_width > 0) {
if (str < end)
*str = ' ';
++str;
}
}
c = (unsigned char) get_arg(char);
if (str < end)
*str = c;
++str;
while (--spec.field_width > 0) {
if (str < end)
*str = ' ';
++str;
}
break;
}
case FORMAT_TYPE_STR: {
const char *str_arg = args;
args += strlen(str_arg) + 1;
str = string(str, end, (char *)str_arg, spec);
break;
}
case FORMAT_TYPE_PTR: {
bool process = false;
int copy, len;
/* Non function dereferences were already done */
switch (*fmt) {
case 'S':
case 's':
case 'x':
case 'K':
case 'e':
process = true;
break;
default:
if (!isalnum(*fmt)) {
process = true;
break;
}
/* Pointer dereference was already processed */
if (str < end) {
len = copy = strlen(args);
if (copy > end - str)
copy = end - str;
memcpy(str, args, copy);
str += len;
args += len + 1;
}
}
if (process)
str = pointer(fmt, str, end, get_arg(void *), spec);
while (isalnum(*fmt))
fmt++;
break;
}
case FORMAT_TYPE_PERCENT_CHAR:
if (str < end)
*str = '%';
++str;
break;
case FORMAT_TYPE_INVALID:
goto out;
default: {
unsigned long long num;
switch (spec.type) {
case FORMAT_TYPE_LONG_LONG:
num = get_arg(long long);
break;
case FORMAT_TYPE_ULONG:
case FORMAT_TYPE_LONG:
num = get_arg(unsigned long);
break;
case FORMAT_TYPE_SIZE_T:
num = get_arg(size_t);
break;
case FORMAT_TYPE_PTRDIFF:
num = get_arg(ptrdiff_t);
break;
case FORMAT_TYPE_UBYTE:
num = get_arg(unsigned char);
break;
case FORMAT_TYPE_BYTE:
num = get_arg(signed char);
break;
case FORMAT_TYPE_USHORT:
num = get_arg(unsigned short);
break;
case FORMAT_TYPE_SHORT:
num = get_arg(short);
break;
case FORMAT_TYPE_UINT:
num = get_arg(unsigned int);
break;
default:
num = get_arg(int);
}
str = number(str, end, num, spec);
} /* default: */
} /* switch(spec.type) */
} /* while(*fmt) */
out:
if (size > 0) {
if (str < end)
*str = '\0';
else
end[-1] = '\0';
}
#undef get_arg
/* the trailing null byte doesn't count towards the total */
return str - buf;
}
EXPORT_SYMBOL_GPL(bstr_printf);
/**
* bprintf - Parse a format string and place args' binary value in a buffer
* @bin_buf: The buffer to place args' binary value
* @size: The size of the buffer (in 32-bit words, not characters)
* @fmt: The format string to use
* @...: Arguments for the format string
*
* The function returns the number of words (u32) written
* into @bin_buf.
*/
int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...)
{
va_list args;
int ret;
va_start(args, fmt);
ret = vbin_printf(bin_buf, size, fmt, args);
va_end(args);
return ret;
}
EXPORT_SYMBOL_GPL(bprintf);
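/*
 * Illustrative sketch (not part of the original source): bprintf() and
 * bstr_printf() are meant to be used as a pair, with the same format
 * string passed to both calls.  The buffer and variable names below
 * (bin, out, words, len) are hypothetical:
 *
 *	u32 bin[64];
 *	char out[128];
 *	int words, len;
 *
 *	words = bprintf(bin, ARRAY_SIZE(bin), "%s=%d", "answer", 42);
 *	len = bstr_printf(out, sizeof(out), "%s=%d", bin);
 *
 * This is the split trace_printk() relies on: vbin_printf() records the
 * arguments at trace time and bstr_printf() expands them at read time.
 */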
#endif /* CONFIG_BINARY_PRINTF */
/**
* vsscanf - Unformat a buffer into a list of arguments
* @buf: input buffer
* @fmt: format of buffer
* @args: arguments
*/
int vsscanf(const char *buf, const char *fmt, va_list args)
{
const char *str = buf;
char *next;
char digit;
int num = 0;
u8 qualifier;
unsigned int base;
union {
long long s;
unsigned long long u;
} val;
s16 field_width;
bool is_sign;
while (*fmt) {
/* skip any white space in format */
/* white space in format matches any amount of
* white space, including none, in the input.
*/
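/* Illustrative example (not from the original source, buf and val are
 * hypothetical caller variables): a single space in the format absorbs
 * any run of whitespace in the input, so
 * sscanf("foo   42", "%s %d", buf, &val) still assigns "foo" and 42.
 */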
if (isspace(*fmt)) {
fmt = skip_spaces(++fmt);
str = skip_spaces(str);
}
/* anything that is not a conversion must match exactly */
if (*fmt != '%' && *fmt) {
if (*fmt++ != *str++)
break;
continue;
}
if (!*fmt)
break;
++fmt;
/* skip this conversion.
* advance both strings to next white space
*/
if (*fmt == '*') {
if (!*str)
break;
while (!isspace(*fmt) && *fmt != '%' && *fmt) {
/* '%*[' not yet supported, invalid format */
if (*fmt == '[')
return num;
fmt++;
}
while (!isspace(*str) && *str)
str++;
continue;
}
/* get field width */
field_width = -1;
if (isdigit(*fmt)) {
field_width = skip_atoi(&fmt);
if (field_width <= 0)
break;
}
/* get conversion qualifier */
qualifier = -1;
if (*fmt == 'h' || _tolower(*fmt) == 'l' ||
*fmt == 'z') {
qualifier = *fmt++;
if (unlikely(qualifier == *fmt)) {
if (qualifier == 'h') {
qualifier = 'H';
fmt++;
} else if (qualifier == 'l') {
qualifier = 'L';
fmt++;
}
}
}
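/*
* Example: "%hhd" yields qualifier 'H' and stores into a signed char,
* "%hd" into a short, "%ld" into a long, "%lld" (qualifier 'L') into a
* long long, and "%zu" into a size_t, matching the switch further down.
*/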
if (!*fmt)
break;
if (*fmt == 'n') {
/* return number of characters read so far */
*va_arg(args, int *) = str - buf;
++fmt;
continue;
}
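/*
* Example (with caller-provided word, val and pos):
*	sscanf("abc 42", "%s %d%n", word, &val, &pos);
* leaves pos == 6 (characters consumed so far); %n itself does not
* add to the returned count, which is 2 here.
*/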
if (!*str)
break;
base = 10;
is_sign = false;
switch (*fmt++) {
case 'c':
{
char *s = (char *)va_arg(args, char*);
if (field_width == -1)
field_width = 1;
do {
*s++ = *str++;
} while (--field_width > 0 && *str);
num++;
}
continue;
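/*
* Example: sscanf("wxyz", "%2c", buf) copies "wx" into buf.  Note that
* %c does not NUL-terminate the destination; the caller must account
* for that.
*/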
case 's':
{
char *s = (char *)va_arg(args, char *);
if (field_width == -1)
field_width = SHRT_MAX;
/* first, skip leading white space in buffer */
str = skip_spaces(str);
/* now copy until next white space */
while (*str && !isspace(*str) && field_width--)
*s++ = *str++;
*s = '\0';
num++;
}
continue;
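/*
* Example: sscanf("hello world", "%4s", buf) stores "hell" (plus the
* terminating NUL) in buf and leaves "o world" unconsumed; without a
* field width, %s copies up to the next white space.
*/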
/*
* Warning: This implementation of the '[' conversion specifier
* deviates from its glibc counterpart in the following ways:
* (1) It does NOT support ranges i.e. '-' is NOT a special
* character
* (2) It cannot match the closing bracket ']' itself
* (3) A field width is required
* (4) '%*[' (discard matching input) is currently not supported
*
* Example usage:
* ret = sscanf("00:0a:95","%2[^:]:%2[^:]:%2[^:]",
* buf1, buf2, buf3);
* if (ret < 3)
* // etc..
*/
case '[':
{
char *s = (char *)va_arg(args, char *);
DECLARE_BITMAP(set, 256) = {0};
unsigned int len = 0;
bool negate = (*fmt == '^');
/* field width is required */
if (field_width == -1)
return num;
if (negate)
++fmt;
for ( ; *fmt && *fmt != ']'; ++fmt, ++len)
set_bit((u8)*fmt, set);
/* no ']' or no character set found */
if (!*fmt || !len)
return num;
++fmt;
if (negate) {
bitmap_complement(set, set, 256);
/* exclude null '\0' byte */
clear_bit(0, set);
}
/* match must be non-empty */
if (!test_bit((u8)*str, set))
return num;
while (test_bit((u8)*str, set) && field_width--)
*s++ = *str++;
*s = '\0';
++num;
}
continue;
case 'o':
base = 8;
break;
case 'x':
case 'X':
base = 16;
break;
case 'i':
base = 0;
fallthrough;
case 'd':
is_sign = true;
fallthrough;
case 'u':
break;
case '%':
/* looking for '%' in str */
if (*str++ != '%')
return num;
continue;
default:
/* invalid format; stop here */
return num;
}
/* have some sort of integer conversion.
* first, skip white space in buffer.
*/
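/*
* A field width caps how many characters the conversion may consume,
* e.g. sscanf("12345", "%2d%2d", &a, &b) yields a == 12 and b == 34
* with the trailing '5' left unconsumed, whereas a plain "%d" would
* consume all five digits.
*/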
str = skip_spaces(str);
digit = *str;
if (is_sign && digit == '-') {
if (field_width == 1)
break;
digit = *(str + 1);
}
if (!digit
|| (base == 16 && !isxdigit(digit))
|| (base == 10 && !isdigit(digit))
|| (base == 8 && (!isdigit(digit) || digit > '7'))
|| (base == 0 && !isdigit(digit)))
break;
if (is_sign)
val.s = simple_strntoll(str,
field_width >= 0 ? field_width : INT_MAX,
&next, base);
else
val.u = simple_strntoull(str,
field_width >= 0 ? field_width : INT_MAX,
&next, base);
switch (qualifier) {
case 'H': /* that's 'hh' in format */
if (is_sign)
*va_arg(args, signed char *) = val.s;
else
*va_arg(args, unsigned char *) = val.u;
break;
case 'h':
if (is_sign)
*va_arg(args, short *) = val.s;
else
*va_arg(args, unsigned short *) = val.u;
break;
case 'l':
if (is_sign)
*va_arg(args, long *) = val.s;
else
*va_arg(args, unsigned long *) = val.u;
break;
case 'L':
if (is_sign)
*va_arg(args, long long *) = val.s;
else
*va_arg(args, unsigned long long *) = val.u;
break;
case 'z':
*va_arg(args, size_t *) = val.u;
break;
default:
if (is_sign)
*va_arg(args, int *) = val.s;
else
*va_arg(args, unsigned int *) = val.u;
break;
}
num++;
if (!next)
break;
str = next;
}
return num;
}
EXPORT_SYMBOL(vsscanf);
/**
* sscanf - Unformat a buffer into a list of arguments
* @buf: input buffer
* @fmt: format of buffer
* @...: resulting arguments
*/
int sscanf(const char *buf, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vsscanf(buf, fmt, args);
va_end(args);
return i;
}
EXPORT_SYMBOL(sscanf);
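/*
* Example usage (a minimal sketch; version_str is a caller-supplied
* string, not something defined in this file):
*
*	int major, minor;
*
*	if (sscanf(version_str, "%d.%d", &major, &minor) != 2)
*		return -EINVAL;
*
* Checking the return value against the expected number of conversions
* is the usual way to reject malformed input, since sscanf() reports
* how many items were successfully matched and assigned.
*/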