From 3d409c16ee7d00012f954e8e819f0f9d48aedb73 Mon Sep 17 00:00:00 2001
From: Andreas Schneider
Date: Fri, 31 Mar 2023 11:04:54 +0200
Subject: [PATCH] lib:compression: Fix code spelling

Best reviewed with: `git show --word-diff`.

Signed-off-by: Andreas Schneider
Reviewed-by: Andrew Bartlett
---
 lib/compression/lzxpress_huffman.c          | 6 +++---
 lib/compression/lzxpress_huffman.h          | 2 +-
 lib/compression/pycompression.c             | 2 +-
 lib/compression/tests/test_lzx_huffman.c    | 4 ++--
 lib/compression/tests/test_lzxpress_plain.c | 4 ++--
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/lib/compression/lzxpress_huffman.c b/lib/compression/lzxpress_huffman.c
index ee0fa2c83e6..4d69cc406ec 100644
--- a/lib/compression/lzxpress_huffman.c
+++ b/lib/compression/lzxpress_huffman.c
@@ -447,7 +447,7 @@ static int generate_huffman_codes(struct huffman_node *leaf_nodes,
 		n_leaves = 2;
 	}
 
-	/* note, in sort we're using internal_nodes as auxillary space */
+	/* note, in sort we're using internal_nodes as auxiliary space */
 	stable_sort(leaf_nodes,
 		    internal_nodes,
 		    n_leaves,
@@ -465,7 +465,7 @@ static int generate_huffman_codes(struct huffman_node *leaf_nodes,
 	 * In practice it will almost always resolve in the first round; if
 	 * not then, in the second or third. Remember we'll looking at 64k or
 	 * less, so the rarest we can have is 1 in 64k; each round of
-	 * quantization effecively doubles its frequency to 1 in 32k, 1 in
+	 * quantization effectively doubles its frequency to 1 in 32k, 1 in
 	 * 16k, etc, until we're treating the rare symbol as actually quite
 	 * common.
 	 */
@@ -591,7 +591,7 @@ static int generate_huffman_codes(struct huffman_node *leaf_nodes,
 		 *
 		 * We need to sort the nodes of equal depth, so that
 		 * they are sorted by depth first, and symbol value
-		 * second. The internal_nodes can again be auxillary
+		 * second. The internal_nodes can again be auxiliary
 		 * memory.
 		 */
 		stable_sort(
diff --git a/lib/compression/lzxpress_huffman.h b/lib/compression/lzxpress_huffman.h
index 232e58920f5..41cca5c40a4 100644
--- a/lib/compression/lzxpress_huffman.h
+++ b/lib/compression/lzxpress_huffman.h
@@ -45,7 +45,7 @@ struct huffman_node {
 
 /*
  * This struct just coalesces all the memory you need for LZ77 + Huffman
- * compresssion together in one bundle.
+ * compression together in one bundle.
  *
  * There are a few different things you want, you usually want them all, so
  * this makes it easy to allocate them all at once.
diff --git a/lib/compression/pycompression.c b/lib/compression/pycompression.c
index f67b0ddbe39..3be3620b1cf 100644
--- a/lib/compression/pycompression.c
+++ b/lib/compression/pycompression.c
@@ -270,7 +270,7 @@ static PyMethodDef mod_methods[] = {
 };
 
 
-#define MODULE_DOC PyDoc_STR("LZXpress compresssion/decompression bindings")
+#define MODULE_DOC PyDoc_STR("LZXpress compression/decompression bindings")
 
 static struct PyModuleDef moduledef = {
 	PyModuleDef_HEAD_INIT,
diff --git a/lib/compression/tests/test_lzx_huffman.c b/lib/compression/tests/test_lzx_huffman.c
index b7f22b9072b..7770535c1e9 100644
--- a/lib/compression/tests/test_lzx_huffman.c
+++ b/lib/compression/tests/test_lzx_huffman.c
@@ -293,7 +293,7 @@ struct lzx_pair bidirectional_pairs[] = {
 		/*
 		 * In this case there are no matches encoded as there are no
 		 * repeated symbols. Including the EOF, there are 27 symbols
-		 * all occuring exactly as frequently as each other (once).
+		 * all occurring exactly as frequently as each other (once).
 		 * From that we would expect the codes to be mostly 5 bits
 		 * long, because 27 < 2^5 (32), but greater than 2^4. And
 		 * that's what we see.
@@ -819,7 +819,7 @@ static void test_lzxpress_huffman_long_random_graph_round_trip(void **state)
 	 * simple loop, but we introduce damage into the system, randomly
 	 * flipping about 1 bit in 64.
 	 *
-	 * The result is semi-structured and compressable.
+	 * The result is semi-structured and compressible.
 	 */
 	uint8_t *d = original.data;
 	uint8_t *table = talloc_array(mem_ctx, uint8_t, 65536);
diff --git a/lib/compression/tests/test_lzxpress_plain.c b/lib/compression/tests/test_lzxpress_plain.c
index b1a4bee6e92..1c147932d40 100644
--- a/lib/compression/tests/test_lzxpress_plain.c
+++ b/lib/compression/tests/test_lzxpress_plain.c
@@ -481,7 +481,7 @@ static void test_lzxpress_plain_round_trip_files(void **state)
 		    ((double)compressed_total) / reference_total);
 	/*
 	 * Assert that the compression is better than Windows. Unlike the
-	 * Huffman varient, where things are very even, here we do much better
+	 * Huffman variant, where things are very even, here we do much better
 	 * than Windows without especially trying.
 	 */
 	assert_true(compressed_total <= reference_total);
@@ -592,7 +592,7 @@ static void test_lzxpress_plain_long_random_graph_round_trip(void **state)
 	 * simple loop, but we introduce damage into the system, randomly
 	 * flipping about 1 bit in 64.
 	 *
-	 * The result is semi-structured and compressable.
+	 * The result is semi-structured and compressible.
 	 */
 	uint8_t *d = original.data;
 	uint8_t *table = talloc_array(mem_ctx, uint8_t, 65536);