1
0
mirror of git://sourceware.org/git/lvm2.git synced 2025-03-10 16:58:47 +03:00

o remove steve's insane ramblings from my code.

This commit is contained in:
Joe Thornber 2001-09-26 14:32:07 +00:00
parent cd624d3a4f
commit f9e8171a44
8 changed files with 514 additions and 641 deletions

View File

@ -1,22 +1,9 @@
/*
* device-mapper.h
*
* Copyright (C) 2001 Sistina Software
* Copyright (C) 2001 Sistina Software (UK) Limited.
*
* This software is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2, or (at
* your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
* This file is released under the GPL.
*/
/*
@ -34,58 +21,63 @@
#define DM_BLK_MAJOR 124
struct dm_table;
struct dm_dev;
struct text_region;
typedef unsigned int offset_t;
typedef void (*dm_error_fn)(const char *message, void *private);
/* constructor, destructor and map fn types */
typedef void *(*dm_ctr_fn)(struct dm_table *t,
offset_t b, offset_t l,
struct text_region *args);
/*
* constructor, destructor and map fn types
*/
typedef int (*dm_ctr_fn)(struct dm_table *t, offset_t b, offset_t l,
struct text_region *args, void **context,
dm_error_fn err, void *e_private);
typedef void (*dm_dtr_fn)(struct dm_table *t, void *c);
typedef int (*dm_map_fn)(struct buffer_head *bh, int rw, void *context);
typedef int (*dm_err_fn)(struct buffer_head *bh, int rw, void *context);
/*
* Constructors should call this to make sure any
* destination devices are handled correctly
* (ie. opened/closed).
*/
struct dm_dev *dm_table_get_device(struct dm_table *table, const char *path);
void dm_table_put_device(struct dm_table *table, struct dm_dev d);
/*
* information about a target type
*/
struct target_type {
struct list_head list;
const char *name;
long use;
struct module *module;
dm_ctr_fn ctr;
dm_dtr_fn dtr;
dm_map_fn map;
dm_err_fn err;
unsigned long flags;
};
#define TF_BMAP 0x0001 /* Target supports bmap operation */
int dm_register_target(struct target_type *t);
int dm_unregister_target(struct target_type *t);
struct block_device *dm_blkdev_get(const char *path);
int dm_blkdev_put(struct block_device *);
struct text_region {
const char *b;
const char *e;
};
/*
* These may be useful for people writing target
* types.
*/
struct text_region {
const char *b;
const char *e;
};
int dm_get_number(struct text_region *txt, unsigned int *n);
int dm_get_line(struct text_region *txt, struct text_region *line);
int dm_get_word(struct text_region *txt, struct text_region *word);
void dm_txt_copy(char *dest, size_t max, struct text_region *txt);
void dm_eat_space(struct text_region *txt);
#endif /* DEVICE_MAPPER_H */
/*

View File

@ -1,233 +0,0 @@
/*
* dm-blkdev.c
*
* Copyright (C) 2001 Sistina Software
*
* This software is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2, or (at
* your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include <linux/config.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/atomic.h>
#include "dm.h"
struct dm_bdev {
struct list_head list;
struct block_device *bdev;
atomic_t use;
};
#define DMB_HASH_SHIFT 8
#define DMB_HASH_SIZE (1 << DMB_HASH_SHIFT)
#define DMB_HASH_MASK (DMB_HASH_SIZE - 1)
/*
* Lock ordering: Always get bdev_sem before bdev_lock if you need both locks.
*
* bdev_lock: A spinlock which protects the hash table
* bdev_sem: A semaphore which protects blkdev_get / blkdev_put so that we
* are certain to hold only a single reference at any point in time.
*/
static kmem_cache_t *bdev_cachep;
struct list_head bdev_hash[DMB_HASH_SIZE];
static rwlock_t bdev_lock = RW_LOCK_UNLOCKED;
static DECLARE_MUTEX(bdev_sem);
/*
 * Hash a block_device into a bdev_hash bucket index by folding the
 * high bits of bd_dev down over the low bits, then masking to the
 * table size.  (Hash choice subject to change.)
 */
static inline unsigned dm_hash_bdev(struct block_device *bdev)
{
	unsigned h = (unsigned) bdev->bd_dev;

	h = (h ^ (h >> DMB_HASH_SHIFT)) & DMB_HASH_MASK;
	return h;
}
/*
 * Look up @bdev in hash bucket @hash.  On a hit, take an extra
 * reference on the entry and return it; otherwise return NULL.
 * Caller must hold bdev_lock (read or write).
 */
static struct dm_bdev *__dm_get_device(struct block_device *bdev, unsigned hash)
{
	struct list_head *head = &bdev_hash[hash];
	struct list_head *tmp;

	for (tmp = head->next; tmp != head; tmp = tmp->next) {
		struct dm_bdev *b = list_entry(tmp, struct dm_bdev, list);
		if (b->bdev == bdev) {
			atomic_inc(&b->use);
			return b;
		}
	}

	return NULL;
}
/*
 * Return a cached, opened block device for @bdev, creating the cache
 * entry (and opening the device) on first use.
 *
 * bdev_sem serialises blkdev_get/blkdev_put so at most one opener runs
 * at a time; the hash is re-checked under the semaphore before a new
 * entry is inserted.  Returns the block_device or an ERR_PTR().
 */
static struct block_device *dm_get_device(struct block_device *bdev)
{
	struct dm_bdev *d, *n;
	int rv = 0;
	unsigned hash = dm_hash_bdev(bdev);

	/* fast path: device already cached */
	read_lock(&bdev_lock);
	d = __dm_get_device(bdev, hash);
	read_unlock(&bdev_lock);
	if (d)
		return d->bdev;

	/* allocate a candidate entry before taking the semaphore */
	n = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
	if (!n)
		return ERR_PTR(-ENOMEM);
	n->bdev = bdev;
	atomic_set(&n->use, 1);

	down(&bdev_sem);

	/* re-check: someone may have inserted it while we allocated */
	read_lock(&bdev_lock);
	d = __dm_get_device(bdev, hash);
	read_unlock(&bdev_lock);

	if (!d) {
		rv = blkdev_get(bdev, FMODE_READ | FMODE_WRITE, 0, BDEV_FILE);
		if (rv == 0) {
			/* hold our own reference on the bdev structure */
			atomic_inc(&bdev->bd_count);
			write_lock(&bdev_lock);
			list_add(&n->list, &bdev_hash[hash]);
			d = n;
			n = NULL;	/* candidate now owned by the hash */
			write_unlock(&bdev_lock);
		}
	}

	/* candidate unused: lost the race, or the open failed */
	if (n) {
		kmem_cache_free(bdev_cachep, n);
	}
	up(&bdev_sem);

	return rv ? ERR_PTR(rv) : d->bdev;
}
/*
 * Resolve a path name to a (refcounted, opened) block device.
 *
 * Returns the block_device on success or an ERR_PTR():
 *   -ENOENT   path lookup failed / dentry has no inode
 *   -ENOTBLK  path does not name a block device
 *   -EACCES   filesystem mounted "nodev"
 */
struct block_device *dm_blkdev_get(const char *path)
{
	struct nameidata nd;
	struct inode *inode;
	struct block_device *bdev;
	int err = -ENOENT;

	/* resolve the path, following symlinks */
	if (path_init(path, LOOKUP_FOLLOW, &nd))
		err = path_walk(path, &nd);
	if (err) {
		bdev = ERR_PTR(err);
		goto out;
	}

	inode = nd.dentry->d_inode;
	if (!inode) {
		bdev = ERR_PTR(-ENOENT);
		goto out;
	}

	if (!S_ISBLK(inode->i_mode)) {
		bdev = ERR_PTR(-ENOTBLK);
		goto out;
	}

	/* Versions? */
	/* refuse devices on "nodev" mounts; mnt_flags only exists on
	   kernel versions that define MNT_NODEV */
#ifdef MNT_NODEV
	if (nd.mnt->mnt_flags & MNT_NODEV) {
#else
	if (IS_NODEV(inode)) {
#endif
		bdev = ERR_PTR(-EACCES);
		goto out;
	}

	bdev = dm_get_device(inode->i_bdev);

      out:
	path_release(&nd);
	return bdev;
}
/*
 * Tear down a hash entry whose use count has dropped to zero:
 * unlink it, release the underlying block device and free it.
 * The count is re-checked under bdev_sem + bdev_lock in case a new
 * user appeared between the caller's decrement and this call.
 */
static void dm_blkdev_drop(struct dm_bdev *d)
{
	down(&bdev_sem);
	write_lock(&bdev_lock);
	if (atomic_read(&d->use) == 0) {
		list_del(&d->list);
	} else {
		/* raced with a new user: leave the entry alone */
		d = NULL;
	}
	write_unlock(&bdev_lock);
	if (d) {
		blkdev_put(d->bdev, BDEV_FILE);
		bdput(d->bdev);
		kmem_cache_free(bdev_cachep, d);
	}
	up(&bdev_sem);
}
/*
 * Drop one reference on a block device obtained via dm_blkdev_get().
 * Returns 0 on success, -ENOENT if the device was not in the hash.
 */
int dm_blkdev_put(struct block_device *bdev)
{
	struct dm_bdev *d;
	int do_drop = 0;
	unsigned hash = dm_hash_bdev(bdev);

	read_lock(&bdev_lock);
	d = __dm_get_device(bdev, hash);
	if (d) {
		/*
		 * One for ref that we want to drop,
		 * one for ref from __dm_get_device()
		 */
		if (atomic_sub_and_test(2, &d->use))
			do_drop = 1;
	}
	read_unlock(&bdev_lock);

	/* last reference gone: free outside the read lock */
	if (do_drop)
		dm_blkdev_drop(d);

	return (d != NULL) ? 0 : -ENOENT;
}
EXPORT_SYMBOL(dm_blkdev_get);
EXPORT_SYMBOL(dm_blkdev_put);
/*
 * Initialise the bdev hash table and create the slab cache used for
 * struct dm_bdev entries.  Returns 0, or -ENOMEM if the cache could
 * not be created.
 */
int dm_init_blkdev(void)
{
	int i;

	for (i = 0; i < DMB_HASH_SIZE; i++)
		INIT_LIST_HEAD(bdev_hash + i);

	bdev_cachep = kmem_cache_create("dm_bdev", sizeof(struct dm_bdev),
					0, 0, NULL, NULL);
	return bdev_cachep ? 0 : -ENOMEM;
}
/*
 * Destroy the dm_bdev slab cache; complain if entries are still live.
 */
void dm_cleanup_blkdev(void)
{
	int busy = kmem_cache_destroy(bdev_cachep);

	if (busy)
		printk(KERN_ERR "Device Mapper: dm_bdev cache not empty\n");
}

View File

@ -1,22 +1,9 @@
/*
* dm-linear.c
*
* Copyright (C) 2001 Sistina Software
* Copyright (C) 2001 Sistina Software (UK) Limited.
*
* This software is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2, or (at
* your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
* This file is released under the GPL.
*/
#include <linux/config.h>
@ -33,80 +20,59 @@
* linear: maps a linear range of a device.
*/
struct linear_c {
kdev_t rdev;
long delta; /* FIXME: we need a signed offset type */
struct block_device *bdev;
struct dm_dev *dev;
};
/*
* construct a linear mapping.
* <dev_path> <offset>
*/
static void *linear_ctr(struct dm_table *t, offset_t b, offset_t l,
struct text_region *args)
static int linear_ctr(struct dm_table *t, offset_t b, offset_t l,
struct text_region *args, void **context,
dm_error_fn err, void *e_private)
{
struct linear_c *lc;
unsigned int start;
char path[256];
struct text_region word;
struct block_device *bdev;
int rv = 0;
int hardsect_size;
if (!dm_get_word(args, &word)) {
t->err_msg = "couldn't get device path";
return ERR_PTR(-EINVAL);
}
dm_txt_copy(path, sizeof(path) - 1, &word);
bdev = dm_blkdev_get(path);
if (IS_ERR(bdev)) {
switch (PTR_ERR(bdev)) {
case -ENOTBLK:
t->err_msg = "not a block device";
break;
case -EACCES:
t->err_msg = "nodev mount option";
break;
case -ENOENT:
default:
t->err_msg = "no such device";
}
return bdev;
}
if (!dm_get_number(args, &start)) {
t->err_msg = "destination start not given";
rv = -EINVAL;
goto out_bdev_put;
}
int r = -EINVAL;
if (!(lc = kmalloc(sizeof(lc), GFP_KERNEL))) {
t->err_msg = "couldn't allocate memory for linear context\n";
rv = -ENOMEM;
goto out_bdev_put;
err("couldn't allocate memory for linear context", e_private);
return -ENOMEM;
}
if (!dm_get_word(args, &word)) {
err("couldn't get device path", e_private);
goto bad;
}
dm_txt_copy(lc->path, sizeof(lc->path) - 1, &word);
if (!dm_get_number(args, &start)) {
err("destination start not given", e_private);
goto bad;
}
if ((r = dm_table_add_device(t, lc->path, &lc->dev))) {
err("couldn't lookup device", e_private);
r = -ENXIO;
goto bad;
}
lc->rdev = to_kdev_t(bdev->bd_dev);
lc->bdev = bdev;
lc->delta = (int) start - (int) b;
*context = lc;
return 0;
hardsect_size = get_hardsect_size(lc->rdev);
if (t->hardsect_size > hardsect_size)
t->hardsect_size = hardsect_size;
return lc;
out_bdev_put:
dm_blkdev_put(bdev);
return ERR_PTR(rv);
bad:
kfree(lc);
return r;
}
static void linear_dtr(struct dm_table *t, void *c)
{
struct linear_c *lc = (struct linear_c *) c;
dm_blkdev_put(lc->bdev);
dm_table_remove_device(t, lc->dev);
kfree(c);
}
@ -125,30 +91,26 @@ static struct target_type linear_target = {
ctr: linear_ctr,
dtr: linear_dtr,
map: linear_map,
flags: TF_BMAP,
};
static int __init linear_init(void)
{
int rv;
int r = dm_register_target(&linear_target);
rv = dm_register_target(&linear_target);
if (rv < 0) {
printk(KERN_ERR "Device mapper: Linear: register failed %d\n", rv);
}
if (r < 0)
printk(KERN_ERR
"Device mapper: Linear: register failed %d\n", r);
return rv;
return r;
}
static void __exit linear_exit(void)
{
int rv;
int r = dm_unregister_target(&linear_target);
rv = dm_unregister_target(&linear_target);
if (rv < 0) {
printk(KERN_ERR "Device mapper: Linear: unregister failed %d\n", rv);
}
if (r < 0)
printk(KERN_ERR
"Device mapper: Linear: unregister failed %d\n", r);
}
module_init(linear_init);

View File

@ -23,7 +23,8 @@
#include "dm.h"
struct dm_table *dm_parse(extract_line_fn line_fn, void *l_private)
struct dm_table *dm_parse(extract_line_fn line_fn, void *l_private,
dm_error_fn err_fn, void *e_private)
{
struct text_region line, word;
struct dm_table *table = dm_table_create();
@ -33,10 +34,14 @@ struct dm_table *dm_parse(extract_line_fn line_fn, void *l_private)
void *context;
int last_line_good = 1, was_error = 0;
if (table == NULL)
return NULL;
if (!table)
return 0;
#define PARSE_ERROR {last_line_good = 0; was_error = 1; continue;}
#define PARSE_ERROR(msg) {\
last_line_good = 0;\
was_error = 1;\
err_fn(msg, e_private);\
continue;}
while (line_fn(&line, l_private)) {
@ -51,31 +56,23 @@ struct dm_table *dm_parse(extract_line_fn line_fn, void *l_private)
continue;
/* sector start */
if (!dm_get_number(&line, &start)) {
table->err_msg = "expecting a number for sector start";
PARSE_ERROR;
}
if (!dm_get_number(&line, &start))
PARSE_ERROR("expecting a number for sector start");
/* length */
if (!dm_get_number(&line, &size)) {
table->err_msg = "expecting a number for region length";
PARSE_ERROR;
}
if (!dm_get_number(&line, &size))
PARSE_ERROR("expecting a number for region length");
/* target type */
if (!dm_get_word(&line, &word)) {
table->err_msg = "target type missing";
PARSE_ERROR;
}
if (!dm_get_word(&line, &word))
PARSE_ERROR("target type missing");
/* we have to copy the target type to a C str */
dm_txt_copy(target_name, sizeof(target_name), &word);
/* lookup the target type */
if (!(ttype = dm_get_target_type(target_name))) {
table->err_msg = "unable to find target type";
PARSE_ERROR;
}
if (!(ttype = dm_get_target_type(target_name)))
PARSE_ERROR("unable to find target type");
/* check there isn't a gap, but only if the last target
parsed ok. */
@ -83,46 +80,35 @@ struct dm_table *dm_parse(extract_line_fn line_fn, void *l_private)
((table->num_targets &&
start != table->highs[table->num_targets - 1] + 1) ||
(!table->num_targets && start))) {
table->err_msg = "gap in target ranges";
PARSE_ERROR;
}
(!table->num_targets && start)))
PARSE_ERROR("gap in target ranges");
/* build the target */
context = ttype->ctr(table, start, size, &line);
if (IS_ERR(context)) {
PARSE_ERROR;
}
if (ttype->ctr(table, start, size, &line, &context,
err_fn, e_private))
PARSE_ERROR("target constructor failed");
/* no point registering the target
if there was an error. */
if (was_error)
if (was_error) {
ttype->dtr(table, context);
continue;
}
/* add the target to the table */
high = start + (size - 1);
if (dm_table_add_target(table, high, ttype, context)) {
table->err_msg = "internal error adding target to table";
PARSE_ERROR;
}
/* Ensure sane block size */
if (table->blksize_size < table->hardsect_size) {
table->err_msg = "block size smaller than hardsect size";
PARSE_ERROR;
}
if (dm_table_add_target(table, high, ttype, context))
PARSE_ERROR("internal error adding target to table");
}
#undef PARSE_ERROR
if (!was_error) {
if (dm_table_complete(table) == 0)
return table;
if (was_error || dm_table_complete(table)) {
dm_table_destroy(table);
return 0;
}
dm_put_table(table);
return 0;
return table;
}
/*

View File

@ -1,22 +1,9 @@
/*
* dm-table.c
*
* Copyright (C) 2001 Sistina Software
* Copyright (C) 2001 Sistina Software (UK) Limited.
*
* This software is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2, or (at
* your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
* This file is released under the GPL.
*/
/*
@ -99,10 +86,10 @@ static int alloc_targets(struct dm_table *t, int num)
int size = (sizeof(struct target) + sizeof(offset_t)) * num;
n_highs = vmalloc(size);
if (n_highs == NULL)
if (!n_highs)
return -ENOMEM;
n_targets = (struct target *)(n_highs + num);
n_targets = (struct target *) (n_highs + num);
if (n) {
memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
@ -127,15 +114,6 @@ struct dm_table *dm_table_create(void)
memset(t, 0, sizeof(*t));
atomic_set(&t->refcnt, 1);
atomic_set(&t->pending, 0);
init_waitqueue_head(&t->wait);
t->hardsect_size = PAGE_CACHE_SIZE;
/* FIXME: Let this be specified/changed */
t->blksize_size = BLOCK_SIZE;
/* allocate a single nodes worth of targets to
begin with */
if (alloc_targets(t, KEYS_PER_NODE)) {
@ -146,18 +124,14 @@ struct dm_table *dm_table_create(void)
return t;
}
static void dm_table_destroy(struct dm_table *t)
void dm_table_destroy(struct dm_table *t)
{
int i;
if (atomic_read(&t->pending))
BUG();
/* free the indexes */
for (i = 0; i < t->depth - 1; i++) {
vfree(t->index[i]);
t->index[i] = 0;
}
/* free the indexes (see dm_table_complete) */
if (t->depth >= 2)
vfree(t->index[t->depth - 2]);
vfree(t->highs);
/* free the targets */
for (i = 0; i < t->num_targets; i++) {
@ -165,19 +139,26 @@ static void dm_table_destroy(struct dm_table *t)
if (tgt->private)
tgt->type->dtr(t, tgt->private);
}
vfree(t->highs);
/* free the device list */
if (t->devices) {
struct dev_list *d, *n;
WARN("there are still devices present, someone isn't "
"calling dm_table_remove_device");
for (d = t->devices; d; d = n) {
n = d->next;
kfree(d);
}
}
kfree(t);
}
void dm_put_table(struct dm_table *t)
{
if (atomic_dec_and_test(&t->refcnt))
dm_table_destroy(t);
}
/*
* checks to see if we need to extend highs or targets
* Checks to see if we need to extend
* highs or targets.
*/
static inline int check_space(struct dm_table *t)
{
@ -187,7 +168,100 @@ static inline int check_space(struct dm_table *t)
return 0;
}
/*
 * Convert a device path into a kdev_t.
 *
 * Fills *dev and returns 0 on success; returns a negative errno if
 * the path cannot be resolved or does not name a block device.
 *
 * Fix vs. previous version: the result parameter was declared by
 * value (kdev_t dev) although the caller passes &dev, so the looked
 * up device number never reached the caller.
 */
int lookup_device(const char *path, kdev_t *dev)
{
	int r;
	struct nameidata nd;
	struct inode *inode;

	/* NOTE(review): path_init() returning 0 is treated as "nothing
	   to walk, succeed" here without setting *dev — confirm this
	   matches the 2.4 namei contract for the callers involved. */
	if (!path_init(path, LOOKUP_FOLLOW, &nd))
		return 0;

	if ((r = path_walk(path, &nd)))
		goto bad;

	inode = nd.dentry->d_inode;
	if (!inode) {
		r = -ENOENT;
		goto bad;
	}

	if (!S_ISBLK(inode->i_mode)) {
		r = -EINVAL;
		goto bad;
	}

	*dev = inode->i_bdev->bd_dev;

      bad:
	path_release(&nd);
	return r;
}
/*
 * See if we've already got a device in the list; returns the matching
 * entry or 0.
 *
 * Fix vs. previous version: the loop increment advanced the list head
 * 'l' instead of the cursor 'tmp', so 'tmp' never moved and the scan
 * could never visit (or terminate over) the rest of the list.
 */
static struct dm_dev *find_device(struct list_head *l, kdev_t dev)
{
	struct list_head *tmp;

	for (tmp = l->next; tmp != l; tmp = tmp->next) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		if (dd->dev == dev)
			return dd;
	}

	return 0;
}
/*
 * Add a device to the table's device list, or just increment the use
 * count if it is already present.  On success *result points at the
 * (possibly shared) dm_dev entry; the entry is removed again via
 * dm_table_remove_device().
 *
 * Fixes vs. previous version: find_device() was called with the
 * undeclared identifier 'kd' (should be the freshly looked-up 'dev')
 * and with the list passed by value — list_add() below operates on
 * &t->devices, so find_device() must scan the same list head.
 */
int dm_table_add_device(struct dm_table *t, const char *path,
			struct dm_dev **result)
{
	int r;
	kdev_t dev;
	struct dm_dev *dd;

	/* convert the path to a device */
	if ((r = lookup_device(path, &dev)))
		return r;

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->dev = dev;
		dd->bd = 0;
		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);
	}
	atomic_inc(&dd->count);

	*result = dd;
	return 0;
}
/*
 * Decrement a device's use count and remove it from the table's
 * device list (freeing the entry) if necessary.
 */
void dm_table_remove_device(struct dm_table *t, struct dm_dev *dd)
{
	/* last user gone: unlink and free the shared entry */
	if (atomic_dec_and_test(&dd->count)) {
		list_del(&dd->list);
		kfree(dd);
	}
}
* adds a target to the map
*/
int dm_table_add_target(struct dm_table *t, offset_t high,
@ -211,7 +285,8 @@ int dm_table_add_target(struct dm_table *t, offset_t high,
*/
int dm_table_complete(struct dm_table *t)
{
int i, leaf_nodes;
int i, leaf_nodes, total = 0;
offset_t *indexes;
/* how many indexes will the btree have ? */
leaf_nodes = div_up(t->num_targets, KEYS_PER_NODE);
@ -221,21 +296,24 @@ int dm_table_complete(struct dm_table *t)
t->counts[t->depth - 1] = leaf_nodes;
t->index[t->depth - 1] = t->highs;
/* set up internal nodes, bottom-up */
/* allocate the space for *all* the indexes */
for (i = t->depth - 2; i >= 0; i--) {
t->counts[i] = div_up(t->counts[i + 1], CHILDREN_PER_NODE);
t->index[i] = vmalloc(NODE_SIZE * t->counts[i]);
if (!t->index[i])
goto free_indices;
total += t->counts[i];
}
if (!(indexes = vmalloc(NODE_SIZE * total)))
return -ENOMEM;
/* set up internal nodes, bottom-up */
for (i = t->depth - 2, total = 0; i >= 0; i--) {
t->index[i] = indexes + (KEYS_PER_NODE * t->counts[i]);
setup_btree_index(i, t);
}
return 0;
free_indices:
for(++i; i < t->depth - 1; i++) {
vfree(t->index[i]);
}
return -ENOMEM;
}
EXPORT_SYMBOL(dm_table_add_device);

View File

@ -13,34 +13,42 @@
#include "dm.h"
#include <linux/kmod.h>
struct tt_internal {
struct target_type tt;
struct list_head list;
long use;
};
static LIST_HEAD(_targets);
static rwlock_t _lock = RW_LOCK_UNLOCKED;
#define DM_MOD_NAME_SIZE 32
static inline struct target_type *__get_target_type(const char *name)
static inline struct tt_internal *__find_target_type(const char *name)
{
struct list_head *tmp, *head;
struct target_type *t;
struct list_head *tmp;
struct tt_internal *ti;
for(tmp = _targets.next; tmp != &_targets; tmp = tmp->next) {
t = list_entry(tmp, struct target_type, list);
if (!strcmp(name, t->name)) {
if (!t->use && t->module)
__MOD_INC_USE_COUNT(t->module);
t->use++;
ti = list_entry(tmp, struct tt_internal, list);
if (!strcmp(name, ti->tt.name))
return t;
}
}
return 0;
}
static struct target_type *get_target_type(const char *name)
static struct tt_internal *get_target_type(const char *name)
{
struct tt_internal *ti;
read_lock(&_lock);
t = __get_target_type(name);
ti = __get_target_type(name);
if (ti->use == 0 && ti->tt.module)
__MOD_INC_USE_COUNT(ti->tt.module);
ti->use++;
read_unlock(&_lock);
return t;
@ -54,44 +62,60 @@ static void load_module(const char *name)
if (strlen(name) > (DM_MOD_NAME_SIZE - 4))
return NULL;
/* strcat() is only safe due to length check above */
strcat(module_name, name);
request_module(module_name);
}
struct target_type *dm_get_target_type(const char *name)
{
t = get_target_type(name);
struct tt_internal *ti = get_target_type(name);
if (!t) {
if (!ti) {
load_module(name);
t = get_target_type(name);
ti = get_target_type(name);
}
return t;
return ti ? &ti->tt : 0;
}
void dm_put_target_type(struct target_type *t)
{
read_lock(&_lock);
if (--t->use == 0 && t->module)
__MOD_DEC_USE_COUNT(t->module);
struct tt_internal *ti = (struct target_type *) t;
if (t->use < 0)
read_lock(&_lock);
if (--ti->use == 0 && ti->tt.module)
__MOD_DEC_USE_COUNT(t->tt.module);
if (ti->use < 0)
BUG();
read_unlock(&_lock);
}
static int alloc_target(struct target_type *t)
{
struct tt_internal *ti = kmalloc(sizeof(*ti));
if (ti) {
memset(ti, 0, sizeof(*ti));
ti->tt = t;
}
return ti;
}
int dm_register_target(struct target_type *t)
{
int rv = 0;
struct tt_internal *ti = alloc_target(t);
if (!ti)
return -ENOMEM;
write_lock(&_lock);
if (__get_target_type(t->name)) {
if (__find_target_type(t->name))
rv = -EEXIST;
goto out;
}
list_add(&t->list, &_targets);
else
list_add(&ti->list, &_targets);
out:
write_unlock(&_lock);
@ -100,11 +124,13 @@ int dm_register_target(struct target_type *t)
int dm_unregister_target(struct target_type *t)
{
struct tt_internal *ti = (struct tt_internal *) t;
int rv = -ETXTBSY;
write_lock(&_lock);
if (t->use == 0) {
list_del(&t->list);
if (ti->use == 0) {
list_del(&ti->list);
kfree(ti);
rv = 0;
}
write_unlock(&_lock);
@ -116,11 +142,11 @@ int dm_unregister_target(struct target_type *t)
* io-err: always fails an io, useful for bringing
* up LV's that have holes in them.
*/
static void *io_err_ctr(struct dm_table *t, offset_t b, offset_t l,
struct text_region *args)
static int io_err_ctr(struct dm_table *t, offset_t b, offset_t l,
struct text_region *args, void **context)
{
/* this takes no arguments */
return NULL;
*context = 0;
return 0;
}
static void io_err_dtr(struct dm_table *t, void *c)

View File

@ -35,27 +35,28 @@
#define DEVICE_OFF(d) /* do-nothing */
#include <linux/blk.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h>
#include <linux/lvm.h>
#include <linux/kmod.h>
#define MAX_DEVICES 64
#define DEFAULT_READ_AHEAD 64
const char *_name = "device-mapper";
int _version[3] = { 0, 1, 0 };
int _version[3] = {0, 1, 0};
struct io_hook {
struct dm_table *table;
struct target *target;
struct mapped_device *md;
int rw;
void (*end_io) (struct buffer_head * bh, int uptodate);
void (*end_io)(struct buffer_head * bh, int uptodate);
void *context;
};
kmem_cache_t *_io_hook_cache;
#define rl down_read(&_dev_lock)
#define ru up_read(&_dev_lock)
#define wl down_write(&_dev_lock)
#define wu up_write(&_dev_lock)
struct rw_semaphore _dev_lock;
static struct mapped_device *_devs[MAX_DEVICES];
@ -79,13 +80,15 @@ static int dm_init(void)
init_rwsem(&_dev_lock);
if (!(_io_hook_cache =
kmem_cache_create("dm io hooks", sizeof (struct io_hook),
0, 0, NULL, NULL)))
if (!_io_hook_cache)
_io_hook_cache = kmem_cache_create("dm io hooks",
sizeof(struct io_hook),
0, 0, NULL, NULL);
if (!_io_hook_cache)
return -ENOMEM;
if ((ret = dmfs_init()) || (ret = dm_target_init())
|| (ret = dm_init_blkdev()))
if ((ret = dm_fs_init()) || (ret = dm_target_init()))
return ret;
/* set up the arrays */
@ -112,9 +115,9 @@ static void dm_exit(void)
{
if (kmem_cache_destroy(_io_hook_cache))
WARN("it looks like there are still some io_hooks allocated");
_io_hook_cache = 0;
dmfs_exit();
dm_cleanup_blkdev();
dm_fs_exit();
if (devfs_unregister_blkdev(MAJOR_NR, _name) < 0)
printk(KERN_ERR "%s -- unregister_blkdev failed\n", _name);
@ -139,16 +142,16 @@ static int dm_blk_open(struct inode *inode, struct file *file)
if (minor >= MAX_DEVICES)
return -ENXIO;
down_write(&_dev_lock);
wl;
md = _devs[minor];
if (!md || !is_active(md)) {
up_write(&_dev_lock);
wu;
return -ENXIO;
}
md->use_count++;
up_write(&_dev_lock);
wu;
MOD_INC_USE_COUNT;
return 0;
@ -162,16 +165,16 @@ static int dm_blk_close(struct inode *inode, struct file *file)
if (minor >= MAX_DEVICES)
return -ENXIO;
down_write(&_dev_lock);
wl;
md = _devs[minor];
if (!md || md->use_count < 1) {
WARN("reference count in mapped_device incorrect");
up_write(&_dev_lock);
wu;
return -ENXIO;
}
md->use_count--;
up_write(&_dev_lock);
wu;
MOD_DEC_USE_COUNT;
return 0;
@ -257,8 +260,7 @@ static int dm_blk_ioctl(struct inode *inode, struct file *file,
return dm_user_bmap(inode, (struct lv_bmap *)a);
default:
printk(KERN_WARNING "%s - unknown block ioctl %d",
_name, command);
WARN("unknown block ioctl %d", command);
return -EINVAL;
}
@ -282,7 +284,7 @@ static inline void free_io_hook(struct io_hook *ih)
*/
static inline struct deferred_io *alloc_deferred(void)
{
return kmalloc(sizeof (struct deferred_io), GFP_NOIO);
return kmalloc(sizeof(struct deferred_io), GFP_NOIO);
}
static inline void free_deferred(struct deferred_io *di)
@ -290,6 +292,19 @@ static inline void free_deferred(struct deferred_io *di)
kfree(di);
}
/*
* call a targets optional error function if
* an io failed.
*/
static inline int call_err_fn(struct io_hook *ih, struct buffer_head *bh)
{
dm_err_fn err = ih->target->type->err;
if (err)
return err(bh, ih->rw, ih->target->private);
return 0;
}
/*
* bh->b_end_io routine that decrements the
* pending count and then calls the original
@ -299,14 +314,12 @@ static void dec_pending(struct buffer_head *bh, int uptodate)
{
struct io_hook *ih = bh->b_private;
if (!uptodate && ih->target->type->err) {
if (ih->target->type->err(bh, ih->rw, ih->target->private))
return;
}
if (!uptodate && call_err_fn(ih, bh))
return;
if (atomic_dec_and_test(&ih->table->pending))
if (atomic_dec_and_test(&ih->md->pending))
/* nudge anyone waiting on suspend queue */
wake_up(&ih->table->wait);
wake_up(&ih->md->wait);
bh->b_end_io = ih->end_io;
bh->b_private = ih->context;
@ -325,9 +338,9 @@ static int queue_io(struct mapped_device *md, struct buffer_head *bh, int rw)
if (!di)
return -ENOMEM;
down_write(&_dev_lock);
wl;
if (test_bit(DM_ACTIVE, &md->state)) {
up_write(&_dev_lock);
wu;
return 0;
}
@ -335,7 +348,7 @@ static int queue_io(struct mapped_device *md, struct buffer_head *bh, int rw)
di->rw = rw;
di->next = md->deferred;
md->deferred = di;
up_write(&_dev_lock);
wu;
return 1;
}
@ -346,10 +359,10 @@ static int queue_io(struct mapped_device *md, struct buffer_head *bh, int rw)
static inline int __map_buffer(struct mapped_device *md,
struct buffer_head *bh, int rw, int leaf)
{
int r;
dm_map_fn fn;
void *context;
struct io_hook *ih = NULL;
int r;
struct target *ti = md->map->targets + leaf;
fn = ti->type->map;
@ -360,7 +373,7 @@ static inline int __map_buffer(struct mapped_device *md,
if (!ih)
return 0;
ih->table = md->map;
ih->md = md;
ih->rw = rw;
ih->target = ti;
ih->end_io = bh->b_end_io;
@ -370,7 +383,7 @@ static inline int __map_buffer(struct mapped_device *md,
if (r > 0) {
/* hook the end io request fn */
atomic_inc(&md->map->pending);
atomic_inc(&md->pending);
bh->b_end_io = dec_pending;
bh->b_private = ih;
@ -406,8 +419,12 @@ static inline int __find_node(struct dm_table *t, struct buffer_head *bh)
return (KEYS_PER_NODE * n) + k;
}
/* FIXME: Break this up! */
static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb)
{
#if 1
return -EINVAL;
#else
struct buffer_head bh;
struct mapped_device *md;
unsigned long block;
@ -431,20 +448,18 @@ static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb)
bh.b_rsector = block * (bh.b_size >> 9);
err = -EINVAL;
down_read(&_dev_lock);
rl;
if (test_bit(DM_ACTIVE, &md->state)) {
struct target *t = md->map->targets + __find_node(md->map, &bh);
struct target_type *target = t->type;
if (target->flags & TF_BMAP) {
err = target->map(&bh, READ, t->private);
if (bh.b_private) {
struct io_hook *ih = (struct io_hook *)bh.b_private;
free_io_hook(ih);
}
err = (err == 0) ? -EINVAL : 0;
err = target->map(&bh, READ, t->private);
if (bh.b_private) {
struct io_hook *ih = (struct io_hook *)bh.b_private;
free_io_hook(ih);
}
err = (err == 0) ? -EINVAL : 0;
}
up_read(&_dev_lock);
ru;
if (err == 0) {
if (put_user(kdev_t_to_nr(bh.b_rdev), &lvb->lv_dev))
@ -454,6 +469,7 @@ static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb)
}
return err;
#endif
}
static int request(request_queue_t *q, int rw, struct buffer_head *bh)
@ -464,7 +480,7 @@ static int request(request_queue_t *q, int rw, struct buffer_head *bh)
if (minor >= MAX_DEVICES)
goto bad_no_lock;
down_read(&_dev_lock);
rl;
md = _devs[minor];
if (!md || !md->map)
@ -472,28 +488,28 @@ static int request(request_queue_t *q, int rw, struct buffer_head *bh)
/* if we're suspended we have to queue this io for later */
if (!test_bit(DM_ACTIVE, &md->state)) {
up_read(&_dev_lock);
ru;
r = queue_io(md, bh, rw);
if (r < 0)
goto bad_no_lock;
else if (r > 0)
return 0; /* deferred successfully */
return 0; /* deferred successfully */
down_read(&_dev_lock); /* FIXME: there's still a race here */
rl; /* FIXME: there's still a race here */
}
if (!__map_buffer(md, bh, rw, __find_node(md->map, bh)))
goto bad;
up_read(&_dev_lock);
ru;
return 1;
bad:
up_read(&_dev_lock);
bad:
ru;
bad_no_lock:
bad_no_lock:
buffer_IO_error(bh);
return 0;
}
@ -534,19 +550,19 @@ static inline int __any_old_dev(void)
*/
static struct mapped_device *alloc_dev(int minor)
{
struct mapped_device *md = kmalloc(sizeof (*md), GFP_KERNEL);
struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
if (!md)
return 0;
memset(md, 0, sizeof (*md));
down_write(&_dev_lock);
wl;
minor = (minor < 0) ? __any_old_dev() : __specific_dev(minor);
if (minor < 0) {
WARN("no free devices available");
up_write(&_dev_lock);
wu;
kfree(md);
return 0;
}
@ -555,19 +571,90 @@ static struct mapped_device *alloc_dev(int minor)
md->name[0] = '\0';
md->state = 0;
init_waitqueue_head(&md->wait);
_devs[minor] = md;
up_write(&_dev_lock);
wu;
return md;
}
/*
 * Open a device so we can use it as a map destination.
 * Returns 0 on success or a negative errno; d->bd holds the opened
 * block_device afterwards (and stays NULL/0 on failure).
 *
 * Fix vs. previous version: when blkdev_get() failed, d->bd was left
 * pointing at the already-dropped block_device, so a later
 * close_dev() (e.g. from open_devices()'s unwind path) would put it
 * a second time, and a retried open_dev() would hit the BUG().
 */
static int open_dev(struct dm_dev *d)
{
	int err;

	if (d->bd)
		BUG();

	if (!(d->bd = bdget(kdev_t_to_nr(d->dev))))
		return -ENOMEM;

	if ((err = blkdev_get(d->bd, FMODE_READ|FMODE_WRITE, 0, BDEV_FILE))) {
		bdput(d->bd);
		d->bd = 0;
		return err;
	}

	return 0;
}
/*
 * Close a device that we've been using; no-op if the device was
 * never opened (d->bd unset).
 */
static void close_dev(struct dm_dev *d)
{
	if (d->bd) {
		blkdev_put(d->bd, BDEV_FILE);
		bdput(d->bd);
		d->bd = 0;
	}
}
/*
 * Open a list of devices; on the first failure the whole list is
 * swept closed again and the error returned.
 */
static int open_devices(struct list_head *devices)
{
	int r = 0;
	struct list_head *tmp;

	for (tmp = devices->next; tmp != devices; tmp = tmp->next) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		if ((r = open_dev(dd)))
			goto bad;
	}
	return 0;

      bad:
	/* close_dev() skips never-opened entries (bd unset).
	   NOTE(review): if open_dev() fails after bdget(), it leaves
	   d->bd set, so the sweep below would put that device a second
	   time — verify open_dev()'s failure path. */
	close_devices(devices);
	return r;
}
/*
* Close a list of devices.
*/
static void close_devices(struct dm_list *devices)
{
struct list_head *tmp;
for (tmp = devices->next; tmp != devices; tmp = tmp->next) {
struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
close_dev(dd);
}
}
struct mapped_device *dm_find_by_minor(int minor)
{
struct mapped_device *md;
down_read(&_dev_lock);
rl;
md = _devs[minor];
up_read(&_dev_lock);
ru;
return md;
}
@ -575,10 +662,10 @@ struct mapped_device *dm_find_by_minor(int minor)
static int register_device(struct mapped_device *md)
{
md->devfs_entry =
devfs_register(_dev_dir, md->name, DEVFS_FL_CURRENT_OWNER,
MAJOR(md->dev), MINOR(md->dev),
S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP,
&dm_blk_dops, NULL);
devfs_register(_dev_dir, md->name, DEVFS_FL_CURRENT_OWNER,
MAJOR(md->dev), MINOR(md->dev),
S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP,
&dm_blk_dops, NULL);
if (!md->devfs_entry)
return -ENOMEM;
@ -592,43 +679,6 @@ static int unregister_device(struct mapped_device *md)
return 0;
}
#ifdef CONFIG_HOTPLUG
static void dm_sbin_hotplug(struct mapped_device *md, int create)
{
int i;
char *argv[3];
char *envp[5];
char name[DM_NAME_LEN + 16];
if (!hotplug_path[0])
return;
if (!current->fs->root)
return;
sprintf(name, "DMNAME=%s\n", md->name);
i = 0;
argv[i++] = hotplug_path;
argv[i++] = "devmap";
argv[i] = 0;
i = 0;
envp[i++] = "HOME=/";
envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
envp[i++] = name;
if (create)
envp[i++] = "ACTION=add";
else
envp[i++] = "ACTION=remove";
envp[i] = 0;
call_usermodehelper(argv[0], argv, envp);
}
#else
#define dm_sbin_hotplug(md, create) do { } while(0)
#endif /* CONFIG_HOTPLUG */
/*
* constructor for a new device
*/
@ -643,15 +693,14 @@ struct mapped_device *dm_create(const char *name, int minor)
if (!(md = alloc_dev(minor)))
return ERR_PTR(-ENXIO);
down_write(&_dev_lock);
wl;
strcpy(md->name, name);
_devs[minor] = md;
if ((r = register_device(md))) {
up_write(&_dev_lock);
wu;
return ERR_PTR(r);
}
up_write(&_dev_lock);
dm_sbin_hotplug(md, 1);
wu;
return md;
}
@ -665,27 +714,44 @@ int dm_remove(struct mapped_device *md)
{
int minor, r;
down_write(&_dev_lock);
wl;
if (md->use_count) {
up_write(&_dev_lock);
wu;
return -EPERM;
}
if ((r = unregister_device(md))) {
up_write(&_dev_lock);
wu;
return r;
}
minor = MINOR(md->dev);
_devs[minor] = 0;
up_write(&_dev_lock);
wu;
dm_sbin_hotplug(md, 0);
kfree(md);
return 0;
}
/*
* the hardsect size for a mapped device is the
* smallest hard sect size from the devices it
* maps onto.
*/
static int __find_hardsect_size(struct dev_list *dl)
{
int result = INT_MAX, size;
while(dl) {
size = get_hardsect_size(dl->dev);
if (size < result)
result = size;
dl = dl->next;
}
return result;
}
/*
* Bind a table to the device.
*/
@ -695,11 +761,11 @@ void __bind(struct mapped_device *md, struct dm_table *t)
md->map = t;
/* In 1024-byte units */
/* in k */
_block_size[minor] = (t->highs[t->num_targets - 1] + 1) >> 1;
_blksize_size[minor] = t->blksize_size;
_hardsect_size[minor] = t->hardsect_size;
_blksize_size[minor] = BLOCK_SIZE;
_hardsect_size[minor] = __find_hardsect_size(t->devices);
register_disk(NULL, md->dev, 1, &dm_blk_dops, _block_size[minor]);
}
@ -725,23 +791,30 @@ static void __flush_deferred_io(struct mapped_device *md)
*/
int dm_activate(struct mapped_device *md, struct dm_table *table)
{
int r;
/* check that the mapping has at least been loaded. */
if (!table->num_targets)
return -EINVAL;
down_write(&_dev_lock);
wl;
/* you must be deactivated first */
if (is_active(md)) {
up_write(&_dev_lock);
wu;
return -EPERM;
}
__bind(md, table);
if ((r = open_devices(md->map->devices))) {
wu;
return r;
}
set_bit(DM_ACTIVE, &md->state);
__flush_deferred_io(md);
up_write(&_dev_lock);
wu;
return 0;
}
@ -752,26 +825,27 @@ int dm_activate(struct mapped_device *md, struct dm_table *table)
*/
int dm_deactivate(struct mapped_device *md)
{
down_read(&_dev_lock);
rl;
if (md->use_count) {
up_read(&_dev_lock);
ru;
return -EPERM;
}
fsync_dev(md->dev);
up_read(&_dev_lock);
ru;
down_write(&_dev_lock);
wl;
if (md->use_count) {
/* drat, somebody got in quick ... */
up_write(&_dev_lock);
wu;
return -EPERM;
}
close_devices(md->map->devices);
md->map = 0;
clear_bit(DM_ACTIVE, &md->state);
up_write(&_dev_lock);
wu;
return 0;
}
@ -789,33 +863,34 @@ void dm_suspend(struct mapped_device *md)
{
DECLARE_WAITQUEUE(wait, current);
down_write(&_dev_lock);
wl;
if (!is_active(md)) {
up_write(&_dev_lock);
wu;
return;
}
clear_bit(DM_ACTIVE, &md->state);
up_write(&_dev_lock);
wu;
/* wait for all the pending io to flush */
add_wait_queue(&md->map->wait, &wait);
add_wait_queue(&md->wait, &wait);
current->state = TASK_UNINTERRUPTIBLE;
do {
down_write(&_dev_lock);
if (!atomic_read(&md->map->pending))
wl;
if (!atomic_read(&md->pending))
break;
up_write(&_dev_lock);
wu;
schedule();
} while (1);
current->state = TASK_RUNNING;
remove_wait_queue(&md->map->wait, &wait);
close_devices(md->map->devices);
md->map = 0;
up_write(&_dev_lock);
wu;
}
struct block_device_operations dm_blk_dops = {

View File

@ -3,20 +3,7 @@
*
* Copyright (C) 2001 Sistina Software
*
* This software is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2, or (at
* your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
* This file is released under the GPL.
*/
/*
@ -118,7 +105,6 @@
#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H
#include <linux/config.h>
#include <linux/version.h>
#include <linux/major.h>
#include <linux/iobuf.h>
@ -145,6 +131,19 @@ enum {
DM_ACTIVE, /* device is running */
};
/*
* list of devices that a metadevice uses
* and hence should open/close.
*/
struct dm_dev {
atomic_t count;
struct list_head list;
kdev_t dev;
struct block_device *bd;
};
/*
* io that had to be deferred while we were
* suspended
@ -167,23 +166,18 @@ struct target {
* the btree
*/
struct dm_table {
atomic_t refcnt;
char *err_msg;
/* btree table */
int depth;
int counts[MAX_DEPTH]; /* in nodes */
offset_t *index[MAX_DEPTH];
int blksize_size;
int hardsect_size;
int num_targets;
int num_allocated;
offset_t *highs;
struct target *targets;
atomic_t pending;
wait_queue_head_t wait;
/* a list of devices used by this table */
struct list_head *devices;
};
/*
@ -198,25 +192,25 @@ struct mapped_device {
int state;
/* a list of io's that arrived while we were suspended */
atomic_t pending;
wait_queue_head_t wait;
struct deferred_io *deferred;
struct dm_table *map;
/* used by dm-fs.c */
devfs_handle_t devfs_entry;
struct proc_dir_entry *pde;
};
extern struct block_device_operations dm_blk_dops;
/* dm-target.c */
int dm_target_init(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *t);
int dm_target_init(void);
/* dm.c */
struct mapped_device *dm_find_by_name(const char *name);
struct mapped_device *dm_find_by_minor(int minor);
struct mapped_device *dm_create(const char *name, int minor);
@ -230,7 +224,7 @@ void dm_suspend(struct mapped_device *md);
/* dm-table.c */
struct dm_table *dm_table_create(void);
void dm_put_table(struct dm_table *t);
void dm_table_destroy(struct dm_table *t);
int dm_table_add_target(struct dm_table *t, offset_t high,
struct target_type *type, void *private);
@ -240,7 +234,8 @@ int dm_table_complete(struct dm_table *t);
typedef int (*extract_line_fn)(struct text_region *line,
void *private);
struct dm_table *dm_parse(extract_line_fn line_fn, void *line_private);
struct dm_table *dm_parse(extract_line_fn line_fn, void *line_private,
dm_error_fn err_fn, void *err_private);
static inline int dm_empty_tok(struct text_region *txt)
@ -248,13 +243,9 @@ static inline int dm_empty_tok(struct text_region *txt)
return txt->b >= txt->e;
}
/* dm-blkdev.c */
int dm_init_blkdev(void);
void dm_cleanup_blkdev(void);
/* dm-fs.c */
int dmfs_init(void);
void dmfs_exit(void);
int dm_fs_init(void);
void dm_fs_exit(void);
@ -282,8 +273,4 @@ static inline int is_active(struct mapped_device *md)
return test_bit(DM_ACTIVE, &md->state);
}
/*
* FIXME: these are too big to be inlines
*/
#endif