Mirror of https://github.com/samba-team/samba.git (synced 2025-01-08 21:18:16 +03:00)
d734547488
Now we have one fixed field for the exclusive lock holder and an array of shared locks. This way we prioritize writers over readers: if a pending write comes in while readers are active, it puts itself into the exclusive slot and then waits for the readers to vanish. Only when all readers are gone is the exclusive lock request granted. New readers just look at the exclusive slot, see that it is taken, and line up as watchers, retrying whenever things change.

This also makes it cheaper to support many shared locks: granting a shared lock just means extending the array; we don't have to walk the array for possible conflicts.

This also adds explicit UPGRADE and DOWNGRADE operations for better error checking.

Signed-off-by: Volker Lendecke <vl@samba.org>
Reviewed-by: Jeremy Allison <jra@samba.org>
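Below is a minimal standalone sketch of the scheme the commit message describes: one exclusive slot plus a growable array of shared holders. The names (lock_state, try_shared, try_exclusive) and the in-memory model are illustrative assumptions only, not the actual g_lock record format or internals.

/*
 * Illustrative model only: one exclusive slot plus a growable array of
 * shared holders.  Not the real g_lock on-disk record or API.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct lock_state {
	unsigned exclusive;	/* 0 == slot free, otherwise holder id */
	unsigned *shared;	/* array of shared holder ids */
	size_t num_shared;
};

/*
 * A reader only inspects the exclusive slot.  If it is taken, the reader
 * lines up as a watcher and retries later (modelled as returning false).
 * Granting just extends the array; no walk over existing entries.
 */
static bool try_shared(struct lock_state *s, unsigned id)
{
	unsigned *tmp;

	if (s->exclusive != 0) {
		return false;		/* writer pending or active */
	}
	tmp = realloc(s->shared, (s->num_shared + 1) * sizeof(unsigned));
	if (tmp == NULL) {
		return false;
	}
	s->shared = tmp;
	s->shared[s->num_shared++] = id;
	return true;
}

/*
 * A writer claims the exclusive slot right away, which blocks new readers,
 * but the lock is only granted once all existing readers are gone.
 */
static bool try_exclusive(struct lock_state *s, unsigned id)
{
	if ((s->exclusive != 0) && (s->exclusive != id)) {
		return false;		/* another writer holds the slot */
	}
	s->exclusive = id;
	return (s->num_shared == 0);
}

int main(void)
{
	struct lock_state s = { .exclusive = 0, .shared = NULL, .num_shared = 0 };

	printf("reader 100 granted: %d\n", try_shared(&s, 100));	/* 1 */
	printf("writer 200 granted: %d\n", try_exclusive(&s, 200));	/* 0: pending */
	printf("reader 300 granted: %d\n", try_shared(&s, 300));	/* 0: slot taken */

	s.num_shared = 0;	/* reader 100 goes away */
	printf("writer 200 retry:   %d\n", try_exclusive(&s, 200));	/* 1 */

	free(s.shared);
	return 0;
}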
69 lines | 2.0 KiB | C
/*
   Unix SMB/CIFS implementation.
   global locks based on dbwrap and messaging
   Copyright (C) 2009 by Volker Lendecke

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef _G_LOCK_H_
#define _G_LOCK_H_

#include "dbwrap/dbwrap.h"

struct g_lock_ctx;
struct messaging_context;

enum g_lock_type {
	G_LOCK_READ,
	G_LOCK_WRITE,
	G_LOCK_UPGRADE,
	G_LOCK_DOWNGRADE,
};

struct g_lock_ctx *g_lock_ctx_init_backend(
	TALLOC_CTX *mem_ctx,
	struct messaging_context *msg,
	struct db_context **backend);
struct g_lock_ctx *g_lock_ctx_init(TALLOC_CTX *mem_ctx,
				   struct messaging_context *msg);

struct tevent_req *g_lock_lock_send(TALLOC_CTX *mem_ctx,
				    struct tevent_context *ev,
				    struct g_lock_ctx *ctx,
				    TDB_DATA key,
				    enum g_lock_type type);
NTSTATUS g_lock_lock_recv(struct tevent_req *req);
NTSTATUS g_lock_lock(struct g_lock_ctx *ctx, TDB_DATA key,
		     enum g_lock_type lock_type, struct timeval timeout);
NTSTATUS g_lock_unlock(struct g_lock_ctx *ctx, TDB_DATA key);

NTSTATUS g_lock_write_data(struct g_lock_ctx *ctx, TDB_DATA key,
			   const uint8_t *buf, size_t buflen);

int g_lock_locks(struct g_lock_ctx *ctx,
		 int (*fn)(TDB_DATA key, void *private_data),
		 void *private_data);
NTSTATUS g_lock_dump(struct g_lock_ctx *ctx,
		     TDB_DATA key,
		     void (*fn)(struct server_id exclusive,
				size_t num_shared,
				struct server_id *shared,
				const uint8_t *data,
				size_t datalen,
				void *private_data),
		     void *private_data);

#endif
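For context, here is a hedged usage sketch of the API declared above. It assumes the caller already has a talloc context and a messaging_context (that setup is not shown); the function name update_shared_state, the key string "example_key", and the use of "includes.h" for the surrounding Samba headers are illustrative assumptions, and error handling is abbreviated.

/*
 * Usage sketch only: take the write lock on a key, store data under it,
 * then release the lock.  Assumes Samba's internal headers via includes.h.
 */
#include "includes.h"
#include "g_lock.h"

static NTSTATUS update_shared_state(TALLOC_CTX *mem_ctx,
				    struct messaging_context *msg,
				    const uint8_t *buf, size_t buflen)
{
	struct g_lock_ctx *ctx = NULL;
	struct timeval timeout = { .tv_sec = 10, .tv_usec = 0 };
	TDB_DATA key;
	NTSTATUS status;

	ctx = g_lock_ctx_init(mem_ctx, msg);
	if (ctx == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	/* "example_key" is an illustrative key name, not one used by Samba */
	key = (TDB_DATA) {
		.dptr = discard_const_p(uint8_t, "example_key"),
		.dsize = strlen("example_key"),
	};

	/*
	 * Take the exclusive (write) lock; readers arriving later will see
	 * the taken exclusive slot and wait.
	 */
	status = g_lock_lock(ctx, key, G_LOCK_WRITE, timeout);
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(ctx);
		return status;
	}

	/* Store data under the key while holding the write lock. */
	status = g_lock_write_data(ctx, key, buf, buflen);

	g_lock_unlock(ctx, key);
	TALLOC_FREE(ctx);
	return status;
}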