110b24eb1a
When counting and checking hard links in an ntfs file record,
struct MFT_REC {
	struct NTFS_RECORD_HEADER rhdr; // 'FILE'
	__le16 seq;          // 0x10: Sequence number for this record.
>>	__le16 hard_links;   // 0x12: The number of hard links to record.
	__le16 attr_off;     // 0x14: Offset to attributes.
	...
the ntfs3 driver ignored short names (DOS names), causing the link count
to be reduced by 1 and messages to be output to dmesg.
For Windows, such a situation is a minor error: chkdsk does not report errors
on such a volume, and when run with the /f switch it silently corrects them
while reporting that no errors were found. This does not affect
the consistency of the file system.
Nevertheless, the behavior in the ntfs3 driver is incorrect and
changes the content of the file system. This patch should fix that.
PS: most likely, the concepts MFT_REC::hard_links and inode::__i_nlink
have been confused.
Fixes: 82cae269cf ("fs/ntfs3: Add initialization of super block")
Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
Cc: stable@vger.kernel.org
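
For context, an editor's sketch (not the code from this patch) of the counting
rule the message describes: as the message above implies, the on-disk
MFT_REC::hard_links value counts DOS (8.3) short names too, so a recount done
by the driver must not skip $FILE_NAME attributes of the DOS type. The sketch
only uses helpers visible in the file below (mi_enum_attr(), ATTR_NAME); the
function name is made up for illustration.

/*
 * Illustration only: count file name attributes in one MFT record,
 * DOS short names included, so the result can be compared against
 * MFT_REC::hard_links.
 */
static u16 count_fname_links(struct mft_inode *mi)
{
	struct ATTRIB *attr = NULL;
	u16 links = 0;

	while ((attr = mi_enum_attr(mi, attr))) {
		/* A DOS (8.3) short name is still a link; do not skip it. */
		if (attr->type == ATTR_NAME)
			links += 1;
	}

	return links;
}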
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/fs.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
			       const __le16 *name, u8 name_len,
			       const u16 *upcase)
{
	/* First, compare the type codes. */
	int diff = le32_to_cpu(left->type) - le32_to_cpu(type);

	if (diff)
		return diff;

	/* They have the same type code, so we have to compare the names. */
	return ntfs_cmp_names(attr_name(left), left->name_len, name, name_len,
			      upcase, true);
}

/*
 * mi_new_attt_id
 *
 * Return: Unused attribute id that is less than mrec->next_attr_id.
 */
static __le16 mi_new_attt_id(struct mft_inode *mi)
{
	u16 free_id, max_id, t16;
	struct MFT_REC *rec = mi->mrec;
	struct ATTRIB *attr;
	__le16 id;

	id = rec->next_attr_id;
	free_id = le16_to_cpu(id);
	if (free_id < 0x7FFF) {
		rec->next_attr_id = cpu_to_le16(free_id + 1);
		return id;
	}

	/* One record can store up to 1024/24 ~= 42 attributes. */
	free_id = 0;
	max_id = 0;

	attr = NULL;

	for (;;) {
		attr = mi_enum_attr(mi, attr);
		if (!attr) {
			rec->next_attr_id = cpu_to_le16(max_id + 1);
			mi->dirty = true;
			return cpu_to_le16(free_id);
		}

		t16 = le16_to_cpu(attr->id);
		if (t16 == free_id) {
			free_id += 1;
			attr = NULL;
		} else if (max_id < t16)
			max_id = t16;
	}
}

int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi)
{
	int err;
	struct mft_inode *m = kzalloc(sizeof(struct mft_inode), GFP_NOFS);

	if (!m)
		return -ENOMEM;

	err = mi_init(m, sbi, rno);
	if (err) {
		kfree(m);
		return err;
	}

	err = mi_read(m, false);
	if (err) {
		mi_put(m);
		return err;
	}

	*mi = m;
	return 0;
}

void mi_put(struct mft_inode *mi)
{
	mi_clear(mi);
	kfree(mi);
}

int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno)
{
	mi->sbi = sbi;
	mi->rno = rno;
	mi->mrec = kmalloc(sbi->record_size, GFP_NOFS);
	if (!mi->mrec)
		return -ENOMEM;

	return 0;
}

/*
 * mi_read - Read MFT data.
 */
int mi_read(struct mft_inode *mi, bool is_mft)
{
	int err;
	struct MFT_REC *rec = mi->mrec;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 bpr = sbi->record_size;
	u64 vbo = (u64)mi->rno << sbi->record_bits;
	struct ntfs_inode *mft_ni = sbi->mft.ni;
	struct runs_tree *run = mft_ni ? &mft_ni->file.run : NULL;
	struct rw_semaphore *rw_lock = NULL;

	if (is_mounted(sbi)) {
		if (!is_mft && mft_ni) {
			rw_lock = &mft_ni->file.run_lock;
			down_read(rw_lock);
		}
	}

	err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
	if (rw_lock)
		up_read(rw_lock);
	if (!err)
		goto ok;

	if (err == -E_NTFS_FIXUP) {
		mi->dirty = true;
		goto ok;
	}

	if (err != -ENOENT)
		goto out;

	if (rw_lock) {
		ni_lock(mft_ni);
		down_write(rw_lock);
	}
	err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run,
				 vbo >> sbi->cluster_bits);
	if (rw_lock) {
		up_write(rw_lock);
		ni_unlock(mft_ni);
	}
	if (err)
		goto out;

	if (rw_lock)
		down_read(rw_lock);
	err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
	if (rw_lock)
		up_read(rw_lock);

	if (err == -E_NTFS_FIXUP) {
		mi->dirty = true;
		goto ok;
	}
	if (err)
		goto out;

ok:
	/* Check field 'total' only here. */
	if (le32_to_cpu(rec->total) != bpr) {
		err = -EINVAL;
		goto out;
	}

	return 0;

out:
	if (err == -E_NTFS_CORRUPT) {
		ntfs_err(sbi->sb, "mft corrupted");
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		err = -EINVAL;
	}

	return err;
}

/*
 * mi_enum_attr - start/continue attributes enumeration in record.
 *
 * NOTE: mi->mrec - memory of size sbi->record_size
 * here we are sure that mi->mrec->total == sbi->record_size (see mi_read)
 */
struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
{
	const struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 t32, off, asize, prev_type;
	u16 t16;
	u64 data_size, alloc_size, tot_size;

	if (!attr) {
		u32 total = le32_to_cpu(rec->total);

		off = le16_to_cpu(rec->attr_off);

		if (used > total)
			return NULL;

		if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
		    !IS_ALIGNED(off, 4)) {
			return NULL;
		}

		/* Skip non-resident records. */
		if (!is_rec_inuse(rec))
			return NULL;

		prev_type = 0;
		attr = Add2Ptr(rec, off);
	} else {
		/* Check if input attr inside record. */
		off = PtrOffset(rec, attr);
		if (off >= used)
			return NULL;

		asize = le32_to_cpu(attr->size);
		if (asize < SIZEOF_RESIDENT) {
			/* Impossible 'cause we should not return such attribute. */
			return NULL;
		}

		/* Overflow check. */
		if (off + asize < off)
			return NULL;

		prev_type = le32_to_cpu(attr->type);
		attr = Add2Ptr(attr, asize);
		off += asize;
	}

	asize = le32_to_cpu(attr->size);

	/* Can we use the first field (attr->type). */
	if (off + 8 > used) {
		static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
		return NULL;
	}

	if (attr->type == ATTR_END) {
		/* End of enumeration. */
		return NULL;
	}

	/* 0x100 is last known attribute for now. */
	t32 = le32_to_cpu(attr->type);
	if (!t32 || (t32 & 0xf) || (t32 > 0x100))
		return NULL;

	/* attributes in record must be ordered by type */
	if (t32 < prev_type)
		return NULL;

	/* Check overflow and boundary. */
	if (off + asize < off || off + asize > used)
		return NULL;

	/* Check size of attribute. */
	if (!attr->non_res) {
		/* Check resident fields. */
		if (asize < SIZEOF_RESIDENT)
			return NULL;

		t16 = le16_to_cpu(attr->res.data_off);
		if (t16 > asize)
			return NULL;

		if (le32_to_cpu(attr->res.data_size) > asize - t16)
			return NULL;

		t32 = sizeof(short) * attr->name_len;
		if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
			return NULL;

		return attr;
	}

	/* Check nonresident fields. */
	if (attr->non_res != 1)
		return NULL;

	t16 = le16_to_cpu(attr->nres.run_off);
	if (t16 > asize)
		return NULL;

	t32 = sizeof(short) * attr->name_len;
	if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
		return NULL;

	/* Check start/end vcn. */
	if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
		return NULL;

	data_size = le64_to_cpu(attr->nres.data_size);
	if (le64_to_cpu(attr->nres.valid_size) > data_size)
		return NULL;

	alloc_size = le64_to_cpu(attr->nres.alloc_size);
	if (data_size > alloc_size)
		return NULL;

	t32 = mi->sbi->cluster_mask;
	if (alloc_size & t32)
		return NULL;

	if (!attr->nres.svcn && is_attr_ext(attr)) {
		/* First segment of sparse/compressed attribute */
		if (asize + 8 < SIZEOF_NONRESIDENT_EX)
			return NULL;

		tot_size = le64_to_cpu(attr->nres.total_size);
		if (tot_size & t32)
			return NULL;

		if (tot_size > alloc_size)
			return NULL;
	} else {
		if (asize + 8 < SIZEOF_NONRESIDENT)
			return NULL;

		if (attr->nres.c_unit)
			return NULL;
	}

	return attr;
}

/*
 * mi_find_attr - Find the attribute by type and name and id.
 */
struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
			    enum ATTR_TYPE type, const __le16 *name,
			    u8 name_len, const __le16 *id)
{
	u32 type_in = le32_to_cpu(type);
	u32 atype;

next_attr:
	attr = mi_enum_attr(mi, attr);
	if (!attr)
		return NULL;

	atype = le32_to_cpu(attr->type);
	if (atype > type_in)
		return NULL;

	if (atype < type_in)
		goto next_attr;

	if (attr->name_len != name_len)
		goto next_attr;

	if (name_len && memcmp(attr_name(attr), name, name_len * sizeof(short)))
		goto next_attr;

	if (id && *id != attr->id)
		goto next_attr;

	return attr;
}

int mi_write(struct mft_inode *mi, int wait)
{
	struct MFT_REC *rec;
	int err;
	struct ntfs_sb_info *sbi;

	if (!mi->dirty)
		return 0;

	sbi = mi->sbi;
	rec = mi->mrec;

	err = ntfs_write_bh(sbi, &rec->rhdr, &mi->nb, wait);
	if (err)
		return err;

	if (mi->rno < sbi->mft.recs_mirr)
		sbi->flags |= NTFS_FLAGS_MFTMIRR;

	mi->dirty = false;

	return 0;
}

int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
		  __le16 flags, bool is_mft)
{
	int err;
	u16 seq = 1;
	struct MFT_REC *rec;
	u64 vbo = (u64)rno << sbi->record_bits;

	err = mi_init(mi, sbi, rno);
	if (err)
		return err;

	rec = mi->mrec;

	if (rno == MFT_REC_MFT) {
		;
	} else if (rno < MFT_REC_FREE) {
		seq = rno;
	} else if (rno >= sbi->mft.used) {
		;
	} else if (mi_read(mi, is_mft)) {
		;
	} else if (rec->rhdr.sign == NTFS_FILE_SIGNATURE) {
		/* Record is reused. Update its sequence number. */
		seq = le16_to_cpu(rec->seq) + 1;
		if (!seq)
			seq = 1;
	}

	memcpy(rec, sbi->new_rec, sbi->record_size);

	rec->seq = cpu_to_le16(seq);
	rec->flags = RECORD_FLAG_IN_USE | flags;
	if (MFTRECORD_FIXUP_OFFSET == MFTRECORD_FIXUP_OFFSET_3)
		rec->mft_record = cpu_to_le32(rno);

	mi->dirty = true;

	if (!mi->nb.nbufs) {
		struct ntfs_inode *ni = sbi->mft.ni;
		bool lock = false;

		if (is_mounted(sbi) && !is_mft) {
			down_read(&ni->file.run_lock);
			lock = true;
		}

		err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,
				  &mi->nb);
		if (lock)
			up_read(&ni->file.run_lock);
	}

	return err;
}

/*
 * mi_insert_attr - Reserve space for new attribute.
 *
 * Return: Not fully constructed attribute or NULL if not possible to create.
 */
struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
			      const __le16 *name, u8 name_len, u32 asize,
			      u16 name_off)
{
	size_t tail;
	struct ATTRIB *attr;
	__le16 id;
	struct MFT_REC *rec = mi->mrec;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 used = le32_to_cpu(rec->used);
	const u16 *upcase = sbi->upcase;

	/* Can we insert mi attribute? */
	if (used + asize > sbi->record_size)
		return NULL;

	/*
	 * Scan through the list of attributes to find the point
	 * at which we should insert it.
	 */
	attr = NULL;
	while ((attr = mi_enum_attr(mi, attr))) {
		int diff = compare_attr(attr, type, name, name_len, upcase);

		if (diff < 0)
			continue;

		if (!diff && !is_attr_indexed(attr))
			return NULL;
		break;
	}

	if (!attr) {
		/* Append. */
		tail = 8;
		attr = Add2Ptr(rec, used - 8);
	} else {
		/* Insert before 'attr'. */
		tail = used - PtrOffset(rec, attr);
	}

	id = mi_new_attt_id(mi);

	memmove(Add2Ptr(attr, asize), attr, tail);
	memset(attr, 0, asize);

	attr->type = type;
	attr->size = cpu_to_le32(asize);
	attr->name_len = name_len;
	attr->name_off = cpu_to_le16(name_off);
	attr->id = id;

	memmove(Add2Ptr(attr, name_off), name, name_len * sizeof(short));
	rec->used = cpu_to_le32(used + asize);

	mi->dirty = true;

	return attr;
}

/*
 * mi_remove_attr - Remove the attribute from record.
 *
 * NOTE: The source attr will point to next attribute.
 */
bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
		    struct ATTRIB *attr)
{
	struct MFT_REC *rec = mi->mrec;
	u32 aoff = PtrOffset(rec, attr);
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);

	if (aoff + asize > used)
		return false;

	if (ni && is_attr_indexed(attr) && attr->type == ATTR_NAME) {
		u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
		if (!links) {
			/* minor error. Not critical. */
		} else {
			ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
			ni->mi.dirty = true;
		}
	}

	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;

	return true;
}

/* bytes = "new attribute size" - "old attribute size" */
bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
{
	struct MFT_REC *rec = mi->mrec;
	u32 aoff = PtrOffset(rec, attr);
	u32 total, used = le32_to_cpu(rec->used);
	u32 nsize, asize = le32_to_cpu(attr->size);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	int tail = (int)(used - aoff - asize);
	int dsize;
	char *next;

	if (tail < 0 || aoff >= used)
		return false;

	if (!bytes)
		return true;

	total = le32_to_cpu(rec->total);
	next = Add2Ptr(attr, asize);

	if (bytes > 0) {
		dsize = ALIGN(bytes, 8);
		if (used + dsize > total)
			return false;
		nsize = asize + dsize;
		/* Move tail */
		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
		used += dsize;
		rsize += dsize;
	} else {
		dsize = ALIGN(-bytes, 8);
		if (dsize > asize)
			return false;
		nsize = asize - dsize;
		memmove(next - dsize, next, tail);
		used -= dsize;
		rsize -= dsize;
	}

	rec->used = cpu_to_le32(used);
	attr->size = cpu_to_le32(nsize);
	if (!attr->non_res)
		attr->res.data_size = cpu_to_le32(rsize);
	mi->dirty = true;

	return true;
}

/*
 * Pack runs in MFT record.
 * If packing fails, the record is not changed.
 */
int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
		 struct runs_tree *run, CLST len)
{
	int err = 0;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 new_run_size;
	CLST plen;
	struct MFT_REC *rec = mi->mrec;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	u32 used = le32_to_cpu(rec->used);
	u32 aoff = PtrOffset(rec, attr);
	u32 asize = le32_to_cpu(attr->size);
	char *next = Add2Ptr(attr, asize);
	u16 run_off = le16_to_cpu(attr->nres.run_off);
	u32 run_size = asize - run_off;
	u32 tail = used - aoff - asize;
	u32 dsize = sbi->record_size - used;

	/* Make a maximum gap in current record. */
	memmove(next + dsize, next, tail);

	/* Pack as much as possible. */
	err = run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size + dsize,
		       &plen);
	if (err < 0) {
		memmove(next, next + dsize, tail);
		return err;
	}

	new_run_size = ALIGN(err, 8);

	memmove(next + new_run_size - run_size, next + dsize, tail);

	attr->size = cpu_to_le32(asize + new_run_size - run_size);
	attr->nres.evcn = cpu_to_le64(svcn + plen - 1);
	rec->used = cpu_to_le32(used + new_run_size - run_size);
	mi->dirty = true;

	return 0;
}