2020-01-18 09:38:21 +01:00
|
|
|
/*
|
2021-08-17 01:05:06 +02:00
|
|
|
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
|
2021-05-12 19:17:51 +00:00
|
|
|
* Copyright (c) 2021, sin-ack <sin-ack@protonmail.com>
|
2020-01-18 09:38:21 +01:00
|
|
|
*
|
2021-04-22 01:24:48 -07:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause
|
2020-01-18 09:38:21 +01:00
|
|
|
*/
|
|
|
|
|
2019-06-27 13:44:26 +02:00
|
|
|
#include <AK/NonnullRefPtrVector.h>
|
2020-08-24 19:35:19 -06:00
|
|
|
#include <AK/Singleton.h>
|
2020-03-23 13:45:10 +01:00
|
|
|
#include <AK/StringView.h>
|
2020-11-07 09:38:48 +02:00
|
|
|
#include <Kernel/API/InodeWatcherEvent.h>
|
2020-03-01 21:46:51 +02:00
|
|
|
#include <Kernel/FileSystem/Custody.h>
|
2019-06-07 11:43:58 +02:00
|
|
|
#include <Kernel/FileSystem/Inode.h>
|
2019-07-22 20:01:11 +02:00
|
|
|
#include <Kernel/FileSystem/InodeWatcher.h>
|
2021-09-07 13:39:11 +02:00
|
|
|
#include <Kernel/FileSystem/OpenFileDescription.h>
|
2020-03-01 21:46:51 +02:00
|
|
|
#include <Kernel/FileSystem/VirtualFileSystem.h>
|
2020-08-11 19:47:24 +02:00
|
|
|
#include <Kernel/KBufferBuilder.h>
|
2021-08-06 10:45:34 +02:00
|
|
|
#include <Kernel/Memory/SharedInodeVMObject.h>
|
2019-05-16 03:02:37 +02:00
|
|
|
#include <Kernel/Net/LocalSocket.h>
|
2021-07-18 23:29:56 -06:00
|
|
|
#include <Kernel/Process.h>
|
2019-05-16 03:02:37 +02:00
|
|
|
|
2020-02-16 01:27:42 +01:00
|
|
|
namespace Kernel {
|
|
|
|
|
2021-08-22 01:37:17 +02:00
|
|
|
// Global registry of all live Inode instances, protected by a spinlock so
// it can be walked safely from any context (see sync_all()).
static Singleton<SpinlockProtected<Inode::AllInstancesList>> s_all_instances;

// Accessor for the global inode registry.
SpinlockProtected<Inode::AllInstancesList>& Inode::all_instances()
{
    return s_all_instances;
}
|
|
|
|
|
2021-09-11 23:28:59 -04:00
|
|
|
// Flushes dirty metadata for every live inode in the system.
void Inode::sync_all()
{
    // Phase 1: While holding the all-instances spinlock, collect strong
    // references to every inode with dirty metadata. We must not call
    // flush_metadata() under the spinlock, since flushing may block.
    NonnullRefPtrVector<Inode, 32> inodes;
    Inode::all_instances().with([&](auto& all_inodes) {
        for (auto& inode : all_inodes) {
            if (inode.is_metadata_dirty())
                inodes.append(inode);
        }
    });

    // Phase 2: Flush outside the lock. Errors are deliberately ignored;
    // this is a best-effort, system-wide sync.
    for (auto& inode : inodes) {
        VERIFY(inode.is_metadata_dirty());
        (void)inode.flush_metadata();
    }
}
|
|
|
|
|
2021-09-11 23:28:59 -04:00
|
|
|
// Flushes this inode's dirty metadata (best effort, errors ignored), then
// asks the filesystem to flush pending writes to the underlying device.
void Inode::sync()
{
    if (is_metadata_dirty())
        (void)flush_metadata();
    fs().flush_writes();
}
|
|
|
|
|
2021-11-08 00:51:39 +01:00
|
|
|
// Reads the entire contents of this inode into a freshly allocated KBuffer,
// 4 KiB at a time. `description` is optional and forwarded to read_bytes().
// Returns ENOMEM if the resulting buffer cannot be materialized.
ErrorOr<NonnullOwnPtr<KBuffer>> Inode::read_entire(OpenFileDescription* description) const
{
    auto builder = TRY(KBufferBuilder::try_create());

    u8 chunk[4096];
    off_t file_offset = 0;
    while (true) {
        auto chunk_buffer = UserOrKernelBuffer::for_kernel_buffer(chunk);
        auto bytes_read = TRY(read_bytes(file_offset, sizeof(chunk), chunk_buffer, description));
        VERIFY(bytes_read <= sizeof(chunk));
        if (bytes_read == 0)
            break;
        TRY(builder.append((char const*)chunk, bytes_read));
        file_offset += bytes_read;
        // A short read means we've hit the end of the file.
        if (bytes_read < sizeof(chunk))
            break;
    }

    auto entire_file = builder.build();
    if (!entire_file)
        return ENOMEM;
    return entire_file.release_nonnull();
}
|
|
|
|
|
2021-11-08 00:51:39 +01:00
|
|
|
// Resolves this inode as a symbolic link relative to `base`.
// `symlink_recursion_level` is forwarded so path resolution can bail out of
// symlink loops.
ErrorOr<NonnullRefPtr<Custody>> Inode::resolve_as_link(Custody& base, RefPtr<Custody>* out_parent, int options, int symlink_recursion_level) const
{
    // The default implementation simply treats the stored
    // contents as a path and resolves that. That is, it
    // behaves exactly how you would expect a symlink to work.
    auto contents = TRY(read_entire());
    return VirtualFileSystem::the().resolve_path(StringView { contents->bytes() }, base, out_parent, options, symlink_recursion_level);
}
|
|
|
|
|
2021-07-11 00:20:38 +02:00
|
|
|
// Constructs an inode belonging to `fs` with the given index, and registers
// it in the global all-instances list so sync_all() can find it.
Inode::Inode(FileSystem& fs, InodeIndex index)
    : m_file_system(fs)
    , m_index(index)
{
    Inode::all_instances().with([&](auto& all_inodes) { all_inodes.append(*this); });
}
|
|
|
|
|
|
|
|
// Detaches any watchers still pointed at this inode so they don't keep a
// stale registration for our identifier after we're gone.
Inode::~Inode()
{
    m_watchers.for_each([&](auto& watcher) {
        watcher->unregister_by_inode({}, identifier());
    });
}
|
|
|
|
|
|
|
|
// Called just before the inode is destroyed; gives it a last chance to
// write out dirty metadata. Flush errors are intentionally ignored here —
// there's nothing useful left to do with them at this point.
void Inode::will_be_destroyed()
{
    MutexLocker locker(m_inode_lock);
    if (m_metadata_dirty)
        (void)flush_metadata();
}
|
|
|
|
|
2021-11-08 00:51:39 +01:00
|
|
|
// Default access-time setter; filesystems that persist timestamps override
// this. The base implementation reports "not implemented".
ErrorOr<void> Inode::set_atime(time_t)
{
    return ENOTIMPL;
}
|
|
|
|
|
2021-11-08 00:51:39 +01:00
|
|
|
// Default change-time setter; overridden by filesystems that persist
// timestamps. The base implementation reports "not implemented".
ErrorOr<void> Inode::set_ctime(time_t)
{
    return ENOTIMPL;
}
|
|
|
|
|
2021-11-08 00:51:39 +01:00
|
|
|
// Default modification-time setter; overridden by filesystems that persist
// timestamps. The base implementation reports "not implemented".
ErrorOr<void> Inode::set_mtime(time_t)
{
    return ENOTIMPL;
}
|
|
|
|
|
2021-11-08 00:51:39 +01:00
|
|
|
// Default hard-link count increment; overridden by filesystems that support
// hard links. The base implementation reports "not implemented".
ErrorOr<void> Inode::increment_link_count()
{
    return ENOTIMPL;
}
|
|
|
|
|
2021-11-08 00:51:39 +01:00
|
|
|
// Default hard-link count decrement; overridden by filesystems that support
// hard links. The base implementation reports "not implemented".
ErrorOr<void> Inode::decrement_link_count()
{
    return ENOTIMPL;
}
|
|
|
|
|
2022-02-14 01:46:34 +02:00
|
|
|
// Associates a shared VM object (backing shared file mappings) with this
// inode. Only a weak pointer is stored, so the inode does not keep the VM
// object alive.
ErrorOr<void> Inode::set_shared_vmobject(Memory::SharedInodeVMObject& vmobject)
{
    MutexLocker locker(m_inode_lock);
    m_shared_vmobject = TRY(vmobject.try_make_weak_ptr<Memory::SharedInodeVMObject>());
    return {};
}
|
|
|
|
|
2022-02-07 12:57:57 +01:00
|
|
|
// Returns the local (Unix-domain) socket bound to this inode, or null if
// no socket is bound.
RefPtr<LocalSocket> Inode::bound_socket() const
{
    return m_bound_socket;
}
|
|
|
|
|
2019-05-16 03:02:37 +02:00
|
|
|
// Binds a local socket to this inode. Returns false if another socket is
// already bound; the existing binding is never displaced.
bool Inode::bind_socket(LocalSocket& socket)
{
    MutexLocker locker(m_inode_lock);
    if (!m_bound_socket) {
        m_bound_socket = socket;
        return true;
    }
    return false;
}
|
|
|
|
|
|
|
|
bool Inode::unbind_socket()
|
|
|
|
{
|
2021-07-18 01:13:34 +02:00
|
|
|
MutexLocker locker(m_inode_lock);
|
2022-02-07 12:57:57 +01:00
|
|
|
if (!m_bound_socket)
|
2020-01-30 22:15:45 +01:00
|
|
|
return false;
|
2022-02-07 12:57:57 +01:00
|
|
|
m_bound_socket = nullptr;
|
2019-05-16 03:02:37 +02:00
|
|
|
return true;
|
|
|
|
}
|
2019-07-22 20:01:11 +02:00
|
|
|
|
2022-01-25 15:13:59 +02:00
|
|
|
// Registers an InodeWatcher to receive events about this inode. Fails only
// if the watcher set cannot grow. Double registration is a bug (VERIFY).
ErrorOr<void> Inode::register_watcher(Badge<InodeWatcher>, InodeWatcher& watcher)
{
    return m_watchers.with([&](auto& watchers) -> ErrorOr<void> {
        VERIFY(!watchers.contains(&watcher));
        TRY(watchers.try_set(&watcher));
        return {};
    });
}
|
|
|
|
|
|
|
|
// Removes a previously registered InodeWatcher. Unregistering a watcher
// that was never registered is a bug (VERIFY).
void Inode::unregister_watcher(Badge<InodeWatcher>, InodeWatcher& watcher)
{
    m_watchers.with([&](auto& watchers) {
        VERIFY(watchers.contains(&watcher));
        watchers.remove(&watcher);
    });
}
|
|
|
|
|
2021-11-08 00:51:39 +01:00
|
|
|
// Lazily creates (and caches) the FIFO object backing this inode. Must only
// be called on inodes whose metadata identifies them as FIFOs.
ErrorOr<NonnullRefPtr<FIFO>> Inode::fifo()
{
    MutexLocker locker(m_inode_lock);
    VERIFY(metadata().is_fifo());

    // FIXME: Release m_fifo when it is closed by all readers and writers
    if (!m_fifo)
        m_fifo = TRY(FIFO::try_create(metadata().uid));

    return NonnullRefPtr { *m_fifo };
}
|
|
|
|
|
2019-07-22 20:01:11 +02:00
|
|
|
// Marks this inode's metadata as dirty or clean. On a clean->dirty
// transition, registered watchers are notified of a metadata modification.
void Inode::set_metadata_dirty(bool metadata_dirty)
{
    MutexLocker locker(m_inode_lock);

    if (metadata_dirty) {
        // Sanity check. Metadata on a read-only filesystem must never become dirty.
        VERIFY(!fs().is_readonly());
    }

    // No state change, nothing to do (and no spurious watcher events).
    if (m_metadata_dirty == metadata_dirty)
        return;

    m_metadata_dirty = metadata_dirty;
    if (m_metadata_dirty) {
        // FIXME: Maybe we should hook into modification events somewhere else, I'm not sure where.
        // We don't always end up on this particular code path, for instance when writing to an ext2fs file.
        m_watchers.for_each([&](auto& watcher) {
            watcher->notify_inode_event({}, identifier(), InodeWatcherEvent::Type::MetadataModified);
        });
    }
}
|
2020-02-16 01:27:42 +01:00
|
|
|
|
2022-01-11 22:04:53 +02:00
|
|
|
// Notifies watchers that a child entry named `name` was created under this
// (directory) inode.
void Inode::did_add_child(InodeIdentifier, StringView name)
{
    m_watchers.for_each([&](auto& watcher) {
        watcher->notify_inode_event({}, identifier(), InodeWatcherEvent::Type::ChildCreated, name);
    });
}
|
|
|
|
|
2022-01-11 22:04:53 +02:00
|
|
|
// Notifies watchers that a child entry named `name` was removed from this
// (directory) inode. The "." and ".." entries are suppressed.
void Inode::did_remove_child(InodeIdentifier, StringView name)
{
    if (name == "." || name == "..") {
        // These are just aliases and are not interesting to userspace.
        return;
    }

    m_watchers.for_each([&](auto& watcher) {
        watcher->notify_inode_event({}, identifier(), InodeWatcherEvent::Type::ChildDeleted, name);
    });
}
|
|
|
|
|
|
|
|
// Notifies watchers that this inode's contents were modified.
void Inode::did_modify_contents()
{
    m_watchers.for_each([&](auto& watcher) {
        watcher->notify_inode_event({}, identifier(), InodeWatcherEvent::Type::ContentModified);
    });
}
|
|
|
|
|
2021-05-12 19:17:51 +00:00
|
|
|
// Notifies watchers that this inode itself has been deleted.
void Inode::did_delete_self()
{
    m_watchers.for_each([&](auto& watcher) {
        watcher->notify_inode_event({}, identifier(), InodeWatcherEvent::Type::Deleted);
    });
}
|
|
|
|
|
2021-11-08 00:51:39 +01:00
|
|
|
// Pre-write hook: rejects writes on read-only filesystems and strips the
// SUID/SGID bits when a set-uid/set-gid file is written to (standard
// security behavior so a privileged binary can't be modified in place and
// keep its elevated bits).
ErrorOr<void> Inode::prepare_to_write_data()
{
    // FIXME: It's a poor design that filesystems are expected to call this before writing out data.
    // We should funnel everything through an interface at the VirtualFileSystem layer so this can happen from a single place.
    MutexLocker locker(m_inode_lock);
    if (fs().is_readonly())
        return EROFS;
    auto metadata = this->metadata();
    if (metadata.is_setuid() || metadata.is_setgid()) {
        dbgln("Inode::prepare_to_write_data(): Stripping SUID/SGID bits from {}", identifier());
        // 04000 = SUID, 02000 = SGID.
        return chmod(metadata.mode & ~(04000 | 02000));
    }
    return {};
}
|
|
|
|
|
2021-08-06 13:49:36 +02:00
|
|
|
// Returns a strong reference to the shared VM object mapping this inode,
// or null if none is set (or the weakly-held object has already died).
RefPtr<Memory::SharedInodeVMObject> Inode::shared_vmobject() const
{
    MutexLocker locker(m_inode_lock);
    return m_shared_vmobject.strong_ref();
}
|
|
|
|
|
2021-07-18 23:29:56 -06:00
|
|
|
// Returns whether the half-open ranges [start1, start1+len1) and
// [start2, start2+len2) intersect. A length of zero means "to the end of
// the file", i.e. an unbounded range starting at the given offset.
template<typename T>
static inline bool range_overlap(T start1, T len1, T start2, T len2)
{
    bool const first_reaches_second = (start2 < start1 + len1) || len1 == 0;
    bool const second_reaches_first = (start1 < start2 + len2) || len2 == 0;
    return first_reaches_second && second_reaches_first;
}
|
|
|
|
|
2021-11-08 00:51:39 +01:00
|
|
|
// Canonicalizes a userspace flock request in place: converts the start
// offset to an absolute (SEEK_SET) position and clears l_pid. SEEK_END is
// not yet supported (ENOTSUP); an unknown whence yields EINVAL.
static inline ErrorOr<void> normalize_flock(OpenFileDescription const& description, flock& lock)
{
    off_t absolute_start = 0;
    switch (lock.l_whence) {
    case SEEK_SET:
        absolute_start = lock.l_start;
        break;
    case SEEK_CUR:
        absolute_start = description.offset() + lock.l_start;
        break;
    case SEEK_END:
        // FIXME: Implement SEEK_END and negative lengths.
        return ENOTSUP;
    default:
        return EINVAL;
    }
    lock.l_whence = SEEK_SET;
    lock.l_start = absolute_start;
    lock.l_pid = 0;
    return {};
}
|
|
|
|
|
2022-05-13 12:20:57 +02:00
|
|
|
// Checks whether `new_lock` (already normalized to SEEK_SET) conflicts
// with any existing lock on this inode. Returns EAGAIN on conflict.
// Unlock requests always succeed.
ErrorOr<void> Inode::can_apply_flock(OpenFileDescription const&, flock const& new_lock) const
{
    VERIFY(new_lock.l_whence == SEEK_SET);

    if (new_lock.l_type == F_UNLCK)
        return {};

    return m_flocks.with([&](auto& flocks) -> ErrorOr<void> {
        for (auto const& lock : flocks) {
            if (!range_overlap(lock.start, lock.len, new_lock.l_start, new_lock.l_len))
                continue;

            // A requested read lock conflicts with an existing write lock...
            if (new_lock.l_type == F_RDLCK && lock.type == F_WRLCK)
                return EAGAIN;

            // ...and a requested write lock conflicts with any overlapping lock.
            if (new_lock.l_type == F_WRLCK)
                return EAGAIN;
        }
        return {};
    });
}
|
|
|
|
|
2021-11-08 00:51:39 +01:00
|
|
|
// Applies an advisory record lock (fcntl F_SETLK-style) from userspace.
// The request is copied in, normalized to an absolute range, checked for
// conflicts, and then either removes matching locks (F_UNLCK) or records
// the new lock.
ErrorOr<void> Inode::apply_flock(Process const& process, OpenFileDescription const& description, Userspace<flock const*> input_lock)
{
    auto new_lock = TRY(copy_typed_from_user(input_lock));
    TRY(normalize_flock(description, new_lock));

    return m_flocks.with([&](auto& flocks) -> ErrorOr<void> {
        TRY(can_apply_flock(description, new_lock));

        if (new_lock.l_type == F_UNLCK) {
            // Remove every lock held by this description over the exact
            // requested range. Using remove_all_matching (as in
            // remove_flocks_for_description) avoids the skipped-element bug
            // of erasing by index while incrementing: after remove(i), the
            // next entry shifts into slot i and ++i would jump over it.
            flocks.remove_all_matching([&](auto& entry) {
                return entry.owner == &description && entry.start == new_lock.l_start && entry.len == new_lock.l_len;
            });

            // Judging by the Linux implementation, unlocking a non-existent lock also works.
            return {};
        }

        TRY(flocks.try_append(Flock { new_lock.l_start, new_lock.l_len, &description, process.pid().value(), new_lock.l_type }));
        return {};
    });
}
|
|
|
|
|
2021-11-08 00:51:39 +01:00
|
|
|
// Implements fcntl F_GETLK-style queries: reports the first existing lock
// that would block the lock described by `reference_lock`. If no lock
// blocks it, l_type is set to F_UNLCK in the copied-back structure.
ErrorOr<void> Inode::get_flock(OpenFileDescription const& description, Userspace<flock*> reference_lock) const
{
    flock lookup = {};
    TRY(copy_from_user(&lookup, reference_lock));
    TRY(normalize_flock(description, lookup));

    return m_flocks.with([&](auto& flocks) {
        for (auto const& lock : flocks) {
            if (!range_overlap(lock.start, lock.len, lookup.l_start, lookup.l_len))
                continue;

            // Locks with the same owner can't conflict with each other.
            if (lock.pid == Process::current().pid())
                continue;

            // Report a blocking lock: an existing write lock blocks reads,
            // and any overlapping lock blocks a write request.
            if ((lookup.l_type == F_RDLCK && lock.type == F_WRLCK) || lookup.l_type == F_WRLCK) {
                lookup = { lock.type, SEEK_SET, lock.start, lock.len, lock.pid };
                return copy_to_user(reference_lock, &lookup);
            }
        }

        // Nothing blocks the requested lock.
        lookup.l_type = F_UNLCK;
        return copy_to_user(reference_lock, &lookup);
    });
}
|
|
|
|
|
2021-09-07 13:39:11 +02:00
|
|
|
// Drops all advisory locks owned by the given file description; called when
// the description is being torn down so its locks don't outlive it.
void Inode::remove_flocks_for_description(OpenFileDescription const& description)
{
    m_flocks.with([&](auto& flocks) {
        flocks.remove_all_matching([&](auto& entry) { return entry.owner == &description; });
    });
}
|
2022-02-03 17:28:45 +01:00
|
|
|
|
2022-02-03 01:39:49 +01:00
|
|
|
bool Inode::has_watchers() const
|
|
|
|
{
|
|
|
|
return !m_watchers.with([&](auto& watchers) { return watchers.is_empty(); });
|
|
|
|
}
|
|
|
|
|
2020-02-16 01:27:42 +01:00
|
|
|
}
|