#include <Kernel/FileSystem/Inode.h>
#include <Kernel/VM/InodeVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/Region.h>

NonnullRefPtr<InodeVMObject> InodeVMObject::create_with_inode(Inode& inode)
{
    InterruptDisabler disabler;
    // Each Inode caches its VMObject; reuse the cached one if it already exists.
    if (inode.vmo())
        return *inode.vmo();
    auto vmo = adopt(*new InodeVMObject(inode));
    vmo->inode().set_vmo(*vmo);
    return vmo;
}
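// Typical use: a file-backed mmap() grabs (or reuses) the inode's VMObject and
// then maps a Region on top of it. A minimal sketch; the exact
// allocate_region_with_vmo()-style helper and its signature are assumptions
// here, not confirmed by this file:
//
//     auto vmo = InodeVMObject::create_with_inode(inode);
//     auto* region = allocate_region_with_vmo(preferred_vaddr, size, move(vmo), offset_in_vmo, "name", prot);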

NonnullRefPtr<VMObject> InodeVMObject::clone()
{
    return adopt(*new InodeVMObject(*this));
}

InodeVMObject::InodeVMObject(Inode& inode)
    : VMObject(inode.size())
    , m_inode(inode)
{
}

InodeVMObject::InodeVMObject(const InodeVMObject& other)
    : VMObject(other)
    , m_inode(other.m_inode)
{
}

InodeVMObject::~InodeVMObject()
{
    ASSERT(inode().vmo() == this);
}

void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
{
    dbgprintf("InodeVMObject::inode_size_changed: {%u:%u} %u -> %u\n",
        m_inode->fsid(), m_inode->index(),
        old_size, new_size);

    InterruptDisabler disabler;

    // Note: resize() takes a page count, so convert the rounded-up byte size into pages.
    auto new_page_count = PAGE_ROUND_UP(new_size) / PAGE_SIZE;
    m_physical_pages.resize(new_page_count);

    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
    for_each_region([](Region& region) {
        ASSERT(region.page_directory());
        MM.remap_region(*region.page_directory(), region);
    });
}
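// Worked example for the resize above: with 4 KiB pages, new_size = 5000 gives
// PAGE_ROUND_UP(5000) == 8192, so the page count is 8192 / PAGE_SIZE == 2 and
// m_physical_pages ends up with two slots. Without the division by PAGE_SIZE,
// the vector would balloon to 8192 entries.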

void InodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
{
    (void)size;
    (void)data;
    InterruptDisabler disabler;
    ASSERT(offset >= 0);

    // FIXME: Only invalidate the parts that actually changed.
    for (auto& physical_page : m_physical_pages)
        physical_page = nullptr;

#if 0
    size_t current_offset = offset;
    size_t remaining_bytes = size;
    const u8* data_ptr = data;

    auto to_page_index = [](size_t offset) -> size_t {
        return offset / PAGE_SIZE;
    };

    // Copy the leading partial page, if the write doesn't start page-aligned.
    if (current_offset % PAGE_SIZE) {
        size_t page_index = to_page_index(current_offset);
        size_t bytes_to_copy = min(remaining_bytes, PAGE_SIZE - (current_offset % PAGE_SIZE));
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data_ptr += bytes_to_copy;
        remaining_bytes -= bytes_to_copy;
    }

    // Copy the whole pages, plus the trailing partial page if any.
    for (size_t page_index = to_page_index(current_offset); remaining_bytes > 0 && page_index < m_physical_pages.size(); ++page_index) {
        size_t bytes_to_copy = min(remaining_bytes, (size_t)PAGE_SIZE);
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data_ptr += bytes_to_copy;
        remaining_bytes -= bytes_to_copy;
    }
#endif

    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
    for_each_region([](Region& region) {
        ASSERT(region.page_directory());
        MM.remap_region(*region.page_directory(), region);
    });
}
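// A minimal sketch of the partial invalidation the FIXME above asks for,
// assuming m_physical_pages spans the whole inode: drop only the pages that
// overlap [offset, offset + size) and leave the rest of the cache intact.
//
//     size_t first_page = offset / PAGE_SIZE;
//     size_t pages_end = min((size_t)(PAGE_ROUND_UP(offset + size) / PAGE_SIZE), m_physical_pages.size());
//     for (size_t i = first_page; i < pages_end; ++i)
//         m_physical_pages[i] = nullptr;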

template<typename Callback>
void VMObject::for_each_region(Callback callback)
{
    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
    // Perhaps VMObject could have a Vector<Region*> with all of its mappers?
    for (auto& region : MM.m_user_regions) {
        if (&region.vmo() == this)
            callback(region);
    }
    for (auto& region : MM.m_kernel_regions) {
        if (&region.vmo() == this)
            callback(region);
    }
}
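// A minimal sketch of the FIXME above, assuming VMObject kept a list of its
// mappers (a hypothetical m_regions member). Region would register itself when
// mapped and deregister when unmapped, turning the walk over every region in
// the system into a walk over just this object's own regions:
//
//     template<typename Callback>
//     void VMObject::for_each_region(Callback callback)
//     {
//         for (auto* region : m_regions)
//             callback(*region);
//     }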