Mirror of https://github.com/SerenityOS/serenity.git (synced 2025-01-24 18:32:28 -05:00)
Kernel: Always hold space lock while calculating memory statistics
And put the locker at the top of the functions for clarity.
parent 8bda30edd2
commit 9ca42c4c0e

1 changed file with 10 additions and 12 deletions
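The change follows one pattern across all of these accessors: construct the RAII lock object as the first statement of the function so the lock is held for the entire walk over m_regions, rather than being taken partway through or inside a nested scope. As a rough illustration of that pattern (not the kernel's actual code), here is a minimal standalone sketch that swaps in std::mutex and std::lock_guard for the kernel's SpinLock/ScopedSpinLock and uses simplified, hypothetical Region and Space types:

// Sketch only: standard-library stand-ins for the kernel's ScopedSpinLock and
// SpinLock. "Region" and "Space" here are simplified, hypothetical types.
#include <cstddef>
#include <cstdio>
#include <mutex>
#include <vector>

struct Region {
    size_t size;
    bool shared;
};

class Space {
public:
    void add_region(Region region)
    {
        std::lock_guard<std::mutex> lock(m_lock);
        m_regions.push_back(region);
    }

    size_t amount_virtual() const
    {
        // Take the lock at the top so the whole calculation sees a
        // consistent view of m_regions (mirrors the commit's change).
        std::lock_guard<std::mutex> lock(m_lock);
        size_t amount = 0;
        for (auto const& region : m_regions)
            amount += region.size;
        return amount;
    }

private:
    mutable std::mutex m_lock;
    std::vector<Region> m_regions;
};

int main()
{
    Space space;
    space.add_region({ 4096, false });
    space.add_region({ 8192, true });
    std::printf("virtual: %zu\n", space.amount_virtual());
    return 0;
}

Because the guard is scoped to the whole function body, every read of m_regions below it happens under the lock, which is exactly the property the commit title describes.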
@@ -222,11 +222,11 @@ void Space::remove_all_regions(Badge<Process>)
 
 size_t Space::amount_dirty_private() const
 {
+    ScopedSpinLock lock(m_lock);
     // FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
     // The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
     // That's probably a situation that needs to be looked at in general.
     size_t amount = 0;
-    ScopedSpinLock lock(m_lock);
     for (auto& region : m_regions) {
         if (!region.is_shared())
             amount += region.amount_dirty();
@@ -236,13 +236,11 @@ size_t Space::amount_dirty_private() const
 
 size_t Space::amount_clean_inode() const
 {
+    ScopedSpinLock lock(m_lock);
     HashTable<const InodeVMObject*> vmobjects;
-    {
-        ScopedSpinLock lock(m_lock);
-        for (auto& region : m_regions) {
-            if (region.vmobject().is_inode())
-                vmobjects.set(&static_cast<const InodeVMObject&>(region.vmobject()));
-        }
+    for (auto& region : m_regions) {
+        if (region.vmobject().is_inode())
+            vmobjects.set(&static_cast<const InodeVMObject&>(region.vmobject()));
     }
     size_t amount = 0;
     for (auto& vmobject : vmobjects)
@@ -252,8 +250,8 @@ size_t Space::amount_clean_inode() const
 
 size_t Space::amount_virtual() const
 {
-    size_t amount = 0;
     ScopedSpinLock lock(m_lock);
+    size_t amount = 0;
     for (auto& region : m_regions) {
         amount += region.size();
     }
@@ -262,9 +260,9 @@ size_t Space::amount_virtual() const
 
 size_t Space::amount_resident() const
 {
+    ScopedSpinLock lock(m_lock);
     // FIXME: This will double count if multiple regions use the same physical page.
     size_t amount = 0;
-    ScopedSpinLock lock(m_lock);
     for (auto& region : m_regions) {
         amount += region.amount_resident();
     }
@@ -273,12 +271,12 @@ size_t Space::amount_resident() const
 
 size_t Space::amount_shared() const
 {
+    ScopedSpinLock lock(m_lock);
     // FIXME: This will double count if multiple regions use the same physical page.
     // FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts,
     //        and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
     //        so that every Region contributes +1 ref to each of its PhysicalPages.
     size_t amount = 0;
-    ScopedSpinLock lock(m_lock);
     for (auto& region : m_regions) {
         amount += region.amount_shared();
     }
@@ -287,8 +285,8 @@ size_t Space::amount_shared() const
 
 size_t Space::amount_purgeable_volatile() const
 {
-    size_t amount = 0;
     ScopedSpinLock lock(m_lock);
+    size_t amount = 0;
     for (auto& region : m_regions) {
         if (region.vmobject().is_anonymous() && static_cast<const AnonymousVMObject&>(region.vmobject()).is_any_volatile())
             amount += region.amount_resident();
@@ -298,8 +296,8 @@ size_t Space::amount_purgeable_volatile() const
 
 size_t Space::amount_purgeable_nonvolatile() const
 {
-    size_t amount = 0;
     ScopedSpinLock lock(m_lock);
+    size_t amount = 0;
     for (auto& region : m_regions) {
         if (region.vmobject().is_anonymous() && !static_cast<const AnonymousVMObject&>(region.vmobject()).is_any_volatile())
             amount += region.amount_resident();