Kernel: Make AsyncDeviceRequest sub-req management alloc free

The previous implementation could allocate on insertion into the completed/pending
sub-request vectors. There's no reason these can't be intrusive lists instead.

This is a very minor step towards improving the ability to handle OOM, as tracked by #6369.
It might also help improve performance on the I/O path in certain situations;
I'll benchmark that later.
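
As a rough illustration of why the switch matters, here is a minimal, self-contained sketch of an intrusive list. It is not AK::IntrusiveList; the Request and RequestList types below are invented for the sketch, and only the append/take_first/is_empty names mirror calls that appear in the diff. The point is that each element carries its own link field, so moving a request between a pending list and a complete list only rewires existing pointers and never allocates.

// Illustrative only: a tiny singly-linked intrusive list, not AK::IntrusiveList.
#include <cassert>
#include <cstdio>

struct Request {
    int id { 0 };
    Request* next { nullptr }; // intrusive link lives inside the element itself
};

class RequestList {
public:
    bool is_empty() const { return m_head == nullptr; }

    // Appending only rewires pointers already stored in the element,
    // so this path performs no heap allocation.
    void append(Request& request)
    {
        request.next = nullptr;
        if (m_tail)
            m_tail->next = &request;
        else
            m_head = &request;
        m_tail = &request;
    }

    // Detach and return the first element, or nullptr if the list is empty.
    Request* take_first()
    {
        Request* request = m_head;
        if (!request)
            return nullptr;
        m_head = request->next;
        if (!m_head)
            m_tail = nullptr;
        request->next = nullptr;
        return request;
    }

private:
    Request* m_head { nullptr };
    Request* m_tail { nullptr };
};

int main()
{
    Request a;
    a.id = 1;
    Request b;
    b.id = 2;

    RequestList pending;
    RequestList complete;
    pending.append(a);
    pending.append(b);

    // "Finish" the first sub-request: move it from pending to complete,
    // again without touching the heap.
    Request* finished = pending.take_first();
    complete.append(*finished);

    assert(!pending.is_empty() && !complete.is_empty());
    printf("completed request %d\n", finished->id);
    return 0;
}

The AK list used by this commit is additionally parameterized on RefPtr<AsyncDeviceRequest> (see the header diff below), which suggests each list entry holds a reference keeping the sub-request alive; that matches why the new destructor drains m_sub_requests_complete with take_first() instead of iterating, so each sub-request's refcount is dropped explicitly, as the commit's own comment notes.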
Author: Brian Gianforcaro, 2021-04-19 23:55:44 -07:00 (committed by Linus Groh)
parent f1d832e5ea
commit 033b287635
2 changed files with 22 additions and 19 deletions

Kernel/Devices/AsyncDeviceRequest.cpp

@@ -49,10 +49,13 @@ AsyncDeviceRequest::~AsyncDeviceRequest()
     // sub-requests should be completed (either succeeded, failed, or cancelled).
     // Which means there should be no more pending sub-requests and the
     // entire AsyncDeviceRequest hierarchy should be immutable.
-    for (auto& sub_request : m_sub_requests_complete) {
-        VERIFY(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore
-        VERIFY(sub_request.m_parent_request == this);
-        sub_request.m_parent_request = nullptr;
+    while (!m_sub_requests_complete.is_empty()) {
+        // Note: sub_request is ref-counted, and we use this specific pattern
+        // to make sure the refcount is dropped properly.
+        auto sub_request = m_sub_requests_complete.take_first();
+        VERIFY(is_completed_result(sub_request->m_result)); // Shouldn't need any locking anymore
+        VERIFY(sub_request->m_parent_request == this);
+        sub_request->m_parent_request = nullptr;
     }
 }
@@ -104,24 +107,19 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
     {
         ScopedSpinLock lock(m_lock);
         VERIFY(m_result == Started);
-        size_t index;
-        for (index = 0; index < m_sub_requests_pending.size(); index++) {
-            if (&m_sub_requests_pending[index] == &sub_request) {
-                NonnullRefPtr<AsyncDeviceRequest> request(m_sub_requests_pending[index]);
-                m_sub_requests_pending.remove(index);
-                m_sub_requests_complete.append(move(request));
-                break;
-            }
+
+        if (m_sub_requests_pending.contains(sub_request)) {
+            // Note: append handles removing from any previous intrusive list internally.
+            m_sub_requests_complete.append(sub_request);
         }
-        VERIFY(index < m_sub_requests_pending.size());
+
         all_completed = m_sub_requests_pending.is_empty();
         if (all_completed) {
             // Aggregate any errors
             bool any_failures = false;
             bool any_memory_faults = false;
-            for (index = 0; index < m_sub_requests_complete.size(); index++) {
-                auto& sub_request = m_sub_requests_complete[index];
-                auto sub_result = sub_request.get_request_result();
+            for (auto& com_sub_request : m_sub_requests_complete) {
+                auto sub_result = com_sub_request.get_request_result();
                 VERIFY(is_completed_result(sub_result));
                 switch (sub_result) {
                 case Failure:
Kernel/Devices/AsyncDeviceRequest.h

@@ -26,7 +26,8 @@
 #pragma once
 
-#include <AK/NonnullRefPtrVector.h>
+#include <AK/IntrusiveList.h>
+#include <AK/NonnullRefPtr.h>
 #include <Kernel/Process.h>
 #include <Kernel/Thread.h>
 #include <Kernel/UserOrKernelBuffer.h>
@@ -160,8 +161,12 @@ private:
     AsyncDeviceRequest* m_parent_request { nullptr };
     RequestResult m_result { Pending };
-    NonnullRefPtrVector<AsyncDeviceRequest> m_sub_requests_pending;
-    NonnullRefPtrVector<AsyncDeviceRequest> m_sub_requests_complete;
+    IntrusiveListNode<AsyncDeviceRequest, RefPtr<AsyncDeviceRequest>> m_list_node;
+
+    typedef IntrusiveList<AsyncDeviceRequest, RefPtr<AsyncDeviceRequest>, &AsyncDeviceRequest::m_list_node> AsyncDeviceSubRequestList;
+
+    AsyncDeviceSubRequestList m_sub_requests_pending;
+    AsyncDeviceSubRequestList m_sub_requests_complete;
     WaitQueue m_queue;
     NonnullRefPtr<Process> m_process;
     void* m_private { nullptr };