Mirror of https://github.com/SerenityOS/serenity.git
Kernel: Switch to SpinlockProtected to protect NVMeQueue's requests map
This helps ensure no one accidentally accesses m_requests without first locking its spinlock. In fact, this change fixed such a case: process_cq() implicitly assumed the caller had locked the lock, which was not the case for NVMePollQueue::submit_sqe().
parent 263127f21a
commit 45aee20ea9
3 changed files with 61 additions and 60 deletions
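For readers unfamiliar with SpinlockProtected: it bundles a value together with the spinlock that guards it and only exposes the value through with(), which takes the lock around a callback. Below is a minimal userspace sketch of that idea, not the Kernel's actual implementation; std::mutex stands in for the spinlock and LockProtected is an illustrative name.

// Minimal sketch of the SpinlockProtected idea (illustrative, not Kernel code).
// std::mutex stands in for the kernel spinlock; the shape of the API matches the
// pattern used throughout this commit: value.with([](auto& inner) { ... });
#include <cstdio>
#include <mutex>
#include <unordered_map>

template<typename T>
class LockProtected {
public:
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        std::lock_guard locker(m_lock); // lock is taken here...
        return callback(m_value);       // ...and held until the callback returns
    }

private:
    T m_value {};      // unreachable except through with()
    std::mutex m_lock;
};

int main()
{
    // Stand-in for the NVMe request map: command id -> status.
    LockProtected<std::unordered_map<unsigned short, unsigned short>> requests;

    requests.with([](auto& map) { map[1] = 0; });
    auto count = requests.with([](auto& map) { return map.size(); });
    std::printf("tracked requests: %zu\n", count);
    // There is no way to reach the map without going through with(), so a caller
    // can no longer forget to take the lock -- the bug class this commit removes.
}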
@@ -31,7 +31,6 @@ void NVMeInterruptQueue::initialize_interrupt_queue()
 
 bool NVMeInterruptQueue::handle_irq(RegisterState const&)
 {
-    SpinlockLocker lock(m_request_lock);
     return process_cq() ? true : false;
 }
 
@@ -47,14 +46,15 @@ void NVMeInterruptQueue::complete_current_request(u16 cmdid, u16 status)
     });
 
     if (work_item_creation_result.is_error()) {
-        SpinlockLocker lock(m_request_lock);
-        auto& request_pdu = m_requests.get(cmdid).release_value();
-        auto current_request = request_pdu.request;
+        m_requests.with([cmdid, status](auto& requests) {
+            auto& request_pdu = requests.get(cmdid).release_value();
+            auto current_request = request_pdu.request;
 
-        current_request->complete(AsyncDeviceRequest::OutOfMemory);
-        if (request_pdu.end_io_handler)
-            request_pdu.end_io_handler(status);
-        request_pdu.clear();
+            current_request->complete(AsyncDeviceRequest::OutOfMemory);
+            if (request_pdu.end_io_handler)
+                request_pdu.end_io_handler(status);
+            request_pdu.clear();
+        });
     }
 }
 }
@@ -41,7 +41,9 @@ UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_regio
     , m_rw_dma_page(rw_dma_page)
 
 {
-    m_requests.try_ensure_capacity(q_depth).release_value_but_fixme_should_propagate_errors();
+    m_requests.with([q_depth](auto& requests) {
+        requests.try_ensure_capacity(q_depth).release_value_but_fixme_should_propagate_errors();
+    });
     m_sqe_array = { reinterpret_cast<NVMeSubmission*>(m_sq_dma_region->vaddr().as_ptr()), m_qdepth };
     m_cqe_array = { reinterpret_cast<NVMeCompletion*>(m_cq_dma_region->vaddr().as_ptr()), m_qdepth };
 }
@@ -66,21 +68,23 @@ void NVMeQueue::update_cqe_head()
 u32 NVMeQueue::process_cq()
 {
     u32 nr_of_processed_cqes = 0;
-    while (cqe_available()) {
-        u16 status;
-        u16 cmdid;
-        ++nr_of_processed_cqes;
-        status = CQ_STATUS_FIELD(m_cqe_array[m_cq_head].status);
-        cmdid = m_cqe_array[m_cq_head].command_id;
-        dbgln_if(NVME_DEBUG, "NVMe: Completion with status {:x} and command identifier {}. CQ_HEAD: {}", status, cmdid, m_cq_head);
+    m_requests.with([this, &nr_of_processed_cqes](auto& requests) {
+        while (cqe_available()) {
+            u16 status;
+            u16 cmdid;
+            ++nr_of_processed_cqes;
+            status = CQ_STATUS_FIELD(m_cqe_array[m_cq_head].status);
+            cmdid = m_cqe_array[m_cq_head].command_id;
+            dbgln_if(NVME_DEBUG, "NVMe: Completion with status {:x} and command identifier {}. CQ_HEAD: {}", status, cmdid, m_cq_head);
 
-        if (!m_requests.contains(cmdid)) {
-            dmesgln("Bogus cmd id: {}", cmdid);
-            VERIFY_NOT_REACHED();
-        }
-        complete_current_request(cmdid, status);
-        update_cqe_head();
-    }
+            if (!requests.contains(cmdid)) {
+                dmesgln("Bogus cmd id: {}", cmdid);
+                VERIFY_NOT_REACHED();
+            }
+            complete_current_request(cmdid, status);
+            update_cqe_head();
+        }
+    });
 
     if (nr_of_processed_cqes) {
         update_cq_doorbell();
     }
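The hunk above is the heart of the fix described in the commit message: process_cq() used to read m_requests on the assumption that the caller already held m_request_lock, which NVMeInterruptQueue::handle_irq() did but NVMePollQueue::submit_sqe() did not. Taking the lock inside process_cq() via with() makes that mistake impossible. A hedged, self-contained sketch of the two calling contracts follows; QueueBefore, QueueAfter, and with_requests are illustrative names, not SerenityOS code.

// Before: the lock is a separate member and process_cq() merely assumes the caller
// holds it; nothing stops a second caller from forgetting.
// After: the map lives behind the lock and process_cq() locks internally.
#include <map>
#include <mutex>

struct QueueBefore {
    std::mutex request_lock;
    std::map<int, int> requests;

    // Contract only documented in a comment: "caller must hold request_lock".
    void process_cq() { requests.clear(); }
};

struct QueueAfter {
    void process_cq()
    {
        // The lock is taken here, so every caller (IRQ handler or poller) is safe.
        with_requests([](auto& requests) { requests.clear(); });
    }

private:
    template<typename Callback>
    void with_requests(Callback callback)
    {
        std::lock_guard locker(m_request_lock);
        callback(m_requests);
    }

    std::mutex m_request_lock;
    std::map<int, int> m_requests;
};

int main()
{
    QueueBefore before;
    before.process_cq(); // compiles even though we never locked request_lock
    QueueAfter after;
    after.process_cq();  // cannot be called "wrong": the lock is internal
}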
@@ -106,35 +110,36 @@ void NVMeQueue::submit_sqe(NVMeSubmission& sub)
 
 void NVMeQueue::complete_current_request(u16 cmdid, u16 status)
 {
-    SpinlockLocker lock(m_request_lock);
-    auto& request_pdu = m_requests.get(cmdid).release_value();
-    auto current_request = request_pdu.request;
-    AsyncDeviceRequest::RequestResult req_result = AsyncDeviceRequest::Success;
+    m_requests.with([this, cmdid, status](auto& requests) {
+        auto& request_pdu = requests.get(cmdid).release_value();
+        auto current_request = request_pdu.request;
+        AsyncDeviceRequest::RequestResult req_result = AsyncDeviceRequest::Success;
 
-    ScopeGuard guard = [&req_result, status, &request_pdu] {
-        if (request_pdu.request)
-            request_pdu.request->complete(req_result);
-        if (request_pdu.end_io_handler)
-            request_pdu.end_io_handler(status);
-        request_pdu.clear();
-    };
+        ScopeGuard guard = [&req_result, status, &request_pdu] {
+            if (request_pdu.request)
+                request_pdu.request->complete(req_result);
+            if (request_pdu.end_io_handler)
+                request_pdu.end_io_handler(status);
+            request_pdu.clear();
+        };
 
-    // There can be submission without any request associated with it such as with
-    // admin queue commands during init. If there is no request, we are done
-    if (!current_request)
-        return;
+        // There can be submission without any request associated with it such as with
+        // admin queue commands during init. If there is no request, we are done
+        if (!current_request)
+            return;
 
-    if (status) {
-        req_result = AsyncBlockDeviceRequest::Failure;
-        return;
-    }
+        if (status) {
+            req_result = AsyncBlockDeviceRequest::Failure;
+            return;
+        }
 
-    if (current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Read) {
-        if (auto result = current_request->write_to_buffer(current_request->buffer(), m_rw_dma_region->vaddr().as_ptr(), current_request->buffer_size()); result.is_error()) {
-            req_result = AsyncBlockDeviceRequest::MemoryFault;
-            return;
-        }
-    }
+        if (current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Read) {
+            if (auto result = current_request->write_to_buffer(current_request->buffer(), m_rw_dma_region->vaddr().as_ptr(), current_request->buffer_size()); result.is_error()) {
+                req_result = AsyncBlockDeviceRequest::MemoryFault;
+                return;
+            }
+        }
+    });
 }
 
 u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
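One detail of the hunk above worth spelling out: because the ScopeGuard is now declared inside the with() callback, its cleanup (completing the request and clearing the PDU) runs when the callback returns, i.e. while the request lock is still held, including on the early return paths. The following is a small standalone sketch of that ordering, with std::mutex and a local ScopeGuard type standing in for the Kernel facilities; it is an illustration, not Kernel code.

// Demonstrates that a scope guard declared inside a with()-style callback runs
// before the callback returns, while the surrounding lock is still held.
#include <cstdio>
#include <mutex>

template<typename F>
struct ScopeGuard {
    F on_exit;
    ~ScopeGuard() { on_exit(); }
};
template<typename F>
ScopeGuard(F) -> ScopeGuard<F>;

int main()
{
    std::mutex lock;
    auto with = [&](auto callback) {
        std::lock_guard locker(lock);
        std::puts("lock taken");
        callback();
        std::puts("callback returned (lock still held)");
    };

    with([] {
        ScopeGuard guard { [] { std::puts("guard ran: request completed, PDU cleared"); } };
        std::puts("callback body: set req_result, possibly return early");
        // guard's destructor fires here, before with() releases the lock
    });
    std::puts("lock released");
}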
@@ -144,10 +149,9 @@ u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
     u16 cid = get_request_cid();
     sub.cmdid = cid;
 
-    {
-        SpinlockLocker req_lock(m_request_lock);
-        m_requests.set(sub.cmdid, { nullptr, [this, &cmd_status](u16 status) mutable { cmd_status = status; m_sync_wait_queue.wake_all(); } });
-    }
+    m_requests.with([this, &sub, &cmd_status](auto& requests) {
+        requests.set(sub.cmdid, { nullptr, [this, &cmd_status](u16 status) mutable { cmd_status = status; m_sync_wait_queue.wake_all(); } });
+    });
     submit_sqe(sub);
 
     // FIXME: Only sync submissions (usually used for admin commands) use a WaitQueue based IO. Eventually we need to
@@ -167,10 +171,9 @@ void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
     sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
     sub.cmdid = get_request_cid();
 
-    {
-        SpinlockLocker req_lock(m_request_lock);
-        m_requests.set(sub.cmdid, { request, nullptr });
-    }
+    m_requests.with([&sub, &request](auto& requests) {
+        requests.set(sub.cmdid, { request, nullptr });
+    });
 
     full_memory_barrier();
     submit_sqe(sub);
@@ -188,10 +191,9 @@ void NVMeQueue::write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
     sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
    sub.cmdid = get_request_cid();
 
-    {
-        SpinlockLocker req_lock(m_request_lock);
-        m_requests.set(sub.cmdid, { request, nullptr });
-    }
+    m_requests.with([&sub, &request](auto& requests) {
+        requests.set(sub.cmdid, { request, nullptr });
+    });
 
     if (auto result = request.read_from_buffer(request.buffer(), m_rw_dma_region->vaddr().as_ptr(), request.buffer_size()); result.is_error()) {
         complete_current_request(sub.cmdid, AsyncDeviceRequest::MemoryFault);
@@ -113,9 +113,8 @@ private:
 
 protected:
     Spinlock<LockRank::Interrupts> m_cq_lock {};
-    HashMap<u16, NVMeIO> m_requests;
+    SpinlockProtected<HashMap<u16, NVMeIO>, LockRank::None> m_requests;
     NonnullOwnPtr<Memory::Region> m_rw_dma_region;
-    RecursiveSpinlock<LockRank::None> m_request_lock {};
 
 private:
     u16 m_qid {};