/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <AK/ByteBuffer.h>
#include <AK/Singleton.h>
#include <AK/StringView.h>
#include <Kernel/FileSystem/ProcFS.h>
#include <Kernel/IO.h>
#include <Kernel/Process.h>
#include <Kernel/Storage/IDEChannel.h>
#include <Kernel/Storage/IDEController.h>
#include <Kernel/Storage/PATADiskDevice.h>
#include <Kernel/VM/MemoryManager.h>

namespace Kernel {

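// Note: IRQ 14 and IRQ 15 are the legacy ISA interrupt lines traditionally used by the primary and secondary ATA channels.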
#define PATA_PRIMARY_IRQ 14
#define PATA_SECONDARY_IRQ 15

#define ATA_SR_BSY 0x80
#define ATA_SR_DRDY 0x40
#define ATA_SR_DF 0x20
#define ATA_SR_DSC 0x10
#define ATA_SR_DRQ 0x08
#define ATA_SR_CORR 0x04
#define ATA_SR_IDX 0x02
#define ATA_SR_ERR 0x01
#define ATA_ER_BBK 0x80
#define ATA_ER_UNC 0x40
#define ATA_ER_MC 0x20
#define ATA_ER_IDNF 0x10
#define ATA_ER_MCR 0x08
#define ATA_ER_ABRT 0x04
#define ATA_ER_TK0NF 0x02
#define ATA_ER_AMNF 0x01
#define ATA_CMD_READ_PIO 0x20
#define ATA_CMD_READ_PIO_EXT 0x24
#define ATA_CMD_READ_DMA 0xC8
#define ATA_CMD_READ_DMA_EXT 0x25
#define ATA_CMD_WRITE_PIO 0x30
#define ATA_CMD_WRITE_PIO_EXT 0x34
#define ATA_CMD_WRITE_DMA 0xCA
#define ATA_CMD_WRITE_DMA_EXT 0x35
#define ATA_CMD_CACHE_FLUSH 0xE7
#define ATA_CMD_CACHE_FLUSH_EXT 0xEA
#define ATA_CMD_PACKET 0xA0
#define ATA_CMD_IDENTIFY_PACKET 0xA1
#define ATA_CMD_IDENTIFY 0xEC
#define ATAPI_CMD_READ 0xA8
#define ATAPI_CMD_EJECT 0x1B
#define ATA_IDENT_DEVICETYPE 0
#define ATA_IDENT_CYLINDERS 2
#define ATA_IDENT_HEADS 6
#define ATA_IDENT_SECTORS 12
#define ATA_IDENT_SERIAL 20
#define ATA_IDENT_MODEL 54
#define ATA_IDENT_CAPABILITIES 98
#define ATA_IDENT_FIELDVALID 106
#define ATA_IDENT_MAX_LBA 120
#define ATA_IDENT_COMMANDSETS 164
#define ATA_IDENT_MAX_LBA_EXT 200
#define IDE_ATA 0x00
#define IDE_ATAPI 0x01
#define ATA_REG_DATA 0x00
#define ATA_REG_ERROR 0x01
#define ATA_REG_FEATURES 0x01
#define ATA_REG_SECCOUNT0 0x02
#define ATA_REG_LBA0 0x03
#define ATA_REG_LBA1 0x04
#define ATA_REG_LBA2 0x05
#define ATA_REG_HDDEVSEL 0x06
#define ATA_REG_COMMAND 0x07
#define ATA_REG_STATUS 0x07
#define ATA_REG_SECCOUNT1 0x08
#define ATA_REG_LBA3 0x09
#define ATA_REG_LBA4 0x0A
#define ATA_REG_LBA5 0x0B
#define ATA_CTL_CONTROL 0x00
#define ATA_CTL_ALTSTATUS 0x00
#define ATA_CTL_DEVADDRESS 0x01

#define ATA_CAP_LBA 0x200

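// PCI devices with class 0x01 (mass storage) and subclass 0x01 identify themselves as IDE controllers.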
#define PCI_Mass_Storage_Class 0x1
#define PCI_IDE_Controller_Subclass 0x1

NonnullOwnPtr<IDEChannel> IDEChannel::create(const IDEController& controller, IOAddressGroup io_group, ChannelType type, bool force_pio)
{
    return make<IDEChannel>(controller, io_group, type, force_pio);
}

RefPtr<StorageDevice> IDEChannel::master_device() const
{
    return m_master;
}

RefPtr<StorageDevice> IDEChannel::slave_device() const
{
    return m_slave;
}

IDEChannel::IDEChannel(const IDEController& controller, IOAddressGroup io_group, ChannelType type, bool force_pio)
    : IRQHandler(type == ChannelType::Primary ? PATA_PRIMARY_IRQ : PATA_SECONDARY_IRQ)
    , m_channel_type(type)
    , m_io_group(io_group)
    , m_parent_controller(controller)
{
    disable_irq();

    // FIXME: The device may not be capable of DMA.
    m_dma_enabled.resource() = !force_pio;
    ProcFS::add_sys_bool("ide_dma", m_dma_enabled);

    initialize(force_pio);
    detect_disks();

    // Note: calling detect_disks() could have generated an interrupt; clear it if that's the case.
    clear_pending_interrupts();
    enable_irq();
}

void IDEChannel::clear_pending_interrupts() const
{
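    // Reading the status register acknowledges any interrupt the device may still be asserting.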
    m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
}

IDEChannel::~IDEChannel()
{
}

void IDEChannel::start_request(AsyncBlockDeviceRequest& request, bool use_dma, bool is_slave, u16 capabilities)
{
    ScopedSpinLock lock(m_request_lock);

    dbgln_if(PATA_DEBUG, "IDEChannel::start_request");

    m_current_request = &request;
    m_current_request_block_index = 0;
    m_current_request_uses_dma = use_dma;
    m_current_request_flushing_cache = false;

    if (request.request_type() == AsyncBlockDeviceRequest::Read) {
        if (use_dma)
            ata_read_sectors_with_dma(is_slave, capabilities);
        else
            ata_read_sectors(is_slave, capabilities);
    } else {
        if (use_dma)
            ata_write_sectors_with_dma(is_slave, capabilities);
        else
            ata_write_sectors(is_slave, capabilities);
    }
}

void IDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult result)
{
    // NOTE: this may be called from the interrupt handler!
    ASSERT(m_current_request);
    ASSERT(m_request_lock.is_locked());

    // Now schedule reading back the buffer as soon as we leave the irq handler.
    // This is important so that we can safely write the buffer back,
    // which could cause page faults. Note that this may be called immediately
    // before Processor::deferred_call_queue returns!
    Processor::deferred_call_queue([this, result]() {
        dbgln_if(PATA_DEBUG, "IDEChannel::complete_current_request result: {}", (int)result);
        ASSERT(m_current_request);
        auto& request = *m_current_request;
        m_current_request = nullptr;
        if (m_current_request_uses_dma) {
            if (result == AsyncDeviceRequest::Success) {
                if (request.request_type() == AsyncBlockDeviceRequest::Read) {
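                    // This relies on the DMA buffer's physical page also being reachable at (physical address + 0xc0000000) in the kernel's address space.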
                    if (!request.write_to_buffer(request.buffer(), m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * request.block_count())) {
                        request.complete(AsyncDeviceRequest::MemoryFault);
                        return;
                    }
                }
                // I read somewhere that this may trigger a cache flush so let's do it.
                m_io_group.bus_master_base().offset(2).out<u8>(m_io_group.bus_master_base().offset(2).in<u8>() | 0x6);
            }
        }
        request.complete(result);
    });
}

void IDEChannel::initialize(bool force_pio)
{
    m_parent_controller->enable_pin_based_interrupts();

    dbgln_if(PATA_DEBUG, "IDEChannel: {} IO base: {}", channel_type_string(), m_io_group.io_base());
    dbgln_if(PATA_DEBUG, "IDEChannel: {} control base: {}", channel_type_string(), m_io_group.control_base());
    dbgln_if(PATA_DEBUG, "IDEChannel: {} bus master base: {}", channel_type_string(), m_io_group.bus_master_base());

    if (force_pio) {
        dbgln("IDEChannel: Requested to force PIO mode; not setting up DMA");
        return;
    }

    // Let's try to set up DMA transfers.
    PCI::enable_bus_mastering(m_parent_controller->pci_address());
    m_prdt_page = MM.allocate_supervisor_physical_page();
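    // Setting the high bit in the final PRD entry marks it as the end of the PRD table, per the Bus Master IDE specification.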
    prdt().end_of_table = 0x8000;
    m_dma_buffer_page = MM.allocate_supervisor_physical_page();
}

static void print_ide_status(u8 status)
{
    klog() << "IDEChannel: print_ide_status: DRQ=" << ((status & ATA_SR_DRQ) != 0) << " BSY=" << ((status & ATA_SR_BSY) != 0) << " DRDY=" << ((status & ATA_SR_DRDY) != 0) << " DSC=" << ((status & ATA_SR_DSC) != 0) << " DF=" << ((status & ATA_SR_DF) != 0) << " CORR=" << ((status & ATA_SR_CORR) != 0) << " IDX=" << ((status & ATA_SR_IDX) != 0) << " ERR=" << ((status & ATA_SR_ERR) != 0);
}

void IDEChannel::try_disambiguate_error()
{
    dbgln("IDEChannel: Error cause:");
    switch (m_device_error) {
    case ATA_ER_BBK:
        dbgln("IDEChannel: - Bad block");
        break;
    case ATA_ER_UNC:
        dbgln("IDEChannel: - Uncorrectable data");
        break;
    case ATA_ER_MC:
        dbgln("IDEChannel: - Media changed");
        break;
    case ATA_ER_IDNF:
        dbgln("IDEChannel: - ID mark not found");
        break;
    case ATA_ER_MCR:
        dbgln("IDEChannel: - Media change request");
        break;
    case ATA_ER_ABRT:
        dbgln("IDEChannel: - Command aborted");
        break;
    case ATA_ER_TK0NF:
        dbgln("IDEChannel: - Track 0 not found");
        break;
    case ATA_ER_AMNF:
        dbgln("IDEChannel: - No address mark");
        break;
    default:
        dbgln("IDEChannel: - No one knows");
        break;
    }
}

void IDEChannel::handle_irq(const RegisterState&)
{
    u8 status = m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
    m_entropy_source.add_random_event(status);

    u8 bstatus = m_io_group.bus_master_base().offset(2).in<u8>();
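    // Bit 2 of the bus master status register is set when this channel is the one that raised the interrupt.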
    if (!(bstatus & 0x4)) {
        // interrupt not from this device, ignore
        dbgln_if(PATA_DEBUG, "IDEChannel: ignore interrupt");
        return;
    }

    ScopedSpinLock lock(m_request_lock);
#if PATA_DEBUG
    klog() << "IDEChannel: interrupt: DRQ=" << ((status & ATA_SR_DRQ) != 0) << " BSY=" << ((status & ATA_SR_BSY) != 0) << " DRDY=" << ((status & ATA_SR_DRDY) != 0);
#endif

    if (!m_current_request) {
#if PATA_DEBUG
        dbgln("IDEChannel: IRQ but no pending request!");
#endif
        return;
    }

    if (status & ATA_SR_ERR) {
        print_ide_status(status);
        m_device_error = m_io_group.io_base().offset(ATA_REG_ERROR).in<u8>();
        dbgln("IDEChannel: Error {:#02x}!", (u8)m_device_error);
        try_disambiguate_error();
        complete_current_request(AsyncDeviceRequest::Failure);
        return;
    }
    m_device_error = 0;

    if (m_current_request_uses_dma) {
        complete_current_request(AsyncDeviceRequest::Success);
        return;
    }

    // Now schedule reading/writing the buffer as soon as we leave the irq handler.
    // This is important so that we can safely access the buffers, which could
    // trigger page faults.
    Processor::deferred_call_queue([this]() {
        ScopedSpinLock lock(m_request_lock);
        if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) {
            dbgln_if(PATA_DEBUG, "IDEChannel: Read block {}/{}", m_current_request_block_index, m_current_request->block_count());
            if (ata_do_read_sector()) {
                if (++m_current_request_block_index >= m_current_request->block_count()) {
                    complete_current_request(AsyncDeviceRequest::Success);
                    return;
                }
                // Wait for the next block.
                enable_irq();
            }
        } else {
            if (!m_current_request_flushing_cache) {
                dbgln_if(PATA_DEBUG, "IDEChannel: Wrote block {}/{}", m_current_request_block_index, m_current_request->block_count());
                if (++m_current_request_block_index >= m_current_request->block_count()) {
                    // We wrote the last block; now flush the disk cache.
                    ASSERT(!m_current_request_flushing_cache);
                    m_current_request_flushing_cache = true;
                    m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_CACHE_FLUSH);
                } else {
                    // Write the next block.
                    ata_do_write_sector();
                }
            } else {
                complete_current_request(AsyncDeviceRequest::Success);
            }
        }
    });
}

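// Note: reading an ATA I/O port four times takes roughly 400ns, the conventional settle delay between ATA register accesses; this helper always polls the primary channel's control port (0x3f6).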
static void io_delay()
{
    for (int i = 0; i < 4; ++i)
        IO::in8(0x3f6);
}

void IDEChannel::wait_until_not_busy()
{
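    // Poll the alternate status register; unlike the regular status register, reading it does not acknowledge a pending interrupt.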
    while (m_io_group.control_base().in<u8>() & ATA_SR_BSY)
        ;
}

String IDEChannel::channel_type_string() const
{
    if (m_channel_type == ChannelType::Primary)
        return "Primary";
    return "Secondary";
}

void IDEChannel::detect_disks()
{
    auto channel_string = [](u8 i) -> const char* {
        if (i == 0)
            return "master";
        return "slave";
    };

    // There are only two possible disks connected to a channel.
    for (auto i = 0; i < 2; i++) {
        m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | (i << 4)); // First, we need to select the drive itself.

        m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_IDENTIFY); // Send the ATA_IDENTIFY command.

        // Wait for the BSY flag to be reset.
        while (m_io_group.control_base().in<u8>() & ATA_SR_BSY)
            ;

        if (m_io_group.control_base().in<u8>() == 0x00) {
            dbgln_if(PATA_DEBUG, "IDEChannel: No {} {} disk detected!", channel_type_string().to_lowercase(), channel_string(i));
            continue;
        }

        bool check_for_atapi = false;
        PATADiskDevice::InterfaceType interface_type = PATADiskDevice::InterfaceType::ATA;
        for (;;) {
            u8 status = m_io_group.control_base().in<u8>();
            if (status & ATA_SR_ERR) {
                dbgln_if(PATA_DEBUG, "IDEChannel: {} {} device is not ATA. Will check for ATAPI.", channel_type_string(), channel_string(i));
                check_for_atapi = true;
                break;
            }

            if (!(status & ATA_SR_BSY) && (status & ATA_SR_DRQ)) {
                dbgln_if(PATA_DEBUG, "IDEChannel: {} {} device appears to be ATA.", channel_type_string(), channel_string(i));
                interface_type = PATADiskDevice::InterfaceType::ATA;
                break;
            }
        }
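
        // If IDENTIFY was rejected, the signature bytes in the LBA mid/high registers tell us what is attached: 0x14/0xEB is the ATAPI signature and 0x69/0x96 the SATAPI one.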
        if (check_for_atapi) {
            u8 cl = m_io_group.io_base().offset(ATA_REG_LBA1).in<u8>();
            u8 ch = m_io_group.io_base().offset(ATA_REG_LBA2).in<u8>();

            if ((cl == 0x14 && ch == 0xEB) || (cl == 0x69 && ch == 0x96)) {
                interface_type = PATADiskDevice::InterfaceType::ATAPI;
                dbgln("IDEChannel: {} {} device appears to be ATAPI. We're going to ignore it for now as we don't support it.", channel_type_string(), channel_string(i));
                continue;
            } else {
                dbgln("IDEChannel: {} {} device doesn't appear to be ATA or ATAPI. Ignoring it.", channel_type_string(), channel_string(i));
                continue;
            }
        }

        ByteBuffer wbuf = ByteBuffer::create_uninitialized(512);
        ByteBuffer bbuf = ByteBuffer::create_uninitialized(512);
        u8* b = bbuf.data();
        u16* w = (u16*)wbuf.data();
        const u16* wbufbase = (u16*)wbuf.data();

        for (u32 i = 0; i < 256; ++i) {
            u16 data = m_io_group.io_base().offset(ATA_REG_DATA).in<u16>();
            *(w++) = data;
            *(b++) = MSB(data);
            *(b++) = LSB(data);
        }
// "Unpad" the device name string.
for ( u32 i = 93 ; i > 54 & & bbuf [ i ] = = ' ' ; - - i )
bbuf [ i ] = 0 ;
2021-01-29 19:37:40 +00:00
u16 cyls = wbufbase [ ATA_IDENT_CYLINDERS / sizeof ( u16 ) ] ;
u16 heads = wbufbase [ ATA_IDENT_HEADS / sizeof ( u16 ) ] ;
u16 spt = wbufbase [ ATA_IDENT_SECTORS / sizeof ( u16 ) ] ;
u16 capabilities = wbufbase [ ATA_IDENT_CAPABILITIES / sizeof ( u16 ) ] ;
2020-12-19 12:50:57 +02:00
if ( cyls = = 0 | | heads = = 0 | | spt = = 0 )
continue ;

        dbgln("IDEChannel: {} {} device found: Type={}, Name={}, C/H/Spt={}/{}/{}, Capabilities=0x{:04x}", channel_type_string(), channel_string(i), interface_type == PATADiskDevice::InterfaceType::ATA ? "ATA" : "ATAPI", ((char*)bbuf.data() + 54), cyls, heads, spt, capabilities);

        if (i == 0) {
            m_master = PATADiskDevice::create(m_parent_controller, *this, PATADiskDevice::DriveType::Master, interface_type, cyls, heads, spt, capabilities, 3, (m_channel_type == ChannelType::Primary) ? 0 : 2);
        } else {
            m_slave = PATADiskDevice::create(m_parent_controller, *this, PATADiskDevice::DriveType::Slave, interface_type, cyls, heads, spt, capabilities, 3, (m_channel_type == ChannelType::Primary) ? 1 : 3);
        }
    }
}

void IDEChannel::ata_access(Direction direction, bool slave_request, u32 lba, u8 block_count, u16 capabilities, bool use_dma)
{
    LBAMode lba_mode;
    u8 head = 0;
    u8 sector = 0;
    u16 cylinder = 0;

    if (lba >= 0x10000000) {
        ASSERT(capabilities & ATA_CAP_LBA);
        lba_mode = LBAMode::FortyEightBit;
        head = 0;
    } else if (capabilities & ATA_CAP_LBA) {
        lba_mode = LBAMode::TwentyEightBit;
        head = (lba & 0xF000000) >> 24;
    } else {
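        // No LBA support; fall back to CHS addressing, assuming the classic 16-head, 63-sectors-per-track translation.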
        lba_mode = LBAMode::None;
        sector = (lba % 63) + 1;
        cylinder = (lba + 1 - sector) / (16 * 63);
        head = (lba + 1 - sector) % (16 * 63) / (63);
    }

    wait_until_not_busy();

    if (lba_mode == LBAMode::None)
        m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | (static_cast<u8>(slave_request) << 4) | head);
    else
        m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xE0 | (static_cast<u8>(slave_request) << 4) | head);

    if (lba_mode == LBAMode::FortyEightBit) {
        m_io_group.io_base().offset(ATA_REG_SECCOUNT1).out<u8>(0);
        m_io_group.io_base().offset(ATA_REG_LBA3).out<u8>((lba & 0xFF000000) >> 24);
        m_io_group.io_base().offset(ATA_REG_LBA4).out<u8>(0);
        m_io_group.io_base().offset(ATA_REG_LBA5).out<u8>(0);
    }

    m_io_group.io_base().offset(ATA_REG_SECCOUNT0).out<u8>(block_count);
    if (lba_mode == LBAMode::FortyEightBit || lba_mode == LBAMode::TwentyEightBit) {
        m_io_group.io_base().offset(ATA_REG_LBA0).out<u8>((lba & 0x000000FF) >> 0);
        m_io_group.io_base().offset(ATA_REG_LBA1).out<u8>((lba & 0x0000FF00) >> 8);
        m_io_group.io_base().offset(ATA_REG_LBA2).out<u8>((lba & 0x00FF0000) >> 16);
    } else {
        m_io_group.io_base().offset(ATA_REG_LBA0).out<u8>(sector);
        m_io_group.io_base().offset(ATA_REG_LBA1).out<u8>((cylinder >> 0) & 0xFF);
        m_io_group.io_base().offset(ATA_REG_LBA2).out<u8>((cylinder >> 8) & 0xFF);
    }

    for (;;) {
        auto status = m_io_group.control_base().in<u8>();
        if (!(status & ATA_SR_BSY) && (status & ATA_SR_DRDY))
            break;
    }

    if (lba_mode != LBAMode::FortyEightBit) {
        if (use_dma)
            m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_DMA : ATA_CMD_WRITE_DMA);
        else
            m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_PIO : ATA_CMD_WRITE_PIO);
    } else {
        if (use_dma)
            m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_DMA_EXT : ATA_CMD_WRITE_DMA_EXT);
        else
            m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_PIO_EXT : ATA_CMD_WRITE_PIO_EXT);
    }
    enable_irq();
}

void IDEChannel::ata_read_sectors_with_dma(bool slave_request, u16 capabilities)
{
    auto& request = *m_current_request;
    u32 lba = request.block_index();
    dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors_with_dma ({} x {})", lba, request.block_count());

    prdt().offset = m_dma_buffer_page->paddr();
    prdt().size = 512 * request.block_count();

    ASSERT(prdt().size <= PAGE_SIZE);
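
    // Bus master IDE register layout (per channel): offset 0 is the command register, offset 2 the status register, and offset 4 the physical address of the PRDT.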
    // Stop bus master.
    m_io_group.bus_master_base().out<u8>(0);
    // Write the PRDT location.
    m_io_group.bus_master_base().offset(4).out(m_prdt_page->paddr().get());
    // Turn on the "Interrupt" and "Error" flags. The error flag should be cleared by hardware.
    m_io_group.bus_master_base().offset(2).out<u8>(m_io_group.bus_master_base().offset(2).in<u8>() | 0x6);
    // Set the transfer direction to "read" (device to memory).
    m_io_group.bus_master_base().out<u8>(0x8);

    ata_access(Direction::Read, slave_request, lba, request.block_count(), capabilities, true);

    // Start bus master.
    m_io_group.bus_master_base().out<u8>(0x9);
}

bool IDEChannel::ata_do_read_sector()
{
    dbgln_if(PATA_DEBUG, "IDEChannel::ata_do_read_sector");
    auto& request = *m_current_request;
    auto out_buffer = request.buffer().offset(m_current_request_block_index * 512);
    ssize_t nwritten = request.write_to_buffer_buffered<512>(out_buffer, 512, [&](u8* buffer, size_t buffer_bytes) {
        for (size_t i = 0; i < buffer_bytes; i += sizeof(u16))
            *(u16*)&buffer[i] = IO::in16(m_io_group.io_base().offset(ATA_REG_DATA).get());
        return (ssize_t)buffer_bytes;
    });
    if (nwritten < 0) {
        // TODO: Do we need to abort the PATA read if this wasn't the last block?
        complete_current_request(AsyncDeviceRequest::MemoryFault);
        return false;
    }
    return true;
}

// FIXME: This doesn't quite work and locks up reading LBA 3.
void IDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
{
    auto& request = *m_current_request;
    ASSERT(request.block_count() <= 256);
    dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors");

    auto lba = request.block_index();
    dbgln_if(PATA_DEBUG, "IDEChannel: Reading {} sector(s) @ LBA {}", request.block_count(), lba);

    ata_access(Direction::Read, slave_request, lba, request.block_count(), capabilities, false);
}

void IDEChannel::ata_write_sectors_with_dma(bool slave_request, u16 capabilities)
{
    auto& request = *m_current_request;
    u32 lba = request.block_index();
    dbgln_if(PATA_DEBUG, "IDEChannel::ata_write_sectors_with_dma ({} x {})", lba, request.block_count());

    prdt().offset = m_dma_buffer_page->paddr();
    prdt().size = 512 * request.block_count();

    if (!request.read_from_buffer(request.buffer(), m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * request.block_count())) {
        complete_current_request(AsyncDeviceRequest::MemoryFault);
        return;
    }

    ASSERT(prdt().size <= PAGE_SIZE);
    // Stop bus master.
    m_io_group.bus_master_base().out<u8>(0);
    // Write the PRDT location.
    m_io_group.bus_master_base().offset(4).out<u32>(m_prdt_page->paddr().get());
    // Turn on the "Interrupt" and "Error" flags. The error flag should be cleared by hardware.
    m_io_group.bus_master_base().offset(2).out<u8>(m_io_group.bus_master_base().offset(2).in<u8>() | 0x6);

    ata_access(Direction::Write, slave_request, lba, request.block_count(), capabilities, true);

    // Start bus master.
    m_io_group.bus_master_base().out<u8>(0x1);
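    // Note: leaving bit 3 of the bus master command register clear selects the write (memory-to-device) direction, so 0x1 just starts the engine.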
}

void IDEChannel::ata_do_write_sector()
{
    auto& request = *m_current_request;

    io_delay();
    while ((m_io_group.control_base().in<u8>() & ATA_SR_BSY) || !(m_io_group.control_base().in<u8>() & ATA_SR_DRQ))
        ;

    u8 status = m_io_group.control_base().in<u8>();
    ASSERT(status & ATA_SR_DRQ);

    auto in_buffer = request.buffer().offset(m_current_request_block_index * 512);
    dbgln_if(PATA_DEBUG, "IDEChannel: Writing 512 bytes (part {}) (status={:#02x})...", m_current_request_block_index, status);
    ssize_t nread = request.read_from_buffer_buffered<512>(in_buffer, 512, [&](const u8* buffer, size_t buffer_bytes) {
        for (size_t i = 0; i < buffer_bytes; i += sizeof(u16))
            IO::out16(m_io_group.io_base().offset(ATA_REG_DATA).get(), *(const u16*)&buffer[i]);
        return (ssize_t)buffer_bytes;
    });
    if (nread < 0)
        complete_current_request(AsyncDeviceRequest::MemoryFault);
}

// FIXME: I'm assuming this doesn't work based on the fact PIO read doesn't work.
void IDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
{
    auto& request = *m_current_request;
    ASSERT(request.block_count() <= 256);

    u32 start_sector = request.block_index();
    u32 count = request.block_count();
    dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} sector(s) @ LBA {}", count, start_sector);

    ata_access(Direction::Write, slave_request, start_sector, request.block_count(), capabilities, false);
    ata_do_write_sector();
}

}