mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-01-24 01:09:38 -05:00)
1122c0c1cc
Move the cache control settings into the queue_limits so that the flags can be set atomically with the device queue frozen.

Add new features and flags fields for the driver-set flags, and internal (usually sysfs-controlled) flags in the block layer. Note that we'll eventually remove enough fields from queue_limits to bring it back to the previous size.

The disable flag is inverted compared to the previous meaning, which means it now survives a rescan, similar to the max_sectors and max_discard_sectors user limits.

The FLUSH and FUA flags are now inherited by blk_stack_limits, which simplifies the code in dm a lot, but also causes a slight behavior change in that dm-switch and dm-unstripe now advertise a write cache despite setting num_flush_bios to 0. The I/O path will handle this gracefully, but as far as I can tell the lack of num_flush_bios, and thus of flush support, is a pre-existing data integrity bug in those targets that really needs fixing; after that, a non-zero num_flush_bios should be required in dm for targets that map to underlying devices.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20240617060532.127975-14-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
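To make the change concrete, here is a minimal sketch of how a driver advertises cache control under this scheme: the write-cache and FUA features go into queue_limits before the disk is allocated, instead of being toggled as queue flags afterwards. The BLK_FEAT_* names and the blk_mq_alloc_disk() signature follow the block layer as of this series; my_driver_alloc_disk() is a hypothetical wrapper, not code from this commit.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical helper: request cache-control features up front, so they
 * are applied atomically while the queue is set up, rather than by
 * flipping queue flags after the fact. */
static struct gendisk *my_driver_alloc_disk(struct blk_mq_tag_set *set,
					    void *queuedata,
					    bool write_cache, bool fua)
{
	struct queue_limits lim = { };

	if (write_cache)
		lim.features |= BLK_FEAT_WRITE_CACHE;
	if (fua)
		lim.features |= BLK_FEAT_FUA;

	/* The limits, features included, are applied at allocation time. */
	return blk_mq_alloc_disk(set, &lim, queuedata);
}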
123 lines · 2.9 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MMC_QUEUE_H
#define MMC_QUEUE_H

#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

/* Result of attempting to issue a request to the card. */
enum mmc_issued {
	MMC_REQ_STARTED,
	MMC_REQ_BUSY,
	MMC_REQ_FAILED_TO_START,
	MMC_REQ_FINISHED,
};

/* How a request is dispatched: synchronously, as a CQE direct command,
 * or asynchronously. */
enum mmc_issue_type {
	MMC_ISSUE_SYNC,
	MMC_ISSUE_DCMD,
	MMC_ISSUE_ASYNC,
	MMC_ISSUE_MAX,
};

/* Convert a blk-mq request to its per-request driver data (PDU). */
static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

struct mmc_queue_req;

/* Convert per-request driver data back to the enclosing request. */
static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr)
{
	return blk_mq_rq_from_pdu(mqr);
}

struct mmc_blk_data;
struct mmc_blk_ioc_data;

/* The MMC commands and data that make up one block layer request. */
struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	sbc;
	struct mmc_command	cmd;
	struct mmc_command	stop;
	struct mmc_data		data;
};

/**
 * enum mmc_drv_op - enumerates the operations in the mmc_queue_req
 * @MMC_DRV_OP_IOCTL: ioctl operation
 * @MMC_DRV_OP_IOCTL_RPMB: RPMB-oriented ioctl operation
 * @MMC_DRV_OP_BOOT_WP: write protect boot partitions
 * @MMC_DRV_OP_GET_CARD_STATUS: get card status
 * @MMC_DRV_OP_GET_EXT_CSD: get the EXT CSD from an eMMC card
 */
enum mmc_drv_op {
	MMC_DRV_OP_IOCTL,
	MMC_DRV_OP_IOCTL_RPMB,
	MMC_DRV_OP_BOOT_WP,
	MMC_DRV_OP_GET_CARD_STATUS,
	MMC_DRV_OP_GET_EXT_CSD,
};

/* Per-request driver data, allocated by blk-mq alongside each request. */
struct mmc_queue_req {
	struct mmc_blk_request	brq;
	struct scatterlist	*sg;
	enum mmc_drv_op		drv_op;
	int			drv_op_result;
	void			*drv_op_data;
	unsigned int		ioc_count;
	int			retries;
};

/* Per-card queue state. */
struct mmc_queue {
	struct mmc_card		*card;
	struct mmc_ctx		ctx;
	struct blk_mq_tag_set	tag_set;
	struct mmc_blk_data	*blkdata;
	struct request_queue	*queue;
	spinlock_t		lock;
	int			in_flight[MMC_ISSUE_MAX];
	unsigned int		cqe_busy;
#define MMC_CQE_DCMD_BUSY	BIT(0)
	bool			busy;
	bool			recovery_needed;
	bool			in_recovery;
	bool			rw_wait;
	bool			waiting;
	struct work_struct	recovery_work;
	wait_queue_head_t	wait;
	struct request		*recovery_req;
	struct request		*complete_req;
	struct mutex		complete_lock;
	struct work_struct	complete_work;
};

struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
			       unsigned int features);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
				     struct mmc_queue_req *);

void mmc_cqe_check_busy(struct mmc_queue *mq);
void mmc_cqe_recovery_notifier(struct mmc_request *mrq);

enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);

/* Total requests in flight across all issue types. */
static inline int mmc_tot_in_flight(struct mmc_queue *mq)
{
	return mq->in_flight[MMC_ISSUE_SYNC] +
	       mq->in_flight[MMC_ISSUE_DCMD] +
	       mq->in_flight[MMC_ISSUE_ASYNC];
}

/* Requests in flight on the command queue engine (CQE). */
static inline int mmc_cqe_qcnt(struct mmc_queue *mq)
{
	return mq->in_flight[MMC_ISSUE_DCMD] +
	       mq->in_flight[MMC_ISSUE_ASYNC];
}

#endif
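As a usage illustration for enum mmc_drv_op and the PDU helpers above: driver-private operations travel through the block queue as ordinary requests whose per-request data carries the operation and, on completion, its result. The sketch below is modelled on the pattern in drivers/mmc/core/block.c; mmc_get_card_status_example() is a hypothetical name, not a function in the tree.

#include <linux/blk-mq.h>

/* Hypothetical example: issue MMC_DRV_OP_GET_CARD_STATUS through the
 * block queue and collect the result from the request's PDU. */
static int mmc_get_card_status_example(struct mmc_queue *mq, u32 *status)
{
	struct mmc_queue_req *mq_rq;
	struct request *req;
	int ret;

	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	mq_rq = req_to_mmc_queue_req(req);	/* request -> PDU */
	mq_rq->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
	mq_rq->drv_op_result = -EIO;

	blk_execute_rq(req, false);		/* dispatch and wait */

	ret = mq_rq->drv_op_result;		/* card status, or -errno */
	if (ret >= 0) {
		*status = ret;
		ret = 0;
	}
	blk_mq_free_request(req);
	return ret;
}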