/* Imported from kernel_samsung_sm7125/drivers/cdrom/gdrom.c */
/* GD ROM driver for the SEGA Dreamcast
* copyright Adrian McMenamin, 2007
* With thanks to Marcus Comstedt and Nathan Keynes
* for work in reversing PIO and DMA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/cdrom.h>
#include <linux/genhd.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <scsi/scsi.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/delay.h>
#include <mach/dma.h>
#include <mach/sysasic.h>
#define GDROM_DEV_NAME "gdrom"
#define GD_SESSION_OFFSET 150
/* GD Rom commands */
#define GDROM_COM_SOFTRESET 0x08
#define GDROM_COM_EXECDIAG 0x90
#define GDROM_COM_PACKET 0xA0
#define GDROM_COM_IDDEV 0xA1
/* GD Rom registers */
#define GDROM_BASE_REG 0xA05F7000
#define GDROM_ALTSTATUS_REG (GDROM_BASE_REG + 0x18)
#define GDROM_DATA_REG (GDROM_BASE_REG + 0x80)
#define GDROM_ERROR_REG (GDROM_BASE_REG + 0x84)
#define GDROM_INTSEC_REG (GDROM_BASE_REG + 0x88)
#define GDROM_SECNUM_REG (GDROM_BASE_REG + 0x8C)
#define GDROM_BCL_REG (GDROM_BASE_REG + 0x90)
#define GDROM_BCH_REG (GDROM_BASE_REG + 0x94)
#define GDROM_DSEL_REG (GDROM_BASE_REG + 0x98)
#define GDROM_STATUSCOMMAND_REG (GDROM_BASE_REG + 0x9C)
#define GDROM_RESET_REG (GDROM_BASE_REG + 0x4E4)
#define GDROM_DMA_STARTADDR_REG (GDROM_BASE_REG + 0x404)
#define GDROM_DMA_LENGTH_REG (GDROM_BASE_REG + 0x408)
#define GDROM_DMA_DIRECTION_REG (GDROM_BASE_REG + 0x40C)
#define GDROM_DMA_ENABLE_REG (GDROM_BASE_REG + 0x414)
#define GDROM_DMA_STATUS_REG (GDROM_BASE_REG + 0x418)
#define GDROM_DMA_WAIT_REG (GDROM_BASE_REG + 0x4A0)
#define GDROM_DMA_ACCESS_CTRL_REG (GDROM_BASE_REG + 0x4B8)
#define GDROM_HARD_SECTOR 2048
#define BLOCK_LAYER_SECTOR 512
#define GD_TO_BLK 4
#define GDROM_DEFAULT_TIMEOUT (HZ * 7)
static DEFINE_MUTEX(gdrom_mutex);
/* Human-readable descriptions of the SCSI sense keys the drive can
 * report; looked up by index in gdrom_getsense(). */
static const struct {
	int sense_key;
	const char * const text;
} sense_texts[] = {
	{NO_SENSE, "OK"},
	{RECOVERED_ERROR, "Recovered from error"},
	{NOT_READY, "Device not ready"},
	{MEDIUM_ERROR, "Disk not ready"},
	{HARDWARE_ERROR, "Hardware error"},
	{ILLEGAL_REQUEST, "Command has failed"},
	{UNIT_ATTENTION, "Device needs attention - disk may have been changed"},
	{DATA_PROTECT, "Data protection error"},
	{ABORTED_COMMAND, "Command aborted"},
};
static struct platform_device *pd;	/* registered in init_gdrom() */
static int gdrom_major;			/* block major; 0 until probe succeeds */
static DECLARE_WAIT_QUEUE_HEAD(command_queue);	/* woken by the command IRQ */
static DECLARE_WAIT_QUEUE_HEAD(request_queue);	/* woken by the DMA-done IRQ */
static DEFINE_SPINLOCK(gdrom_lock);	/* protects gdrom_deferred */
static void gdrom_readdisk_dma(struct work_struct *work);
static DECLARE_WORK(work, gdrom_readdisk_dma);
static LIST_HEAD(gdrom_deferred);	/* read requests queued for the DMA worker */
/* Raw table of contents as returned by the drive's Read TOC command. */
struct gdromtoc {
	unsigned int entry[99];		/* one packed word per track */
	unsigned int first, last;	/* first/last track descriptors */
	unsigned int leadout;
};

/* Driver-wide state for the single GD-ROM unit. */
static struct gdrom_unit {
	struct gendisk *disk;
	struct cdrom_device_info *cd_info;
	int status;	/* last status register value, written in IRQ context */
	int pending;	/* non-zero while a packet command is in flight */
	int transfer;	/* non-zero while a DMA transfer is in flight */
	char disk_type;
	struct gdromtoc *toc;
	struct request_queue *gdrom_rq;
} gd;

/* Layout of the 80-byte identify block read by gdrom_identifydevice(). */
struct gdrom_id {
	char mid;
	char modid;
	char verid;
	char padA[13];
	char mname[16];
	char modname[16];
	char firmver[16];
	char padB[16];
};
static int gdrom_getsense(short *bufstring);
static int gdrom_packetcommand(struct cdrom_device_info *cd_info,
struct packet_command *command);
static int gdrom_hardreset(struct cdrom_device_info *cd_info);
static bool gdrom_is_busy(void)
{
return (__raw_readb(GDROM_ALTSTATUS_REG) & 0x80) != 0;
}
static bool gdrom_data_request(void)
{
return (__raw_readb(GDROM_ALTSTATUS_REG) & 0x88) == 8;
}
/* Busy-wait (up to GDROM_DEFAULT_TIMEOUT) for BSY to clear.
 * Returns true if the drive became ready within the timeout. */
static bool gdrom_wait_clrbusy(void)
{
	unsigned long deadline = jiffies + GDROM_DEFAULT_TIMEOUT;

	while (__raw_readb(GDROM_ALTSTATUS_REG) & 0x80) {
		if (!time_before(jiffies, deadline))
			break;
		cpu_relax();
	}
	/* "deadline + 1" preserves the original's success window */
	return time_before(jiffies, deadline + 1);
}
/* After issuing a command: wait for the drive to assert BSY, then for it
 * to deassert it again. Returns true if the drive finished in time. */
static bool gdrom_wait_busy_sleeps(void)
{
	unsigned long deadline = jiffies + GDROM_DEFAULT_TIMEOUT;

	/* Wait to get busy first */
	while (!gdrom_is_busy()) {
		if (!time_before(jiffies, deadline))
			break;
		cpu_relax();
	}
	/* Now wait for busy to clear */
	return gdrom_wait_clrbusy();
}
/* Issue the ATA IDENTIFY DEVICE command and read the 80-byte reply
 * into @buf (caller must supply at least 40 shorts of space).
 * On timeout only a sense key is logged; @buf is left unmodified. */
static void gdrom_identifydevice(void *buf)
{
	int c;
	short *data = buf;
	/* If the device won't clear it has probably
	 * been hit by a serious failure - but we'll
	 * try to return a sense key even so */
	if (!gdrom_wait_clrbusy()) {
		gdrom_getsense(NULL);
		return;
	}
	__raw_writeb(GDROM_COM_IDDEV, GDROM_STATUSCOMMAND_REG);
	if (!gdrom_wait_busy_sleeps()) {
		gdrom_getsense(NULL);
		return;
	}
	/* now read in the data - 40 words from the 16-bit data register */
	for (c = 0; c < 40; c++)
		data[c] = __raw_readw(GDROM_DATA_REG);
}
/* Send a 12-byte SPI packet command (@spi_string) to the drive and tell
 * it to expect a @buflen-byte data transfer. On timeout the sense key is
 * logged and the command abandoned. Completion is signalled via the
 * command IRQ (see gdrom_command_interrupt). */
static void gdrom_spicommand(void *spi_string, int buflen)
{
	short *cmd = spi_string;
	unsigned long timeout;
	/* ensure IRQ_WAIT is set */
	__raw_writeb(0x08, GDROM_ALTSTATUS_REG);
	/* specify how many bytes we expect back */
	__raw_writeb(buflen & 0xFF, GDROM_BCL_REG);
	__raw_writeb((buflen >> 8) & 0xFF, GDROM_BCH_REG);
	/* other parameters */
	__raw_writeb(0, GDROM_INTSEC_REG);
	__raw_writeb(0, GDROM_SECNUM_REG);
	__raw_writeb(0, GDROM_ERROR_REG);
	/* Wait until we can go */
	if (!gdrom_wait_clrbusy()) {
		gdrom_getsense(NULL);
		return;
	}
	timeout = jiffies + GDROM_DEFAULT_TIMEOUT;
	__raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
	/* wait for the drive to raise DRQ before writing the packet */
	while (!gdrom_data_request() && time_before(jiffies, timeout))
		cpu_relax();
	if (!time_before(jiffies, timeout + 1)) {
		gdrom_getsense(NULL);
		return;
	}
	/* the 12-byte packet goes out as six 16-bit words */
	outsw(GDROM_DATA_REG, cmd, 6);
}
/* gdrom_command_executediagnostic:
 * Used to probe for presence of working GDROM
 * Restarts GDROM device and then applies standard ATA 3
 * Execute Diagnostic Command: a return of '1' indicates device 0
 * present and device 1 absent
 */
static char gdrom_execute_diagnostic(void)
{
	/* hard-reset the G1 bus before probing */
	gdrom_hardreset(gd.cd_info);
	if (!gdrom_wait_clrbusy())
		return 0;	/* drive never became ready */
	__raw_writeb(GDROM_COM_EXECDIAG, GDROM_STATUSCOMMAND_REG);
	if (!gdrom_wait_busy_sleeps())
		return 0;	/* diagnostic did not complete in time */
	/* diagnostic result is left in the error register */
	return __raw_readb(GDROM_ERROR_REG);
}
/*
 * Prepare disk (spin-up) command
 * byte 0 = 0x70
 * byte 2 = 0x1f
 */
/* Spin the disc up via the 0x70/0x1f prepare-disk packet command.
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO when the
 * drive reports an error (the sense key is logged). */
static int gdrom_preparedisk_cmd(void)
{
	struct packet_command *spin_command;
	spin_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
	if (!spin_command)
		return -ENOMEM;
	spin_command->cmd[0] = 0x70;
	spin_command->cmd[2] = 0x1f;
	spin_command->buflen = 0;	/* no data phase */
	gd.pending = 1;
	gdrom_packetcommand(gd.cd_info, spin_command);
	/* wait up to GDROM_DEFAULT_TIMEOUT (7 * HZ) for the command IRQ */
	wait_event_interruptible_timeout(command_queue, gd.pending == 0,
		GDROM_DEFAULT_TIMEOUT);
	gd.pending = 0;
	kfree(spin_command);
	/* bit 0 of the status register is the error flag */
	if (gd.status & 0x01) {
		/* log an error */
		gdrom_getsense(NULL);
		return -EIO;
	}
	return 0;
}
/*
 * Read TOC command
 * byte 0 = 0x14
 * byte 1 = session
 * byte 3 = sizeof TOC >> 8 ie upper byte
 * byte 4 = sizeof TOC & 0xff ie lower byte
 *
 * Reads the table of contents for @session into @toc.
 * Returns 0 on success, -ENOMEM/-EBUSY/-EINVAL on failure.
 */
static int gdrom_readtoc_cmd(struct gdromtoc *toc, int session)
{
	int tocsize;
	struct packet_command *toc_command;
	int err = 0;
	toc_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
	if (!toc_command)
		return -ENOMEM;
	tocsize = sizeof(struct gdromtoc);
	toc_command->cmd[0] = 0x14;
	toc_command->cmd[1] = session;
	toc_command->cmd[3] = tocsize >> 8;
	toc_command->cmd[4] = tocsize & 0xff;
	toc_command->buflen = tocsize;
	/* refuse to stack commands - one packet command at a time */
	if (gd.pending) {
		err = -EBUSY;
		goto cleanup_readtoc_final;
	}
	gd.pending = 1;
	gdrom_packetcommand(gd.cd_info, toc_command);
	wait_event_interruptible_timeout(command_queue, gd.pending == 0,
		GDROM_DEFAULT_TIMEOUT);
	/* still pending => the command IRQ never fired (timeout) */
	if (gd.pending) {
		err = -EINVAL;
		goto cleanup_readtoc;
	}
	/* PIO-read the TOC data as 16-bit words */
	insw(GDROM_DATA_REG, toc, tocsize/2);
	if (gd.status & 0x01)
		err = -EINVAL;	/* drive flagged an error */
cleanup_readtoc:
	gd.pending = 0;
cleanup_readtoc_final:
	kfree(toc_command);
	return err;
}
/* TOC helpers */
/* Decode the big-endian LBA field of a TOC entry and strip the
 * session offset. */
static int get_entry_lba(int track)
{
	int lba = cpu_to_be32(track & 0xffffff00);

	return lba - GD_SESSION_OFFSET;
}
/* Extract the Q-channel control nibble (bits 4-7) of a TOC entry;
 * non-zero marks a data track. */
static int get_entry_q_ctrl(int track)
{
	int ctrl_bits = track & 0x000000f0;

	return ctrl_bits >> 4;
}
/* Extract the track-number byte (bits 8-15) of a TOC entry. */
static int get_entry_track(int track)
{
	int track_bits = track & 0x0000ff00;

	return track_bits >> 8;
}
/* Find the first data track of the disc's last session and report its
 * LBA (for the CDROMMULTISESSION ioctl). Tries a GD-ROM high-density
 * TOC (session 1) first, then falls back to a plain CD TOC (session 0).
 * NOTE(review): tocuse and lentry are computed but never used - kept
 * as-is to preserve the historical code. */
static int gdrom_get_last_session(struct cdrom_device_info *cd_info,
	struct cdrom_multisession *ms_info)
{
	int fentry, lentry, track, data, tocuse, err;
	if (!gd.toc)
		return -ENOMEM;
	tocuse = 1;
	/* Check if GD-ROM */
	err = gdrom_readtoc_cmd(gd.toc, 1);
	/* Not a GD-ROM so check if standard CD-ROM */
	if (err) {
		tocuse = 0;
		err = gdrom_readtoc_cmd(gd.toc, 0);
		if (err) {
			pr_info("Could not get CD table of contents\n");
			return -ENXIO;
		}
	}
	fentry = get_entry_track(gd.toc->first);
	lentry = get_entry_track(gd.toc->last);
	/* Find the first data track, scanning backwards from the last */
	track = get_entry_track(gd.toc->last);
	do {
		data = gd.toc->entry[track - 1];
		if (get_entry_q_ctrl(data))
			break; /* ie a real data track */
		track--;
	} while (track >= fentry);
	/* the second test also catches the no-data-track-found case,
	 * where the loop has decremented track below fentry */
	if ((track > 100) || (track < get_entry_track(gd.toc->first))) {
		pr_info("No data on the last session of the CD\n");
		gdrom_getsense(NULL);
		return -ENXIO;
	}
	ms_info->addr_format = CDROM_LBA;
	ms_info->addr.lba = get_entry_lba(data);
	ms_info->xa_flag = 1;
	return 0;
}
/* cdrom_device_ops.open hook: spin the disc up before first access. */
static int gdrom_open(struct cdrom_device_info *cd_info, int purpose)
{
	int err = gdrom_preparedisk_cmd();

	return err;
}
/* cdrom_device_ops.release hook - the drive needs no teardown, but the
 * cdrom core requires the callback to exist even if empty. */
static void gdrom_release(struct cdrom_device_info *cd_info)
{
}
/* Map the drive's sense key (upper nibble of the error register) onto
 * the generic CDS_* drive-status codes. */
static int gdrom_drivestatus(struct cdrom_device_info *cd_info, int ignore)
{
	char sense = __raw_readb(GDROM_ERROR_REG) & 0xF0;

	switch (sense) {
	case 0:
		return CDS_DISC_OK;
	case 0x20:
		return CDS_DRIVE_NOT_READY;
	default:
		return CDS_NO_INFO;
	}
}
/* Report a media-change event when the sense key's upper nibble reads
 * 0x60 (unit attention). */
static unsigned int gdrom_check_events(struct cdrom_device_info *cd_info,
				       unsigned int clearing, int ignore)
{
	unsigned char sense = __raw_readb(GDROM_ERROR_REG) & 0xF0;

	if (sense == 0x60)
		return DISK_EVENT_MEDIA_CHANGE;
	return 0;
}
/* reset the G1 bus */
static int gdrom_hardreset(struct cdrom_device_info *cd_info)
{
	int count;
	__raw_writel(0x1fffff, GDROM_RESET_REG);
	/* read back 2 MB of the 0xa0000000 (uncached P2) region -
	 * presumably a bus-settling/flush sequence from the original
	 * reverse engineering; TODO confirm against hardware docs */
	for (count = 0xa0000000; count < 0xa0200000; count += 4)
		__raw_readl(count);
	return 0;
}
/* keep the function looking like the universal
 * CD Rom specification - returning int */
static int gdrom_packetcommand(struct cdrom_device_info *cd_info,
			       struct packet_command *command)
{
	void *spi_cmd = &command->cmd;

	gdrom_spicommand(spi_cmd, command->buflen);
	return 0;
}
/* Get Sense SPI command
 * From Marcus Comstedt
 * cmd = 0x13
 * cmd + 4 = length of returned buffer
 * Returns 5 16 bit words
 *
 * Logs the drive's current sense key; if @bufstring is non-NULL the
 * additional sense bytes are copied into it. Returns 0 when the sense
 * key indicates success (NO_SENSE/RECOVERED_ERROR), negative otherwise.
 */
static int gdrom_getsense(short *bufstring)
{
	struct packet_command *sense_command;
	short sense[5];
	int sense_key;
	int err = -EIO;
	sense_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
	if (!sense_command)
		return -ENOMEM;
	sense_command->cmd[0] = 0x13;
	sense_command->cmd[4] = 10;	/* 10-byte (5-word) reply */
	sense_command->buflen = 10;
	/* even if something is pending try to get
	* the sense key if possible */
	if (gd.pending && !gdrom_wait_clrbusy()) {
		err = -EBUSY;
		goto cleanup_sense_final;
	}
	gd.pending = 1;
	gdrom_packetcommand(gd.cd_info, sense_command);
	wait_event_interruptible_timeout(command_queue, gd.pending == 0,
		GDROM_DEFAULT_TIMEOUT);
	if (gd.pending)
		goto cleanup_sense;	/* timed out waiting for the IRQ */
	insw(GDROM_DATA_REG, &sense, sense_command->buflen/2);
	/* NOTE(review): "& 40" is decimal 40 (0x28), not 0x40 - looks like
	 * a long-standing typo for a bit test, but it matches the historical
	 * upstream code; confirm against the SPI spec before changing. */
	if (sense[1] & 40) {
		pr_info("Drive not ready - command aborted\n");
		goto cleanup_sense;
	}
	sense_key = sense[1] & 0x0F;
	if (sense_key < ARRAY_SIZE(sense_texts))
		pr_info("%s\n", sense_texts[sense_key].text);
	else
		pr_err("Unknown sense key: %d\n", sense_key);
	if (bufstring) /* return addional sense data */
		memcpy(bufstring, &sense[4], 2);
	if (sense_key < 2)
		err = 0;	/* NO_SENSE / RECOVERED_ERROR count as success */
cleanup_sense:
	gd.pending = 0;
cleanup_sense_final:
	kfree(sense_command);
	return err;
}
/* Audio ioctls are not supported on the GD-ROM drive. */
static int gdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
	void *arg)
{
	return -EINVAL;
}
/* Hooks into the universal CD-ROM driver core. */
static const struct cdrom_device_ops gdrom_ops = {
	.open = gdrom_open,
	.release = gdrom_release,
	.drive_status = gdrom_drivestatus,
	.check_events = gdrom_check_events,
	.get_last_session = gdrom_get_last_session,
	.reset = gdrom_hardreset,
	.audio_ioctl = gdrom_audio_ioctl,
	.generic_packet = cdrom_dummy_generic_packet,
	.capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
		CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
};
/* Block-device open hook.
 * check_disk_change() must be called by the block-layer caller rather
 * than inside cdrom_open() (moved out by upstream commit 2bbea6e11735,
 * "cdrom: do not call check_disk_change() inside cdrom_open()", which
 * fixed an ABBA deadlock between s_umount and the cdrom mutex), so do
 * it here before taking gdrom_mutex.
 */
static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
{
	int ret;

	check_disk_change(bdev);

	mutex_lock(&gdrom_mutex);
	ret = cdrom_open(gd.cd_info, bdev, mode);
	mutex_unlock(&gdrom_mutex);
	return ret;
}
/* Block-device release hook: hand off to the cdrom core under the
 * driver mutex. */
static void gdrom_bdops_release(struct gendisk *disk, fmode_t mode)
{
	struct cdrom_device_info *cdi = gd.cd_info;

	mutex_lock(&gdrom_mutex);
	cdrom_release(cdi, mode);
	mutex_unlock(&gdrom_mutex);
}
/* Block-device media-event hook: delegate to the cdrom core. */
static unsigned int gdrom_bdops_check_events(struct gendisk *disk,
					     unsigned int clearing)
{
	struct cdrom_device_info *cdi = gd.cd_info;

	return cdrom_check_events(cdi, clearing);
}
/* Block-device ioctl hook: forward to the cdrom core under the
 * driver mutex. */
static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&gdrom_mutex);
	ret = cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg);
	mutex_unlock(&gdrom_mutex);

	return ret;
}
/* Hooks into the block layer. */
static const struct block_device_operations gdrom_bdops = {
	.owner = THIS_MODULE,
	.open = gdrom_bdops_open,
	.release = gdrom_bdops_release,
	.check_events = gdrom_bdops_check_events,
	.ioctl = gdrom_bdops_ioctl,
};
/* Command-completion IRQ: record the status and wake the waiter
 * (reading the status register also acknowledges the interrupt). */
static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
{
	gd.status = __raw_readb(GDROM_STATUSCOMMAND_REG);
	if (gd.pending == 1) {
		gd.pending = 0;
		wake_up_interruptible(&command_queue);
	}
	return IRQ_HANDLED;
}
/* DMA-completion IRQ: record the status and wake the request worker
 * (reading the status register also acknowledges the interrupt). */
static irqreturn_t gdrom_dma_interrupt(int irq, void *dev_id)
{
	gd.status = __raw_readb(GDROM_STATUSCOMMAND_REG);
	if (gd.transfer == 1) {
		gd.transfer = 0;
		wake_up_interruptible(&request_queue);
	}
	return IRQ_HANDLED;
}
/* Install the command and DMA IRQ handlers; on partial failure the
 * already-installed command IRQ is released again. */
static int gdrom_set_interrupt_handlers(void)
{
	int err;

	err = request_irq(HW_EVENT_GDROM_CMD, gdrom_command_interrupt,
			  0, "gdrom_command", &gd);
	if (err)
		return err;

	err = request_irq(HW_EVENT_GDROM_DMA, gdrom_dma_interrupt,
			  0, "gdrom_dma", &gd);
	if (err) {
		free_irq(HW_EVENT_GDROM_CMD, &gd);
		return err;
	}
	return 0;
}
/* Implement DMA read using SPI command
 * 0 -> 0x30
 * 1 -> mode
 * 2 -> block >> 16
 * 3 -> block >> 8
 * 4 -> block
 * 8 -> sectors >> 16
 * 9 -> sectors >> 8
 * 10 -> sectors
 *
 * Workqueue handler: drain gdrom_deferred, issuing one DMA read per
 * queued request and completing each request with the outcome.
 * (Embedded commit-message residue between the unlock and the block
 * computation has been removed - it broke compilation.)
 */
static void gdrom_readdisk_dma(struct work_struct *work)
{
	int block, block_cnt;
	blk_status_t err;
	struct packet_command *read_command;
	struct list_head *elem, *next;
	struct request *req;
	unsigned long timeout;

	if (list_empty(&gdrom_deferred))
		return;
	read_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
	if (!read_command)
		return; /* get more memory later? */
	read_command->cmd[0] = 0x30;	/* SPI CD READ */
	read_command->cmd[1] = 0x20;	/* read mode */
	spin_lock(&gdrom_lock);
	list_for_each_safe(elem, next, &gdrom_deferred) {
		req = list_entry(elem, struct request, queuelist);
		spin_unlock(&gdrom_lock);
		/* block-layer sectors are 512 bytes, GD-ROM sectors 2048 */
		block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
		block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
		__raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG);
		__raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
		__raw_writel(1, GDROM_DMA_DIRECTION_REG);
		__raw_writel(1, GDROM_DMA_ENABLE_REG);
		read_command->cmd[2] = (block >> 16) & 0xFF;
		read_command->cmd[3] = (block >> 8) & 0xFF;
		read_command->cmd[4] = block & 0xFF;
		read_command->cmd[8] = (block_cnt >> 16) & 0xFF;
		read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
		read_command->cmd[10] = block_cnt & 0xFF;
		/* set for DMA */
		__raw_writeb(1, GDROM_ERROR_REG);
		/* other registers */
		__raw_writeb(0, GDROM_SECNUM_REG);
		__raw_writeb(0, GDROM_BCL_REG);
		__raw_writeb(0, GDROM_BCH_REG);
		__raw_writeb(0, GDROM_DSEL_REG);
		__raw_writeb(0, GDROM_INTSEC_REG);
		/* Wait for registers to reset after any previous activity */
		timeout = jiffies + HZ / 2;
		while (gdrom_is_busy() && time_before(jiffies, timeout))
			cpu_relax();
		__raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
		timeout = jiffies + HZ / 2;
		/* Wait for packet command to finish */
		while (gdrom_is_busy() && time_before(jiffies, timeout))
			cpu_relax();
		gd.pending = 1;
		gd.transfer = 1;
		outsw(GDROM_DATA_REG, &read_command->cmd, 6);
		timeout = jiffies + HZ / 2;
		/* Wait for any pending DMA to finish */
		while (__raw_readb(GDROM_DMA_STATUS_REG) &&
		       time_before(jiffies, timeout))
			cpu_relax();
		/* start transfer */
		__raw_writeb(1, GDROM_DMA_STATUS_REG);
		wait_event_interruptible_timeout(request_queue,
						 gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
		/* transfer still set => the DMA IRQ never fired */
		err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK;
		gd.transfer = 0;
		gd.pending = 0;
		/* now seek to take the request spinlock
		 * before handling ending the request */
		spin_lock(&gdrom_lock);
		list_del_init(&req->queuelist);
		__blk_end_request_all(req, err);
	}
	spin_unlock(&gdrom_lock);
	kfree(read_command);
}
static void gdrom_request(struct request_queue *rq)
{
struct request *req;
block: implement and enforce request peek/start/fetch Till now block layer allowed two separate modes of request execution. A request is always acquired from the request queue via elv_next_request(). After that, drivers are free to either dequeue it or process it without dequeueing. Dequeue allows elv_next_request() to return the next request so that multiple requests can be in flight. Executing requests without dequeueing has its merits mostly in allowing drivers for simpler devices which can't do sg to deal with segments only without considering request boundary. However, the benefit this brings is dubious and declining while the cost of the API ambiguity is increasing. Segment based drivers are usually for very old or limited devices and as converting to dequeueing model isn't difficult, it doesn't justify the API overhead it puts on block layer and its more modern users. Previous patches converted all block low level drivers to dequeueing model. This patch completes the API transition by... * renaming elv_next_request() to blk_peek_request() * renaming blkdev_dequeue_request() to blk_start_request() * adding blk_fetch_request() which is combination of peek and start * disallowing completion of queued (not started) requests * applying new API to all LLDs Renamings are for consistency and to break out of tree code so that it's apparent that out of tree drivers need updating. [ Impact: block request issue API cleanup, no functional change ] Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: Mike Miller <mike.miller@hp.com> Cc: unsik Kim <donari75@gmail.com> Cc: Paul Clements <paul.clements@steeleye.com> Cc: Tim Waugh <tim@cyberelk.net> Cc: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com> Cc: David S. 
Miller <davem@davemloft.net> Cc: Laurent Vivier <Laurent@lvivier.info> Cc: Jeff Garzik <jgarzik@pobox.com> Cc: Jeremy Fitzhardinge <jeremy@xensource.com> Cc: Grant Likely <grant.likely@secretlab.ca> Cc: Adrian McMenamin <adrian@mcmen.demon.co.uk> Cc: Stephen Rothwell <sfr@canb.auug.org.au> Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com> Cc: Borislav Petkov <petkovbb@googlemail.com> Cc: Sergei Shtylyov <sshtylyov@ru.mvista.com> Cc: Alex Dubov <oakad@yahoo.com> Cc: Pierre Ossman <drzeus@drzeus.cx> Cc: David Woodhouse <dwmw2@infradead.org> Cc: Markus Lidel <Markus.Lidel@shadowconnect.com> Cc: Stefan Weinhuber <wein@de.ibm.com> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Pete Zaitcev <zaitcev@redhat.com> Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
16 years ago
while ((req = blk_fetch_request(rq)) != NULL) {
switch (req_op(req)) {
case REQ_OP_READ:
/*
* Add to list of deferred work and then schedule
* workqueue.
*/
list_add_tail(&req->queuelist, &gdrom_deferred);
schedule_work(&work);
break;
case REQ_OP_WRITE:
pr_notice("Read only device - write request ignored\n");
__blk_end_request_all(req, BLK_STS_IOERR);
break;
default:
printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
__blk_end_request_all(req, BLK_STS_IOERR);
break;
}
}
}
/* Print string identifying GD ROM device
 * Queries the identify block and logs model/manufacturer/firmware.
 * Returns 0 on success, -ENOMEM if any allocation fails. */
static int gdrom_outputversion(void)
{
	struct gdrom_id *id;
	char *model_name, *manuf_name, *firmw_ver;
	int err = -ENOMEM;
	/* query device ID */
	id = kzalloc(sizeof(struct gdrom_id), GFP_KERNEL);
	if (!id)
		return err;
	gdrom_identifydevice(id);
	/* kstrndup bounds each 16-byte, possibly unterminated field */
	model_name = kstrndup(id->modname, 16, GFP_KERNEL);
	if (!model_name)
		goto free_id;
	manuf_name = kstrndup(id->mname, 16, GFP_KERNEL);
	if (!manuf_name)
		goto free_model_name;
	firmw_ver = kstrndup(id->firmver, 16, GFP_KERNEL);
	if (!firmw_ver)
		goto free_manuf_name;
	pr_info("%s from %s with firmware %s\n",
		model_name, manuf_name, firmw_ver);
	err = 0;
	/* success path falls through the cleanup labels below */
	kfree(firmw_ver);
free_manuf_name:
	kfree(manuf_name);
free_model_name:
	kfree(model_name);
free_id:
	kfree(id);
	return err;
}
/* set the default mode for DMA transfer */
static int gdrom_init_dma_mode(void)
{
	/* SET FEATURES: 0x13 in the features register selects the
	 * transfer mode, 0x22 the mode value - from the original
	 * reverse engineering; TODO confirm against hardware docs */
	__raw_writeb(0x13, GDROM_ERROR_REG);
	__raw_writeb(0x22, GDROM_INTSEC_REG);
	if (!gdrom_wait_clrbusy())
		return -EBUSY;
	__raw_writeb(0xEF, GDROM_STATUSCOMMAND_REG);
	if (!gdrom_wait_busy_sleeps())
		return -EBUSY;
	/* Memory protection setting for GDROM DMA
	 * Bits 31 - 16 security: 0x8843
	 * Bits 15 and 7 reserved (0)
	 * Bits 14 - 8 start of transfer range in 1 MB blocks OR'ed with 0x80
	 * Bits 6 - 0 end of transfer range in 1 MB blocks OR'ed with 0x80
	 * (0x40 | 0x80) = start range at 0x0C000000
	 * (0x7F | 0x80) = end range at 0x0FFFFFFF */
	__raw_writel(0x8843407F, GDROM_DMA_ACCESS_CTRL_REG);
	__raw_writel(9, GDROM_DMA_WAIT_REG); /* DMA word setting */
	return 0;
}
static void probe_gdrom_setupcd(void)
{
gd.cd_info->ops = &gdrom_ops;
gd.cd_info->capacity = 1;
strcpy(gd.cd_info->name, GDROM_DEV_NAME);
gd.cd_info->mask = CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|
CDC_SELECT_DISC;
}
static void probe_gdrom_setupdisk(void)
{
gd.disk->major = gdrom_major;
gd.disk->first_minor = 1;
gd.disk->minors = 1;
strcpy(gd.disk->disk_name, GDROM_DEV_NAME);
}
/* Configure the request queue limits and switch the drive into its
 * default DMA mode. */
static int probe_gdrom_setupqueue(void)
{
	struct request_queue *q = gd.gdrom_rq;

	/* GD-ROM hard sectors are 2048 bytes */
	blk_queue_logical_block_size(q, GDROM_HARD_SECTOR);
	/* using DMA so memory will need to be contiguous */
	blk_queue_max_segments(q, 1);
	/* set a large max size to get most from DMA */
	blk_queue_max_segment_size(q, 0x40000);
	gd.disk->queue = q;

	return gdrom_init_dma_mode();
}
/*
 * register this as a block device and as compliant with the
 * universal CD Rom driver interface
 */
static int probe_gdrom(struct platform_device *devptr)
{
	int err;
	/* Start the device */
	if (gdrom_execute_diagnostic() != 1) {
		pr_warning("ATA Probe for GDROM failed\n");
		return -ENODEV;
	}
	/* Print out firmware ID */
	if (gdrom_outputversion())
		return -ENOMEM;
	/* Register GDROM */
	gdrom_major = register_blkdev(0, GDROM_DEV_NAME);
	if (gdrom_major <= 0)
		return gdrom_major;
	pr_info("Registered with major number %d\n",
		gdrom_major);
	/* Specify basic properties of drive */
	gd.cd_info = kzalloc(sizeof(struct cdrom_device_info), GFP_KERNEL);
	if (!gd.cd_info) {
		err = -ENOMEM;
		goto probe_fail_no_mem;
	}
	probe_gdrom_setupcd();
	gd.disk = alloc_disk(1);
	if (!gd.disk) {
		err = -ENODEV;
		goto probe_fail_no_disk;
	}
	probe_gdrom_setupdisk();
	if (register_cdrom(gd.cd_info)) {
		err = -ENODEV;
		goto probe_fail_cdrom_register;
	}
	gd.disk->fops = &gdrom_bdops;
	/* latch on to the interrupt */
	err = gdrom_set_interrupt_handlers();
	if (err)
		goto probe_fail_cmdirq_register;
	gd.gdrom_rq = blk_init_queue(gdrom_request, &gdrom_lock);
	if (!gd.gdrom_rq) {
		err = -ENOMEM;
		goto probe_fail_requestq;
	}
	blk_queue_bounce_limit(gd.gdrom_rq, BLK_BOUNCE_HIGH);
	err = probe_gdrom_setupqueue();
	if (err)
		goto probe_fail_toc;
	gd.toc = kzalloc(sizeof(struct gdromtoc), GFP_KERNEL);
	if (!gd.toc) {
		err = -ENOMEM;
		goto probe_fail_toc;
	}
	add_disk(gd.disk);
	return 0;
	/* NOTE(review): this unwind path calls del_gendisk() on a disk that
	 * was never add_disk()'ed, and never unregister_cdrom()s after
	 * register_cdrom() succeeded - matches the historical code, but
	 * worth confirming against later upstream fixes. */
probe_fail_toc:
	blk_cleanup_queue(gd.gdrom_rq);
probe_fail_requestq:
	free_irq(HW_EVENT_GDROM_DMA, &gd);
	free_irq(HW_EVENT_GDROM_CMD, &gd);
probe_fail_cmdirq_register:
probe_fail_cdrom_register:
	del_gendisk(gd.disk);
probe_fail_no_disk:
	kfree(gd.cd_info);
probe_fail_no_mem:
	unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
	gdrom_major = 0;
	pr_warning("Probe failed - error is 0x%X\n", err);
	return err;
}
static int remove_gdrom(struct platform_device *devptr)
{
workqueue: deprecate flush[_delayed]_work_sync() flush[_delayed]_work_sync() are now spurious. Mark them deprecated and convert all users to flush[_delayed]_work(). If you're cc'd and wondering what's going on: Now all workqueues are non-reentrant and the regular flushes guarantee that the work item is not pending or running on any CPU on return, so there's no reason to use the sync flushes at all and they're going away. This patch doesn't make any functional difference. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Russell King <linux@arm.linux.org.uk> Cc: Paul Mundt <lethal@linux-sh.org> Cc: Ian Campbell <ian.campbell@citrix.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Mattia Dongili <malattia@linux.it> Cc: Kent Yoder <key@linux.vnet.ibm.com> Cc: David Airlie <airlied@linux.ie> Cc: Jiri Kosina <jkosina@suse.cz> Cc: Karsten Keil <isdn@linux-pingi.de> Cc: Bryan Wu <bryan.wu@canonical.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Alasdair Kergon <agk@redhat.com> Cc: Mauro Carvalho Chehab <mchehab@infradead.org> Cc: Florian Tobias Schandinat <FlorianSchandinat@gmx.de> Cc: David Woodhouse <dwmw2@infradead.org> Cc: "David S. Miller" <davem@davemloft.net> Cc: linux-wireless@vger.kernel.org Cc: Anton Vorontsov <cbou@mail.ru> Cc: Sangbeom Kim <sbkim73@samsung.com> Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Eric Van Hensbergen <ericvh@gmail.com> Cc: Takashi Iwai <tiwai@suse.de> Cc: Steven Whitehouse <swhiteho@redhat.com> Cc: Petr Vandrovec <petr@vandrovec.name> Cc: Mark Fasheh <mfasheh@suse.com> Cc: Christoph Hellwig <hch@infradead.org> Cc: Avi Kivity <avi@redhat.com>
13 years ago
flush_work(&work);
blk_cleanup_queue(gd.gdrom_rq);
free_irq(HW_EVENT_GDROM_CMD, &gd);
free_irq(HW_EVENT_GDROM_DMA, &gd);
del_gendisk(gd.disk);
if (gdrom_major)
unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
unregister_cdrom(gd.cd_info);
return 0;
}
/* Platform driver glue - matched by name against the device registered
 * in init_gdrom(). */
static struct platform_driver gdrom_driver = {
	.probe = probe_gdrom,
	.remove = remove_gdrom,
	.driver = {
			.name = GDROM_DEV_NAME,
	},
};
/* Module init: register the platform driver, then the device that it
 * binds to; unwind the driver registration if the device fails. */
static int __init init_gdrom(void)
{
	int rc;

	gd.toc = NULL;

	rc = platform_driver_register(&gdrom_driver);
	if (rc)
		return rc;

	pd = platform_device_register_simple(GDROM_DEV_NAME, -1, NULL, 0);
	if (IS_ERR(pd)) {
		rc = PTR_ERR(pd);
		platform_driver_unregister(&gdrom_driver);
		return rc;
	}

	return 0;
}
/* Module exit: unregister device then driver, and free the state that
 * probe allocated. */
static void __exit exit_gdrom(void)
{
	platform_device_unregister(pd);
	platform_driver_unregister(&gdrom_driver);

	kfree(gd.cd_info);
	kfree(gd.toc);
}
/* Standard module registration boilerplate */
module_init(init_gdrom);
module_exit(exit_gdrom);
MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
MODULE_DESCRIPTION("SEGA Dreamcast GD-ROM Driver");
MODULE_LICENSE("GPL");