/* extracted from linux-2.4.37/drivers/block/cciss.c: cciss_ioctl() */
/*
 * cciss_ioctl - ioctl entry point for the cciss (Compaq Smart Array) driver.
 *
 * @inode: inode of the opened block device node; major/minor select the
 *         controller and the logical drive.
 * @filep: open file (unused here).
 * @cmd:   ioctl command code.
 * @arg:   user-space pointer (or value) argument for the command.
 *
 * Returns 0 on success or a negative errno.  Privileged commands
 * (SETINTINFO, SETNODENAME, BLKRRPART, the passthru commands) check
 * capabilities before acting.  Config-table updates are serialized under
 * io_request_lock and rung in via the SA5 doorbell register.
 */
static int cciss_ioctl(struct inode *inode, struct file *filep,
		unsigned int cmd, unsigned long arg)
{
	/* Major number maps to the controller, high minor bits to the disk. */
	int ctlr = map_major_to_ctlr[MAJOR(inode->i_rdev)];
	int dsk = MINOR(inode->i_rdev) >> NWD_SHIFT;

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
#endif /* CCISS_DEBUG */

	switch (cmd) {
	case HDIO_GETGEO:
	{
		struct hd_geometry driver_geo;

		/* A zero cylinder count means no valid geometry / no drive. */
		if (hba[ctlr]->drv[dsk].cylinders) {
			driver_geo.heads = hba[ctlr]->drv[dsk].heads;
			driver_geo.sectors = hba[ctlr]->drv[dsk].sectors;
			driver_geo.cylinders = hba[ctlr]->drv[dsk].cylinders;
		} else
			return -ENXIO;
		driver_geo.start =
			hba[ctlr]->hd[MINOR(inode->i_rdev)].start_sect;
		if (copy_to_user((void *) arg, &driver_geo,
				sizeof(struct hd_geometry)))
			return -EFAULT;
		return 0;
	}
	case HDIO_GETGEO_BIG:
	{
		/* Same as HDIO_GETGEO but with a 32-bit cylinder field. */
		struct hd_big_geometry driver_geo;

		if (hba[ctlr]->drv[dsk].cylinders) {
			driver_geo.heads = hba[ctlr]->drv[dsk].heads;
			driver_geo.sectors = hba[ctlr]->drv[dsk].sectors;
			driver_geo.cylinders = hba[ctlr]->drv[dsk].cylinders;
		} else
			return -ENXIO;
		driver_geo.start =
			hba[ctlr]->hd[MINOR(inode->i_rdev)].start_sect;
		if (copy_to_user((void *) arg, &driver_geo,
				sizeof(struct hd_big_geometry)))
			return -EFAULT;
		return 0;
	}
	case BLKRRPART:
		/* Re-read the partition table; admin only. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return revalidate_logvol(inode->i_rdev, 1);
	case BLKGETSIZE:
	case BLKGETSIZE64:
	case BLKFLSBUF:
	case BLKBSZSET:
	case BLKBSZGET:
	case BLKROSET:
	case BLKROGET:
	case BLKRASET:
	case BLKRAGET:
	case BLKPG:
	case BLKELVGET:
	case BLKELVSET:
		/* Generic block-layer ioctls are handled by the core. */
		return blk_ioctl(inode->i_rdev, cmd, arg);
	case CCISS_GETPCIINFO:
	{
		cciss_pci_info_struct pciinfo;

		if (!arg)
			return -EINVAL;
		pciinfo.bus = hba[ctlr]->pdev->bus->number;
		pciinfo.dev_fn = hba[ctlr]->pdev->devfn;
		pciinfo.board_id = hba[ctlr]->board_id;
		if (copy_to_user((void *) arg, &pciinfo,
				sizeof(cciss_pci_info_struct)))
			return -EFAULT;
		return 0;
	}
	case CCISS_GETINTINFO:
	{
		cciss_coalint_struct intinfo;
		ctlr_info_t *c = hba[ctlr];

		if (!arg)
			return -EINVAL;
		/* Interrupt coalescing settings live in the config table. */
		intinfo.delay = readl(&c->cfgtable->HostWrite.CoalIntDelay);
		intinfo.count = readl(&c->cfgtable->HostWrite.CoalIntCount);
		if (copy_to_user((void *) arg, &intinfo,
				sizeof(cciss_coalint_struct)))
			return -EFAULT;
		return 0;
	}
	case CCISS_SETINTINFO:
	{
		cciss_coalint_struct intinfo;
		ctlr_info_t *c = hba[ctlr];
		unsigned long flags;
		int i;

		if (!arg)
			return -EINVAL;
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (copy_from_user(&intinfo, (void *) arg,
				sizeof(cciss_coalint_struct)))
			return -EFAULT;
		/* delay==0 && count==0 would disable coalescing entirely. */
		if ((intinfo.delay == 0) && (intinfo.count == 0)) {
			return -EINVAL;
		}
		spin_lock_irqsave(&io_request_lock, flags);
		/* Can only safely update if no commands outstanding */
		if (c->commands_outstanding > 0) {
			spin_unlock_irqrestore(&io_request_lock, flags);
			return -EINVAL;
		}
		/* Update the field, and then ring the doorbell */
		writel(intinfo.delay,
			&(c->cfgtable->HostWrite.CoalIntDelay));
		writel(intinfo.count,
			&(c->cfgtable->HostWrite.CoalIntCount));
		writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);

		/* Poll until the controller acknowledges the change. */
		for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
			if (!(readl(c->vaddr + SA5_DOORBELL)
					& CFGTBL_ChangeReq))
				break;
			/* delay and try again */
			udelay(1000);
		}
		spin_unlock_irqrestore(&io_request_lock, flags);
		if (i >= MAX_IOCTL_CONFIG_WAIT)
			/* there is an unlikely case where this can happen,
			 * involving hot replacing a failed 144 GB drive in a
			 * RAID 5 set just as we attempt this ioctl. */
			return -EAGAIN;
		return 0;
	}
	case CCISS_GETNODENAME:
	{
		NodeName_type NodeName;
		ctlr_info_t *c = hba[ctlr];
		int i;

		if (!arg)
			return -EINVAL;
		/* Controller name is 16 bytes of device memory; byte reads. */
		for (i = 0; i < 16; i++)
			NodeName[i] = readb(&c->cfgtable->ServerName[i]);
		if (copy_to_user((void *) arg, NodeName,
				sizeof(NodeName_type)))
			return -EFAULT;
		return 0;
	}
	case CCISS_SETNODENAME:
	{
		NodeName_type NodeName;
		ctlr_info_t *c = hba[ctlr];
		unsigned long flags;
		int i;

		if (!arg)
			return -EINVAL;
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (copy_from_user(NodeName, (void *) arg,
				sizeof(NodeName_type)))
			return -EFAULT;

		spin_lock_irqsave(&io_request_lock, flags);
		/* Update the field, and then ring the doorbell */
		for (i = 0; i < 16; i++)
			writeb(NodeName[i], &c->cfgtable->ServerName[i]);
		writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
		for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
			if (!(readl(c->vaddr + SA5_DOORBELL)
					& CFGTBL_ChangeReq))
				break;
			/* delay and try again */
			udelay(1000);
		}
		spin_unlock_irqrestore(&io_request_lock, flags);
		if (i >= MAX_IOCTL_CONFIG_WAIT)
			/* there is an unlikely case where this can happen,
			 * involving hot replacing a failed 144 GB drive in a
			 * RAID 5 set just as we attempt this ioctl. */
			return -EAGAIN;
		return 0;
	}
	case CCISS_GETHEARTBEAT:
	{
		Heartbeat_type heartbeat;
		ctlr_info_t *c = hba[ctlr];

		if (!arg)
			return -EINVAL;
		heartbeat = readl(&c->cfgtable->HeartBeat);
		if (copy_to_user((void *) arg, &heartbeat,
				sizeof(Heartbeat_type)))
			return -EFAULT;
		return 0;
	}
	case CCISS_GETBUSTYPES:
	{
		BusTypes_type BusTypes;
		ctlr_info_t *c = hba[ctlr];

		if (!arg)
			return -EINVAL;
		BusTypes = readl(&c->cfgtable->BusTypes);
		if (copy_to_user((void *) arg, &BusTypes,
				sizeof(BusTypes_type)))
			return -EFAULT;
		return 0;
	}
	case CCISS_GETFIRMVER:
	{
		FirmwareVer_type firmware;

		if (!arg)
			return -EINVAL;
		memcpy(firmware, hba[ctlr]->firm_ver, 4);
		if (copy_to_user((void *) arg, firmware,
				sizeof(FirmwareVer_type)))
			return -EFAULT;
		return 0;
	}
	case CCISS_GETDRIVVER:
	{
		DriverVer_type DriverVer = DRIVER_VERSION;

		if (!arg)
			return -EINVAL;
		if (copy_to_user((void *) arg, &DriverVer,
				sizeof(DriverVer_type)))
			return -EFAULT;
		return 0;
	}
	case CCISS_RESCANDISK:
		return cciss_rescan_disk(ctlr, dsk);
	case CCISS_DEREGDISK:
		return deregister_disk(ctlr, dsk);
	case CCISS_REGNEWD:
		return register_new_disk(ctlr, dsk, 0);
	case CCISS_REGNEWDISK:
	{
		__u64 new_logvol;

		if (!arg)
			return -EINVAL;
		if (copy_from_user(&new_logvol, (void *) arg,
				sizeof(__u64)))
			return -EFAULT;
		return register_new_disk(ctlr, dsk, new_logvol);
	}
	case CCISS_GETLUNINFO:
	{
		LogvolInfo_struct luninfo;
		int num_parts = 0;
		int i, start;

		luninfo.LunID = hba[ctlr]->drv[dsk].LunID;
		luninfo.num_opens = hba[ctlr]->drv[dsk].usage_count;

		/* count partitions 1 to 15 with sizes > 0 */
		start = (dsk << NWD_SHIFT);
		for (i = 1; i < MAX_PART; i++) {
			int minor = start + i;

			if (hba[ctlr]->sizes[minor] != 0)
				num_parts++;
		}
		luninfo.num_parts = num_parts;
		if (copy_to_user((void *) arg, &luninfo,
				sizeof(LogvolInfo_struct)))
			return -EFAULT;
		return 0;
	}
	case CCISS_PASSTHRU:
	{
		/* Single-buffer command passthrough (<= 128000 bytes). */
		IOCTL_Command_struct iocommand;
		ctlr_info_t *h = hba[ctlr];
		CommandList_struct *c;
		char *buff = NULL;
		u64bit temp64;
		unsigned long flags;
		DECLARE_COMPLETION(wait);

		if (!arg)
			return -EINVAL;
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&iocommand, (void *) arg,
				sizeof(IOCTL_Command_struct)))
			return -EFAULT;
		/* A data transfer direction requires a non-empty buffer. */
		if ((iocommand.buf_size < 1) &&
		    (iocommand.Request.Type.Direction != XFER_NONE)) {
			return -EINVAL;
		}
		/* Check kmalloc limits */
		if (iocommand.buf_size > 128000)
			return -EINVAL;
		if (iocommand.buf_size > 0) {
			buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
			if (buff == NULL)
				return -ENOMEM;
		}
		if (iocommand.Request.Type.Direction == XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
					iocommand.buf_size)) {
				kfree(buff);
				return -EFAULT;
			}
		} else if (buff != NULL)
			/* FIX: only zero when a buffer was allocated;
			 * previously memset() could be handed a NULL buff. */
			memset(buff, 0, iocommand.buf_size);
		if ((c = cmd_alloc(h, 0)) == NULL) {
			kfree(buff);
			return -ENOMEM;
		}
		/* Fill in the command type */
		c->cmd_type = CMD_IOCTL_PEND;
		/* Fill in Command Header */
		c->Header.ReplyQueue = 0;	/* unused in simple mode */
		if (iocommand.buf_size > 0) {	/* buffer to fill */
			c->Header.SGList = 1;
			c->Header.SGTotal = 1;
		} else {			/* no buffers to fill */
			c->Header.SGList = 0;
			c->Header.SGTotal = 0;
		}
		c->Header.LUN = iocommand.LUN_info;
		c->Header.Tag.lower = c->busaddr; /* use the kernel address */
						  /* the cmd block for tag */
		/* Fill in Request block */
		c->Request = iocommand.Request;

		/* Fill in the scatter gather information */
		if (iocommand.buf_size > 0) {
			temp64.val = pci_map_single(h->pdev, buff,
					iocommand.buf_size,
					PCI_DMA_BIDIRECTIONAL);
			c->SG[0].Addr.lower = temp64.val32.lower;
			c->SG[0].Addr.upper = temp64.val32.upper;
			c->SG[0].Len = iocommand.buf_size;
			c->SG[0].Ext = 0;	/* we are not chaining */
		}
		c->waiting = &wait;

		/* Put the request on the tail of the request queue */
		spin_lock_irqsave(&io_request_lock, flags);
		addQ(&h->reqQ, c);
		h->Qdepth++;
		start_io(h);
		spin_unlock_irqrestore(&io_request_lock, flags);

		wait_for_completion(&wait);

		/* unlock the buffers from DMA */
		if (iocommand.buf_size > 0) {
			/* FIX: only unmap when something was mapped;
			 * previously c->SG[0] was read (uninitialized)
			 * even for zero-length commands. */
			temp64.val32.lower = c->SG[0].Addr.lower;
			temp64.val32.upper = c->SG[0].Addr.upper;
			pci_unmap_single(h->pdev, (dma_addr_t) temp64.val,
					iocommand.buf_size,
					PCI_DMA_BIDIRECTIONAL);
		}
		/* Copy the error information out */
		iocommand.error_info = *(c->err_info);
		if (copy_to_user((void *) arg, &iocommand,
				sizeof(IOCTL_Command_struct))) {
			kfree(buff);
			cmd_free(h, c, 0);
			return -EFAULT;
		}
		if (iocommand.Request.Type.Direction == XFER_READ) {
			/* Copy the data out of the buffer we created */
			if (copy_to_user(iocommand.buf, buff,
					iocommand.buf_size)) {
				kfree(buff);
				cmd_free(h, c, 0);
				return -EFAULT;
			}
		}
		kfree(buff);
		cmd_free(h, c, 0);
		return 0;
	}
	case CCISS_BIG_PASSTHRU:
	{
		/* Multi-SG command passthrough: the user buffer is split
		 * into up to MAXSGENTRIES chunks of at most malloc_size. */
		BIG_IOCTL_Command_struct iocommand;
		ctlr_info_t *h = hba[ctlr];
		CommandList_struct *c;
		char *buff[MAXSGENTRIES] = {NULL,};
		int buff_size[MAXSGENTRIES] = {0,};
		u64bit temp64;
		unsigned long flags;
		BYTE sg_used = 0;
		int status = 0;
		int i;
		DECLARE_COMPLETION(wait);

		if (!arg)
			return -EINVAL;
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&iocommand, (void *) arg,
				sizeof(BIG_IOCTL_Command_struct)))
			return -EFAULT;
		if ((iocommand.buf_size < 1) &&
		    (iocommand.Request.Type.Direction != XFER_NONE)) {
			return -EINVAL;
		}
		/* Check kmalloc limits using all SGs */
		if (iocommand.malloc_size > MAX_KMALLOC_SIZE)
			return -EINVAL;
		if (iocommand.buf_size > iocommand.malloc_size * MAXSGENTRIES)
			return -EINVAL;
		if (iocommand.buf_size > 0) {
			__u32 size_left_alloc = iocommand.buf_size;
			BYTE *data_ptr = (BYTE *) iocommand.buf;

			/* Carve the user buffer into malloc_size chunks. */
			while (size_left_alloc > 0) {
				buff_size[sg_used] =
					(size_left_alloc > iocommand.malloc_size)
					? iocommand.malloc_size
					: size_left_alloc;
				buff[sg_used] = kmalloc(buff_size[sg_used],
						GFP_KERNEL);
				if (buff[sg_used] == NULL) {
					status = -ENOMEM;
					goto cleanup1;
				}
				if (iocommand.Request.Type.Direction ==
						XFER_WRITE) {
					/* Copy the data into the buffer created */
					if (copy_from_user(buff[sg_used],
							data_ptr,
							buff_size[sg_used])) {
						status = -EFAULT;
						goto cleanup1;
					}
				} else
					memset(buff[sg_used], 0,
						buff_size[sg_used]);
				size_left_alloc -= buff_size[sg_used];
				data_ptr += buff_size[sg_used];
				sg_used++;
			}
		}
		if ((c = cmd_alloc(h, 0)) == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		/* Fill in the command type */
		c->cmd_type = CMD_IOCTL_PEND;
		/* Fill in Command Header */
		c->Header.ReplyQueue = 0;	/* unused in simple mode */
		if (iocommand.buf_size > 0) {	/* buffer to fill */
			c->Header.SGList = sg_used;
			c->Header.SGTotal = sg_used;
		} else {			/* no buffers to fill */
			c->Header.SGList = 0;
			c->Header.SGTotal = 0;
		}
		c->Header.LUN = iocommand.LUN_info;
		c->Header.Tag.lower = c->busaddr; /* use the kernel address */
						  /* the cmd block for tag */
		/* Fill in Request block */
		c->Request = iocommand.Request;

		/* Fill in the scatter gather information */
		if (iocommand.buf_size > 0) {
			int sg;

			for (sg = 0; sg < sg_used; sg++) {
				temp64.val = pci_map_single(h->pdev,
						buff[sg], buff_size[sg],
						PCI_DMA_BIDIRECTIONAL);
				c->SG[sg].Addr.lower = temp64.val32.lower;
				c->SG[sg].Addr.upper = temp64.val32.upper;
				c->SG[sg].Len = buff_size[sg];
				c->SG[sg].Ext = 0; /* we are not chaining */
			}
		}
		c->waiting = &wait;

		/* Put the request on the tail of the request queue */
		spin_lock_irqsave(&io_request_lock, flags);
		addQ(&h->reqQ, c);
		h->Qdepth++;
		start_io(h);
		spin_unlock_irqrestore(&io_request_lock, flags);

		wait_for_completion(&wait);

		/* unlock the buffers from DMA */
		for (i = 0; i < sg_used; i++) {
			temp64.val32.lower = c->SG[i].Addr.lower;
			temp64.val32.upper = c->SG[i].Addr.upper;
			pci_unmap_single(h->pdev, (dma_addr_t) temp64.val,
					buff_size[i], PCI_DMA_BIDIRECTIONAL);
		}
		/* Copy the error information out */
		iocommand.error_info = *(c->err_info);
		/* FIX: copy the full BIG struct back to the user; the old
		 * code used sizeof(IOCTL_Command_struct) and truncated the
		 * data returned for CCISS_BIG_PASSTHRU. */
		if (copy_to_user((void *) arg, &iocommand,
				sizeof(BIG_IOCTL_Command_struct))) {
			cmd_free(h, c, 0);
			status = -EFAULT;
			goto cleanup1;
		}
		if (iocommand.Request.Type.Direction == XFER_READ) {
			/* Copy the data out of the buffer we created */
			BYTE *ptr = (BYTE *) iocommand.buf;

			for (i = 0; i < sg_used; i++) {
				if (copy_to_user(ptr, buff[i],
						buff_size[i])) {
					cmd_free(h, c, 0);
					status = -EFAULT;
					goto cleanup1;
				}
				ptr += buff_size[i];
			}
		}
		cmd_free(h, c, 0);
		status = 0;
cleanup1:
		/* kfree(NULL) is a no-op, so no NULL check is needed. */
		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		return status;
	}
	default:
		return -EBADRQC;
	}
}
/* Generated by GNU enscript 1.6.4. */