/* extractedLnx/linux/drivers/scsi/sd.c_requeue_sd_request.c */
static void requeue_sd_request (Scsi_Cmnd * SCpnt)
{
int dev, devm, block, this_count;
unsigned char cmd[10];
char nbuff[6];
int bounce_size, contiguous;
int max_sg;
struct buffer_head * bh, *bhp;
char * buff, *bounce_buffer;
repeat:
if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
do_sd_request();
return;
}
devm = SD_PARTITION(SCpnt->request.rq_dev);
dev = DEVICE_NR(SCpnt->request.rq_dev);
block = SCpnt->request.sector;
this_count = 0;
SCSI_LOG_HLQUEUE(1,printk("Doing sd request, dev = %d, block = %d\n", devm, block));
if (devm >= (sd_template.dev_max << 4) ||
!rscsi_disks[dev].device ||
!rscsi_disks[dev].device->online ||
block + SCpnt->request.nr_sectors > sd[devm].nr_sects)
{
SCSI_LOG_HLQUEUE(2,printk("Finishing %ld sectors\n", SCpnt->request.nr_sectors));
SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
SCSI_LOG_HLQUEUE(2,printk("Retry with 0x%p\n", SCpnt));
goto repeat;
}
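/*
 * In the rejection paths above and below, end_scsi_request() completes
 * the current request with an error and returns the next queued command
 * (or NULL); the goto repeat then re-runs the checks at the top, which
 * fall back to do_sd_request() once nothing is left to service.
 */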
block += sd[devm].start_sect;
if (rscsi_disks[dev].device->changed)
{
/*
* quietly refuse to do anything to a changed disc until the changed
* bit has been reset
*/
/* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
goto repeat;
}
sd_devname(devm >> 4, nbuff);
SCSI_LOG_HLQUEUE(2,printk("%s : real dev = /dev/%d, block = %d\n",
nbuff, dev, block));
/*
* If we have a 1K hardware sectorsize, prevent access to single
* 512 byte sectors. In theory we could handle this - in fact
* the scsi cdrom driver must be able to handle this because
* we typically use 1K blocksizes, and cdroms typically have
* 2K hardware sectorsizes. Of course, things are simpler
* with the cdrom, since it is read-only. For performance
* reasons, the filesystems should be able to handle this
* and not force the scsi disk driver to use bounce buffers
* for this.
*/
if (rscsi_disks[dev].sector_size == 1024)
if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
printk("sd.c:Bad block number/count requested");
SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
goto repeat;
}
if (rscsi_disks[dev].sector_size == 2048)
if((block & 3) || (SCpnt->request.nr_sectors & 3)) {
printk("sd.c:Bad block number/count requested");
SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
goto repeat;
}
if (rscsi_disks[dev].sector_size == 4096)
if((block & 7) || (SCpnt->request.nr_sectors & 7)) {
printk("sd.cBad block number/count requested");
SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
goto repeat;
}
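/*
 * The three checks above are all instances of one alignment rule: with a
 * hardware sector of N bytes (N a power of two), both the starting block
 * and the sector count, expressed in 512-byte units, must be multiples
 * of N/512. A hypothetical generic form (a sketch only, not what this
 * driver actually does) would be:
 *
 *	int align = (rscsi_disks[dev].sector_size >> 9) - 1;
 *	if ((block | SCpnt->request.nr_sectors) & align) {
 *		printk("sd.c:Bad block number/count requested\n");
 *		SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 *		goto repeat;
 *	}
 */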
switch (SCpnt->request.cmd)
{
case WRITE :
if (!rscsi_disks[dev].device->writeable)
{
SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
goto repeat;
}
cmd[0] = WRITE_6;
break;
case READ :
cmd[0] = READ_6;
break;
default :
panic ("Unknown sd command %d\n", SCpnt->request.cmd);
}
SCpnt->this_count = 0;
/* If the host adapter can deal with very large scatter-gather
* requests, it is a waste of time to cluster
*/
contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 : 1);
bounce_buffer = NULL;
bounce_size = (SCpnt->request.nr_sectors << 9);
/* First see if we need a bounce buffer for this request. If we do, make
* sure that we can allocate a buffer. Do not waste space by allocating
* a bounce buffer if we are straddling the 16Mb line
*/
if (contiguous && SCpnt->request.bh &&
virt_to_phys(SCpnt->request.bh->b_data)
+ (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD
&& SCpnt->host->unchecked_isa_dma) {
if(virt_to_phys(SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
bounce_buffer = (char *) scsi_malloc(bounce_size);
if(!bounce_buffer) contiguous = 0;
}
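/*
 * Worked example of the test above (ISA_DMA_THRESHOLD is the 16 MB ISA
 * DMA limit): a request whose buffer lies entirely above 16 MB, say
 * starting at physical 0x1200000, gets a bounce buffer so that an
 * unchecked_isa_dma host can still reach it. A buffer that merely
 * straddles the line (e.g. 32 KB starting at 0xFFF000, ending at
 * 0x1006FFF) is not bounced; contiguous is cleared instead and the
 * request falls through to per-buffer handling below.
 */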
if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
bhp = bhp->b_reqnext) {
if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
contiguous = 0;
break;
}
}
if (!SCpnt->request.bh || contiguous) {
/* case of page request (i.e. raw device), or unlinked buffer */
this_count = SCpnt->request.nr_sectors;
buff = SCpnt->request.buffer;
SCpnt->use_sg = 0;
} else if (SCpnt->host->sg_tablesize == 0 ||
(scsi_need_isa_buffer && scsi_dma_free_sectors <= 10)) {
/* Case of host adapter that cannot scatter-gather. We also
* come here if we are running low on DMA buffer memory. We set
* a threshold higher than that we would need for this request so
* we leave room for other requests. Even though we would not need
* it all, we need to be conservative, because if we run low enough
* we have no choice but to panic.
*/
if (SCpnt->host->sg_tablesize != 0 &&
scsi_need_isa_buffer &&
scsi_dma_free_sectors <= 10)
printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
this_count = SCpnt->request.current_nr_sectors;
buff = SCpnt->request.buffer;
SCpnt->use_sg = 0;
} else {
/* Scatter-gather capable host adapter */
struct scatterlist * sgpnt;
int count, this_count_max;
int counted;
bh = SCpnt->request.bh;
this_count = 0;
this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
count = 0;
bhp = NULL;
while(bh) {
if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
!CLUSTERABLE_DEVICE(SCpnt) ||
(SCpnt->host->unchecked_isa_dma &&
virt_to_phys(bh->b_data-1) == ISA_DMA_THRESHOLD)) {
if (count < SCpnt->host->sg_tablesize) count++;
else break;
}
this_count += (bh->b_size >> 9);
bhp = bh;
bh = bh->b_reqnext;
}
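/*
 * Illustrative example of the counting loop above: three 1 KB buffers
 * where the first two are physically contiguous, on a clusterable
 * device, yield count = 2 scatter-gather segments and this_count = 6
 * (512-byte sectors); the second buffer is merged into the first
 * segment, so no new chain entry is counted for it.
 */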
#if 0
if(SCpnt->host->unchecked_isa_dma &&
virt_to_phys(SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
#endif
SCpnt->use_sg = count; /* Number of chains */
/* scsi_malloc can only allocate in chunks of 512 bytes */
count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511;
SCpnt->sglist_len = count;
max_sg = count / sizeof(struct scatterlist);
if(SCpnt->host->sg_tablesize < max_sg)
max_sg = SCpnt->host->sg_tablesize;
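/*
 * Round-up example for the scsi_malloc() size computed above: since
 * scsi_malloc() hands out memory in 512-byte chunks, a table needing,
 * say, 272 bytes is padded via (272 + 511) & ~511 = 512. max_sg is then
 * recomputed from the padded size so any slack entries can be used,
 * but never beyond the adapter's sg_tablesize.
 */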
sgpnt = (struct scatterlist * ) scsi_malloc(count);
if (!sgpnt) {
printk("Warning - running *really* short on DMA buffers\n");
SCpnt->use_sg = 0; /* No memory left - bail out */
this_count = SCpnt->request.current_nr_sectors;
buff = SCpnt->request.buffer;
} else {
memset(sgpnt, 0, count); /* Zero so it is easy to fill, but only
* if memory is available
*/
buff = (char *) sgpnt;
counted = 0;
for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
count < SCpnt->use_sg && bh;
count++, bh = bhp) {
bhp = bh->b_reqnext;
if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
sgpnt[count].length += bh->b_size;
counted += bh->b_size >> 9;
if (virt_to_phys(sgpnt[count].address) + sgpnt[count].length - 1 >
ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
!sgpnt[count].alt_address) {
sgpnt[count].alt_address = sgpnt[count].address;
/* We try to avoid exhausting the DMA pool, since it is
* easier to control usage here. In other places we might
* have a more pressing need, and we would be screwed if
* we ran out */
if(scsi_dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
sgpnt[count].address = NULL;
} else {
sgpnt[count].address =
(char *) scsi_malloc(sgpnt[count].length);
}
/* If we start running low on DMA buffers, we abort the
* scatter-gather operation, and free all of the memory
* we have allocated. We want to ensure that all scsi
* operations are able to do at least a non-scatter/gather
* operation */
if(sgpnt[count].address == NULL){ /* Out of dma memory */
#if 0
printk("Warning: Running low on SCSI DMA buffers");
/* Try switching back to a non s-g operation. */
while(--count >= 0){
if(sgpnt[count].alt_address)
scsi_free(sgpnt[count].address,
sgpnt[count].length);
}
this_count = SCpnt->request.current_nr_sectors;
buff = SCpnt->request.buffer;
SCpnt->use_sg = 0;
scsi_free(sgpnt, SCpnt->sglist_len);
#endif
SCpnt->use_sg = count;
this_count = counted -= bh->b_size >> 9;
break;
}
}
/* Only cluster buffers if we know that we can supply DMA
* buffers large enough to satisfy the request. Do not cluster
* a new request if this would mean that we suddenly need to
* start using DMA bounce buffers */
if(bhp && CONTIGUOUS_BUFFERS(bh,bhp)
&& CLUSTERABLE_DEVICE(SCpnt)) {
char * tmp;
if (virt_to_phys(sgpnt[count].address) + sgpnt[count].length +
bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
(SCpnt->host->unchecked_isa_dma) &&
!sgpnt[count].alt_address) continue;
if(!sgpnt[count].alt_address) {count--; continue; }
if(scsi_dma_free_sectors > 10)
tmp = (char *) scsi_malloc(sgpnt[count].length
+ bhp->b_size);
else {
tmp = NULL;
max_sg = SCpnt->use_sg;
}
if(tmp){
scsi_free(sgpnt[count].address, sgpnt[count].length);
sgpnt[count].address = tmp;
count--;
continue;
}
/* If we are allowed another sg chain, then increment
 * counter so we can insert it. Otherwise we will end
 * up truncating */
if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
} /* contiguous buffers */
} /* for loop */
/* This is actually how many we are going to transfer */
this_count = counted;
if(count < SCpnt->use_sg || SCpnt->use_sg
> SCpnt->host->sg_tablesize){
bh = SCpnt->request.bh;
printk("Use sg, count %d %x %d\n",
SCpnt->use_sg, count, scsi_dma_free_sectors);
printk("maxsg = %x, counted = %d this_count = %d\n",
max_sg, counted, this_count);
while(bh){
printk("[%p %x] ", bh->b_data, bh->b_size);
bh = bh->b_reqnext;
}
if(SCpnt->use_sg < 16)
for(count=0; count<SCpnt->use_sg; count++)
printk("{%d:%p %p %d} ", count,
sgpnt[count].address,
sgpnt[count].alt_address,
sgpnt[count].length);
panic("Ooops");
}
if (SCpnt->request.cmd == WRITE)
for(count=0; count<SCpnt->use_sg; count++)
if(sgpnt[count].alt_address)
memcpy(sgpnt[count].address, sgpnt[count].alt_address,
sgpnt[count].length);
} /* Able to malloc sgpnt */
} /* Host adapter capable of scatter-gather */
/* Now handle the possibility of DMA to addresses > 16Mb */
if(SCpnt->use_sg == 0){
if (virt_to_phys(buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
(SCpnt->host->unchecked_isa_dma)) {
if(bounce_buffer)
buff = bounce_buffer;
else
buff = (char *) scsi_malloc(this_count << 9);
if(buff == NULL) { /* Try backing off a bit if we are low on mem*/
this_count = SCpnt->request.current_nr_sectors;
buff = (char *) scsi_malloc(this_count << 9);
if(!buff) panic("Ran out of DMA buffers.");
}
if (SCpnt->request.cmd == WRITE)
memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
}
}
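/*
 * Note that only WRITEs are copied into the substituted buffer here;
 * for READs the data must travel the other way, and the copy back into
 * the original buffers presumably happens on completion (in rw_intr()),
 * using the alt_address entries in the scatter-gather case or the
 * substituted buff pointer in the contiguous case.
 */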
SCSI_LOG_HLQUEUE(2,printk("%s : %s %d/%ld 512 byte blocks.\n",
nbuff,
(SCpnt->request.cmd == WRITE) ? "writing" : "reading",
this_count, SCpnt->request.nr_sectors));
cmd[1] = (SCpnt->lun << 5) & 0xe0;
if (rscsi_disks[dev].sector_size == 4096){
if(block & 7) panic("sd.c:Bad block number requested");
if(this_count & 7) panic("sd.c:Bad block number requested");
block = block >> 3;
this_count = this_count >> 3;
}
if (rscsi_disks[dev].sector_size == 2048){
if(block & 3) panic("sd.c:Bad block number requested");
if(this_count & 3) panic("sd.c:Bad block number requested");
block = block >> 2;
this_count = this_count >> 2;
}
if (rscsi_disks[dev].sector_size == 1024){
if(block & 1) panic("sd.c:Bad block number requested");
if(this_count & 1) panic("sd.c:Bad block number requested");
block = block >> 1;
this_count = this_count >> 1;
}
if (rscsi_disks[dev].sector_size == 256){
block = block << 1;
this_count = this_count << 1;
}
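/*
 * After the rescaling above, block and this_count are expressed in
 * device-sector units rather than 512-byte units. For example, with a
 * 2048-byte hardware sector, 512-byte block 100 becomes device block
 * 25 (100 >> 2) and a 16-sector transfer becomes 4 device sectors;
 * with 256-byte sectors the values are doubled instead.
 */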
if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
{
if (this_count > 0xffff)
this_count = 0xffff;
cmd[0] += READ_10 - READ_6;
cmd[2] = (unsigned char) (block >> 24) & 0xff;
cmd[3] = (unsigned char) (block >> 16) & 0xff;
cmd[4] = (unsigned char) (block >> 8) & 0xff;
cmd[5] = (unsigned char) block & 0xff;
cmd[6] = cmd[9] = 0;
cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
cmd[8] = (unsigned char) this_count & 0xff;
}
else
{
if (this_count > 0xff)
this_count = 0xff;
cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
cmd[2] = (unsigned char) ((block >> 8) & 0xff);
cmd[3] = (unsigned char) block & 0xff;
cmd[4] = (unsigned char) this_count;
cmd[5] = 0;
}
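/*
 * The two branches above build the SCSI CDB for READ_6/WRITE_6 or
 * READ_10/WRITE_10. The 6-byte form packs a 21-bit LBA into the low
 * five bits of byte 1 plus bytes 2-3, with an 8-bit length in byte 4;
 * the 10-byte form carries a 32-bit LBA in bytes 2-5 and a 16-bit
 * length in bytes 7-8. cmd[0] += READ_10 - READ_6 works for both reads
 * and writes because the 10-byte opcodes (0x28/0x2a) differ from the
 * 6-byte ones (0x08/0x0a) by the same 0x20.
 */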
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
* host adapter, it's safe to assume that we can at least transfer
* this many bytes between each connect / disconnect.
*/
SCpnt->transfersize = rscsi_disks[dev].sector_size;
SCpnt->underflow = this_count << 9;
scsi_do_cmd (SCpnt, (void *) cmd, buff,
this_count * rscsi_disks[dev].sector_size,
rw_intr,
(SCpnt->device->type == TYPE_DISK ?
SD_TIMEOUT : SD_MOD_TIMEOUT),
MAX_RETRIES);
}