Showing error 1037

User: Jiri Slaby
Error type: Leaving function in locked state
Error type description: A lock is not released on every exit path of the function, so it leaks out of the function (a minimal sketch of this pattern follows the header below)
File location: drivers/scsi/qla2xxx/qla_iocb.c
Line in file: 540
Project: Linux Kernel
Project version: 2.6.28
Tools: Undetermined 1
Entered: 2012-03-04 17:07:06 UTC
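
Where the report points: listing line 540 below is the "return (pkt);" that closes qla2x00_req_pkt(). Per its own comment the routine must be entered with ha->hardware_lock held, and inside its wait loop it also drops and re-takes that lock (spin_unlock() at line 525, spin_lock_irq() at line 534), so every path returns while still holding the lock, which is presumably what the checker reports as leaving the function in a locked state.

As a minimal, hypothetical sketch of this error class in general (not code from this driver; struct demo_dev, its fields, and demo_send() are invented for illustration):

#include <linux/errno.h>
#include <linux/spinlock.h>

/* Hypothetical device state; the lock is assumed to be set up with spin_lock_init() elsewhere. */
struct demo_dev {
	spinlock_t lock;
	int ring_space;
};

/*
 * BUGGY: the early error return exits with dev->lock still held, which is
 * exactly the "lock not released on all paths" pattern described above.
 */
static int demo_send(struct demo_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	if (!dev->ring_space)
		return -EBUSY;	/* BUG: no spin_unlock_irqrestore() on this path */

	dev->ring_space--;

	spin_unlock_irqrestore(&dev->lock, flags);	/* only the success path unlocks */
	return 0;
}

The usual remedy in kernel code is to funnel every exit through a single unlock, typically via a goto label, as the queuing_error paths in qla2x00_start_scsi() and qla24xx_start_scsi() in the listing below already do.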


Source:

  1/*
  2 * QLogic Fibre Channel HBA Driver
  3 * Copyright (c)  2003-2008 QLogic Corporation
  4 *
  5 * See LICENSE.qla2xxx for copyright and licensing details.
  6 */
  7#include "qla_def.h"
  8
  9#include <linux/blkdev.h>
 10#include <linux/delay.h>
 11
 12#include <scsi/scsi_tcq.h>
 13
 14static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
 15static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
 16
 17/**
 18 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 19 * @sp: SRB command to process
 20 *
 21 * Returns the proper CF_* direction based on CDB.
 22 */
 23static inline uint16_t
 24qla2x00_get_cmd_direction(srb_t *sp)
 25{
 26        uint16_t cflags;
 27
 28        cflags = 0;
 29
 30        /* Set transfer direction */
 31        if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
 32                cflags = CF_WRITE;
 33                sp->fcport->ha->qla_stats.output_bytes +=
 34                    scsi_bufflen(sp->cmd);
 35        } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
 36                cflags = CF_READ;
 37                sp->fcport->ha->qla_stats.input_bytes +=
 38                    scsi_bufflen(sp->cmd);
 39        }
 40        return (cflags);
 41}
 42
 43/**
 44 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 45 * Continuation Type 0 IOCBs to allocate.
 46 *
 47 * @dsds: number of data segment descriptors needed
 48 *
 49 * Returns the number of IOCB entries needed to store @dsds.
 50 */
 51uint16_t
 52qla2x00_calc_iocbs_32(uint16_t dsds)
 53{
 54        uint16_t iocbs;
 55
 56        iocbs = 1;
 57        if (dsds > 3) {
 58                iocbs += (dsds - 3) / 7;
 59                if ((dsds - 3) % 7)
 60                        iocbs++;
 61        }
 62        return (iocbs);
 63}
 64
 65/**
 66 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 67 * Continuation Type 1 IOCBs to allocate.
 68 *
 69 * @dsds: number of data segment descriptors needed
 70 *
 71 * Returns the number of IOCB entries needed to store @dsds.
 72 */
 73uint16_t
 74qla2x00_calc_iocbs_64(uint16_t dsds)
 75{
 76        uint16_t iocbs;
 77
 78        iocbs = 1;
 79        if (dsds > 2) {
 80                iocbs += (dsds - 2) / 5;
 81                if ((dsds - 2) % 5)
 82                        iocbs++;
 83        }
 84        return (iocbs);
 85}
 86
 87/**
 88 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 89 * @ha: HA context
 90 *
 91 * Returns a pointer to the Continuation Type 0 IOCB packet.
 92 */
 93static inline cont_entry_t *
 94qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
 95{
 96        cont_entry_t *cont_pkt;
 97
 98        /* Adjust ring index. */
 99        ha->req_ring_index++;
100        if (ha->req_ring_index == ha->request_q_length) {
101                ha->req_ring_index = 0;
102                ha->request_ring_ptr = ha->request_ring;
103        } else {
104                ha->request_ring_ptr++;
105        }
106
107        cont_pkt = (cont_entry_t *)ha->request_ring_ptr;
108
109        /* Load packet defaults. */
110        *((uint32_t *)(&cont_pkt->entry_type)) =
111            __constant_cpu_to_le32(CONTINUE_TYPE);
112
113        return (cont_pkt);
114}
115
116/**
117 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
118 * @ha: HA context
119 *
120 * Returns a pointer to the continuation type 1 IOCB packet.
121 */
122static inline cont_a64_entry_t *
123qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
124{
125        cont_a64_entry_t *cont_pkt;
126
127        /* Adjust ring index. */
128        ha->req_ring_index++;
129        if (ha->req_ring_index == ha->request_q_length) {
130                ha->req_ring_index = 0;
131                ha->request_ring_ptr = ha->request_ring;
132        } else {
133                ha->request_ring_ptr++;
134        }
135
136        cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;
137
138        /* Load packet defaults. */
139        *((uint32_t *)(&cont_pkt->entry_type)) =
140            __constant_cpu_to_le32(CONTINUE_A64_TYPE);
141
142        return (cont_pkt);
143}
144
145/**
146 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
147 * capable IOCB types.
148 *
149 * @sp: SRB command to process
150 * @cmd_pkt: Command type 2 IOCB
151 * @tot_dsds: Total number of segments to transfer
152 */
153void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
154    uint16_t tot_dsds)
155{
156        uint16_t        avail_dsds;
157        uint32_t        *cur_dsd;
158        scsi_qla_host_t        *ha;
159        struct scsi_cmnd *cmd;
160        struct scatterlist *sg;
161        int i;
162
163        cmd = sp->cmd;
164
165        /* Update entry type to indicate Command Type 2 IOCB */
166        *((uint32_t *)(&cmd_pkt->entry_type)) =
167            __constant_cpu_to_le32(COMMAND_TYPE);
168
169        /* No data transfer */
170        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
171                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
172                return;
173        }
174
175        ha = sp->ha;
176
177        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
178
179        /* Three DSDs are available in the Command Type 2 IOCB */
180        avail_dsds = 3;
181        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
182
183        /* Load data segments */
184        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
185                cont_entry_t *cont_pkt;
186
187                /* Allocate additional continuation packets? */
188                if (avail_dsds == 0) {
189                        /*
190                         * Seven DSDs are available in the Continuation
191                         * Type 0 IOCB.
192                         */
193                        cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
194                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
195                        avail_dsds = 7;
196                }
197
198                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
199                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
200                avail_dsds--;
201        }
202}
203
204/**
205 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
206 * capable IOCB types.
207 *
208 * @sp: SRB command to process
209 * @cmd_pkt: Command type 3 IOCB
210 * @tot_dsds: Total number of segments to transfer
211 */
212void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
213    uint16_t tot_dsds)
214{
215        uint16_t        avail_dsds;
216        uint32_t        *cur_dsd;
217        scsi_qla_host_t        *ha;
218        struct scsi_cmnd *cmd;
219        struct scatterlist *sg;
220        int i;
221
222        cmd = sp->cmd;
223
224        /* Update entry type to indicate Command Type 3 IOCB */
225        *((uint32_t *)(&cmd_pkt->entry_type)) =
226            __constant_cpu_to_le32(COMMAND_A64_TYPE);
227
228        /* No data transfer */
229        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
230                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
231                return;
232        }
233
234        ha = sp->ha;
235
236        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
237
238        /* Two DSDs are available in the Command Type 3 IOCB */
239        avail_dsds = 2;
240        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
241
242        /* Load data segments */
243        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
244                dma_addr_t        sle_dma;
245                cont_a64_entry_t *cont_pkt;
246
247                /* Allocate additional continuation packets? */
248                if (avail_dsds == 0) {
249                        /*
250                         * Five DSDs are available in the Continuation
251                         * Type 1 IOCB.
252                         */
253                        cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
254                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
255                        avail_dsds = 5;
256                }
257
258                sle_dma = sg_dma_address(sg);
259                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
260                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
261                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
262                avail_dsds--;
263        }
264}
265
266/**
267 * qla2x00_start_scsi() - Send a SCSI command to the ISP
268 * @sp: command to send to the ISP
269 *
270 * Returns non-zero if a failure occurred, else zero.
271 */
272int
273qla2x00_start_scsi(srb_t *sp)
274{
275        int                ret, nseg;
276        unsigned long   flags;
277        scsi_qla_host_t        *ha;
278        struct scsi_cmnd *cmd;
279        uint32_t        *clr_ptr;
280        uint32_t        index;
281        uint32_t        handle;
282        cmd_entry_t        *cmd_pkt;
283        uint16_t        cnt;
284        uint16_t        req_cnt;
285        uint16_t        tot_dsds;
286        struct device_reg_2xxx __iomem *reg;
287
288        /* Setup device pointers. */
289        ret = 0;
290        ha = sp->ha;
291        reg = &ha->iobase->isp;
292        cmd = sp->cmd;
293        /* So we know we haven't pci_map'ed anything yet */
294        tot_dsds = 0;
295
296        /* Send marker if required */
297        if (ha->marker_needed != 0) {
298                if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
299                        return (QLA_FUNCTION_FAILED);
300                }
301                ha->marker_needed = 0;
302        }
303
304        /* Acquire ring specific lock */
305        spin_lock_irqsave(&ha->hardware_lock, flags);
306
307        /* Check for room in outstanding command list. */
308        handle = ha->current_outstanding_cmd;
309        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
310                handle++;
311                if (handle == MAX_OUTSTANDING_COMMANDS)
312                        handle = 1;
313                if (!ha->outstanding_cmds[handle])
314                        break;
315        }
316        if (index == MAX_OUTSTANDING_COMMANDS)
317                goto queuing_error;
318
319        /* Map the sg table so we have an accurate count of sg entries needed */
320        if (scsi_sg_count(cmd)) {
321                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
322                    scsi_sg_count(cmd), cmd->sc_data_direction);
323                if (unlikely(!nseg))
324                        goto queuing_error;
325        } else
326                nseg = 0;
327
328        tot_dsds = nseg;
329
330        /* Calculate the number of request entries needed. */
331        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
332        if (ha->req_q_cnt < (req_cnt + 2)) {
333                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
334                if (ha->req_ring_index < cnt)
335                        ha->req_q_cnt = cnt - ha->req_ring_index;
336                else
337                        ha->req_q_cnt = ha->request_q_length -
338                            (ha->req_ring_index - cnt);
339        }
340        if (ha->req_q_cnt < (req_cnt + 2))
341                goto queuing_error;
342
343        /* Build command packet */
344        ha->current_outstanding_cmd = handle;
345        ha->outstanding_cmds[handle] = sp;
346        sp->ha = ha;
347        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
348        ha->req_q_cnt -= req_cnt;
349
350        cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
351        cmd_pkt->handle = handle;
352        /* Zero out remaining portion of packet. */
353        clr_ptr = (uint32_t *)cmd_pkt + 2;
354        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
355        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
356
357        /* Set target ID and LUN number*/
358        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
359        cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
360
361        /* Update tagged queuing modifier */
362        cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
363
364        /* Load SCSI command packet. */
365        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
366        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
367
368        /* Build IOCB segments */
369        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
370
371        /* Set total data segment count. */
372        cmd_pkt->entry_count = (uint8_t)req_cnt;
373        wmb();
374
375        /* Adjust ring index. */
376        ha->req_ring_index++;
377        if (ha->req_ring_index == ha->request_q_length) {
378                ha->req_ring_index = 0;
379                ha->request_ring_ptr = ha->request_ring;
380        } else
381                ha->request_ring_ptr++;
382
383        sp->flags |= SRB_DMA_VALID;
384
385        /* Set chip new ring index. */
386        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
387        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));        /* PCI Posting. */
388
389        /* Manage unprocessed RIO/ZIO commands in response queue. */
390        if (ha->flags.process_response_queue &&
391            ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
392                qla2x00_process_response_queue(ha);
393
394        spin_unlock_irqrestore(&ha->hardware_lock, flags);
395        return (QLA_SUCCESS);
396
397queuing_error:
398        if (tot_dsds)
399                scsi_dma_unmap(cmd);
400
401        spin_unlock_irqrestore(&ha->hardware_lock, flags);
402
403        return (QLA_FUNCTION_FAILED);
404}
405
406/**
407 * qla2x00_marker() - Send a marker IOCB to the firmware.
408 * @ha: HA context
409 * @loop_id: loop ID
410 * @lun: LUN
411 * @type: marker modifier
412 *
413 * Can be called from both normal and interrupt context.
414 *
415 * Returns non-zero if a failure occurred, else zero.
416 */
417int
418__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
419    uint8_t type)
420{
421        mrk_entry_t *mrk;
422        struct mrk_entry_24xx *mrk24;
423        scsi_qla_host_t *pha = to_qla_parent(ha);
424
425        mrk24 = NULL;
426        mrk = (mrk_entry_t *)qla2x00_req_pkt(pha);
427        if (mrk == NULL) {
428                DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
429                    __func__, ha->host_no));
430
431                return (QLA_FUNCTION_FAILED);
432        }
433
434        mrk->entry_type = MARKER_TYPE;
435        mrk->modifier = type;
436        if (type != MK_SYNC_ALL) {
437                if (IS_FWI2_CAPABLE(ha)) {
438                        mrk24 = (struct mrk_entry_24xx *) mrk;
439                        mrk24->nport_handle = cpu_to_le16(loop_id);
440                        mrk24->lun[1] = LSB(lun);
441                        mrk24->lun[2] = MSB(lun);
442                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
443                        mrk24->vp_index = ha->vp_idx;
444                } else {
445                        SET_TARGET_ID(ha, mrk->target, loop_id);
446                        mrk->lun = cpu_to_le16(lun);
447                }
448        }
449        wmb();
450
451        qla2x00_isp_cmd(pha);
452
453        return (QLA_SUCCESS);
454}
455
456int
457qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
458    uint8_t type)
459{
460        int ret;
461        unsigned long flags = 0;
462        scsi_qla_host_t *pha = to_qla_parent(ha);
463
464        spin_lock_irqsave(&pha->hardware_lock, flags);
465        ret = __qla2x00_marker(ha, loop_id, lun, type);
466        spin_unlock_irqrestore(&pha->hardware_lock, flags);
467
468        return (ret);
469}
470
471/**
472 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
473 * @ha: HA context
474 *
475 * Note: The caller must hold the hardware lock before calling this routine.
476 *
477 * Returns NULL if function failed, else, a pointer to the request packet.
478 */
479static request_t *
480qla2x00_req_pkt(scsi_qla_host_t *ha)
481{
482        device_reg_t __iomem *reg = ha->iobase;
483        request_t        *pkt = NULL;
484        uint16_t        cnt;
485        uint32_t        *dword_ptr;
486        uint32_t        timer;
487        uint16_t        req_cnt = 1;
488
489        /* Wait 1 second for slot. */
490        for (timer = HZ; timer; timer--) {
491                if ((req_cnt + 2) >= ha->req_q_cnt) {
492                        /* Calculate number of free request entries. */
493                        if (IS_FWI2_CAPABLE(ha))
494                                cnt = (uint16_t)RD_REG_DWORD(
495                                    &reg->isp24.req_q_out);
496                        else
497                                cnt = qla2x00_debounce_register(
498                                    ISP_REQ_Q_OUT(ha, &reg->isp));
499                        if  (ha->req_ring_index < cnt)
500                                ha->req_q_cnt = cnt - ha->req_ring_index;
501                        else
502                                ha->req_q_cnt = ha->request_q_length -
503                                    (ha->req_ring_index - cnt);
504                }
505                /* If room for request in request ring. */
506                if ((req_cnt + 2) < ha->req_q_cnt) {
507                        ha->req_q_cnt--;
508                        pkt = ha->request_ring_ptr;
509
510                        /* Zero out packet. */
511                        dword_ptr = (uint32_t *)pkt;
512                        for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
513                                *dword_ptr++ = 0;
514
515                        /* Set system defined field. */
516                        pkt->sys_define = (uint8_t)ha->req_ring_index;
517
518                        /* Set entry count. */
519                        pkt->entry_count = 1;
520
521                        break;
522                }
523
524                /* Release ring specific lock */
525                spin_unlock(&ha->hardware_lock);
526
527                udelay(2);   /* 2 us */
528
529                /* Check for pending interrupts. */
530                /* During init we issue marker directly */
531                if (!ha->marker_needed && !ha->flags.init_done)
532                        qla2x00_poll(ha);
533
534                spin_lock_irq(&ha->hardware_lock);
535        }
536        if (!pkt) {
537                DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
538        }
539
540        return (pkt);
541}
542
543/**
544 * qla2x00_isp_cmd() - Modify the request ring pointer.
545 * @ha: HA context
546 *
547 * Note: The caller must hold the hardware lock before calling this routine.
548 */
549static void
550qla2x00_isp_cmd(scsi_qla_host_t *ha)
551{
552        device_reg_t __iomem *reg = ha->iobase;
553
554        DEBUG5(printk("%s(): IOCB data:\n", __func__));
555        DEBUG5(qla2x00_dump_buffer(
556            (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));
557
558        /* Adjust ring index. */
559        ha->req_ring_index++;
560        if (ha->req_ring_index == ha->request_q_length) {
561                ha->req_ring_index = 0;
562                ha->request_ring_ptr = ha->request_ring;
563        } else
564                ha->request_ring_ptr++;
565
566        /* Set chip new ring index. */
567        if (IS_FWI2_CAPABLE(ha)) {
568                WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
569                RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
570        } else {
571                WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
572                RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
573        }
574
575}
576
577/**
578 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
579 * Continuation Type 1 IOCBs to allocate.
580 *
581 * @dsds: number of data segment descriptors needed
582 *
583 * Returns the number of IOCB entries needed to store @dsds.
584 */
585static inline uint16_t
586qla24xx_calc_iocbs(uint16_t dsds)
587{
588        uint16_t iocbs;
589
590        iocbs = 1;
591        if (dsds > 1) {
592                iocbs += (dsds - 1) / 5;
593                if ((dsds - 1) % 5)
594                        iocbs++;
595        }
596        return iocbs;
597}
598
599/**
600 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
601 * IOCB types.
602 *
603 * @sp: SRB command to process
604 * @cmd_pkt: Command type 3 IOCB
605 * @tot_dsds: Total number of segments to transfer
606 */
607static inline void
608qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
609    uint16_t tot_dsds)
610{
611        uint16_t        avail_dsds;
612        uint32_t        *cur_dsd;
613        scsi_qla_host_t        *ha;
614        struct scsi_cmnd *cmd;
615        struct scatterlist *sg;
616        int i;
617
618        cmd = sp->cmd;
619
620        /* Update entry type to indicate Command Type 3 IOCB */
621        *((uint32_t *)(&cmd_pkt->entry_type)) =
622            __constant_cpu_to_le32(COMMAND_TYPE_7);
623
624        /* No data transfer */
625        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
626                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
627                return;
628        }
629
630        ha = sp->ha;
631
632        /* Set transfer direction */
633        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
634                cmd_pkt->task_mgmt_flags =
635                    __constant_cpu_to_le16(TMF_WRITE_DATA);
636                sp->fcport->ha->qla_stats.output_bytes +=
637                    scsi_bufflen(sp->cmd);
638        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
639                cmd_pkt->task_mgmt_flags =
640                    __constant_cpu_to_le16(TMF_READ_DATA);
641                sp->fcport->ha->qla_stats.input_bytes +=
642                    scsi_bufflen(sp->cmd);
643        }
644
645        /* One DSD is available in the Command Type 3 IOCB */
646        avail_dsds = 1;
647        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
648
649        /* Load data segments */
650
651        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
652                dma_addr_t        sle_dma;
653                cont_a64_entry_t *cont_pkt;
654
655                /* Allocate additional continuation packets? */
656                if (avail_dsds == 0) {
657                        /*
658                         * Five DSDs are available in the Continuation
659                         * Type 1 IOCB.
660                         */
661                        cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
662                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
663                        avail_dsds = 5;
664                }
665
666                sle_dma = sg_dma_address(sg);
667                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
668                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
669                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
670                avail_dsds--;
671        }
672}
673
674
675/**
676 * qla24xx_start_scsi() - Send a SCSI command to the ISP
677 * @sp: command to send to the ISP
678 *
679 * Returns non-zero if a failure occurred, else zero.
680 */
681int
682qla24xx_start_scsi(srb_t *sp)
683{
684        int                ret, nseg;
685        unsigned long   flags;
686        scsi_qla_host_t        *ha, *pha;
687        struct scsi_cmnd *cmd;
688        uint32_t        *clr_ptr;
689        uint32_t        index;
690        uint32_t        handle;
691        struct cmd_type_7 *cmd_pkt;
692        uint16_t        cnt;
693        uint16_t        req_cnt;
694        uint16_t        tot_dsds;
695        struct device_reg_24xx __iomem *reg;
696
697        /* Setup device pointers. */
698        ret = 0;
699        ha = sp->ha;
700        pha = to_qla_parent(ha);
701        reg = &ha->iobase->isp24;
702        cmd = sp->cmd;
703        /* So we know we haven't pci_map'ed anything yet */
704        tot_dsds = 0;
705
706        /* Send marker if required */
707        if (ha->marker_needed != 0) {
708                if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
709                        return QLA_FUNCTION_FAILED;
710                }
711                ha->marker_needed = 0;
712        }
713
714        /* Acquire ring specific lock */
715        spin_lock_irqsave(&pha->hardware_lock, flags);
716
717        /* Check for room in outstanding command list. */
718        handle = ha->current_outstanding_cmd;
719        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
720                handle++;
721                if (handle == MAX_OUTSTANDING_COMMANDS)
722                        handle = 1;
723                if (!ha->outstanding_cmds[handle])
724                        break;
725        }
726        if (index == MAX_OUTSTANDING_COMMANDS)
727                goto queuing_error;
728
729        /* Map the sg table so we have an accurate count of sg entries needed */
730        if (scsi_sg_count(cmd)) {
731                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
732                    scsi_sg_count(cmd), cmd->sc_data_direction);
733                if (unlikely(!nseg))
734                        goto queuing_error;
735        } else
736                nseg = 0;
737
738        tot_dsds = nseg;
739
740        req_cnt = qla24xx_calc_iocbs(tot_dsds);
741        if (ha->req_q_cnt < (req_cnt + 2)) {
742                cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
743                if (ha->req_ring_index < cnt)
744                        ha->req_q_cnt = cnt - ha->req_ring_index;
745                else
746                        ha->req_q_cnt = ha->request_q_length -
747                                (ha->req_ring_index - cnt);
748        }
749        if (ha->req_q_cnt < (req_cnt + 2))
750                goto queuing_error;
751
752        /* Build command packet. */
753        ha->current_outstanding_cmd = handle;
754        ha->outstanding_cmds[handle] = sp;
755        sp->ha = ha;
756        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
757        ha->req_q_cnt -= req_cnt;
758
759        cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
760        cmd_pkt->handle = handle;
761
762        /* Zero out remaining portion of packet. */
763        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
764        clr_ptr = (uint32_t *)cmd_pkt + 2;
765        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
766        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
767
768        /* Set NPORT-ID and LUN number*/
769        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
770        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
771        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
772        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
773        cmd_pkt->vp_index = sp->fcport->vp_idx;
774
775        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
776        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
777
778        /* Load SCSI command packet. */
779        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
780        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
781
782        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
783
784        /* Build IOCB segments */
785        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
786
787        /* Set total data segment count. */
788        cmd_pkt->entry_count = (uint8_t)req_cnt;
789        wmb();
790
791        /* Adjust ring index. */
792        ha->req_ring_index++;
793        if (ha->req_ring_index == ha->request_q_length) {
794                ha->req_ring_index = 0;
795                ha->request_ring_ptr = ha->request_ring;
796        } else
797                ha->request_ring_ptr++;
798
799        sp->flags |= SRB_DMA_VALID;
800
801        /* Set chip new ring index. */
802        WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
803        RD_REG_DWORD_RELAXED(&reg->req_q_in);                /* PCI Posting. */
804
805        /* Manage unprocessed RIO/ZIO commands in response queue. */
806        if (ha->flags.process_response_queue &&
807            ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
808                qla24xx_process_response_queue(ha);
809
810        spin_unlock_irqrestore(&pha->hardware_lock, flags);
811        return QLA_SUCCESS;
812
813queuing_error:
814        if (tot_dsds)
815                scsi_dma_unmap(cmd);
816
817        spin_unlock_irqrestore(&pha->hardware_lock, flags);
818
819        return QLA_FUNCTION_FAILED;
820}