/*
 * linux/block/blk-exec.c
 *
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>

#include "blk.h"
#include "blk-mq-sched.h"
  13
  14/**
  15 * blk_end_sync_rq - executes a completion event on a request
  16 * @rq: request to complete
  17 * @error: end I/O status of the request
  18 */
  19static void blk_end_sync_rq(struct request *rq, blk_status_t error)
  20{
  21        struct completion *waiting = rq->end_io_data;
  22
  23        rq->end_io_data = NULL;
  24
  25        /*
  26         * complete last, if this is a stack request the process (and thus
  27         * the rq pointer) could be invalid right after this complete()
  28         */
  29        complete(waiting);
  30}
  31
/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;

	/* Only passthrough (non-fs) requests may be issued this way */
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	/*
	 * don't check dying flag for MQ because the request won't
	 * be reused after dying flag is set
	 */
	if (q->mq_ops) {
		blk_mq_sched_insert_request(rq, at_head, true, false);
		return;
	}

	/* Legacy (single-queue) path: queue_lock protects the dying check
	 * and elevator insertion below. */
	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_dying(q))) {
		/* Queue is going away: end the request with an error, which
		 * invokes @done (per the Note above); RQF_QUIET suppresses
		 * error logging for this expected failure. */
		rq->rq_flags |= RQF_QUIET;
		__blk_end_request_all(rq, BLK_STS_IOERR);
		spin_unlock_irq(q->queue_lock);
		return;
	}

	__elv_add_request(q, rq, where);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
  82
  83/**
  84 * blk_execute_rq - insert a request into queue for execution
  85 * @q:          queue to insert the request in
  86 * @bd_disk:    matching gendisk
  87 * @rq:         request to insert
  88 * @at_head:    insert request at head or tail of queue
  89 *
  90 * Description:
  91 *    Insert a fully prepared request at the back of the I/O scheduler queue
  92 *    for execution and wait for completion.
  93 */
  94void blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
  95                   struct request *rq, int at_head)
  96{
  97        DECLARE_COMPLETION_ONSTACK(wait);
  98        unsigned long hang_check;
  99
 100        rq->end_io_data = &wait;
 101        blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
 102
 103        /* Prevent hang_check timer from firing at us during very long I/O */
 104        hang_check = sysctl_hung_task_timeout_secs;
 105        if (hang_check)
 106                while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
 107        else
 108                wait_for_completion_io(&wait);
 109}
 110EXPORT_SYMBOL(blk_execute_rq);
 111