[PATCH] qat: fix deadlock in backlog processing

I was testing QAT with dm-crypt and I got some deadlocks. The reason for
the deadlocks is this: suppose that one of the "if" conditions in
"qat_alg_send_message_maybacklog" is true and we jump to the "enqueue"
label. At this point, an interrupt comes in and clears all pending
messages. Now, the interrupt returns, we grab backlog->lock, add the
message to the backlog, drop backlog->lock - and there is no one to
remove the backlogged message out of the list and submit it.

I fixed it with this patch. I didn't want to add a spinlock to the hot
path, so I take it only if some of the conditions suggest that queuing
may be required.

Signed-off-by: Mikulas Patocka
Cc: stable@vger.kernel.org

---
 drivers/crypto/intel/qat/qat_common/qat_algs_send.c | 45 +++++++++++---------
 1 file changed, 25 insertions(+), 20 deletions(-)

Index: linux-2.6/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
===================================================================
--- linux-2.6.orig/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
+++ linux-2.6/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
@@ -40,39 +40,44 @@ void qat_alg_send_backlog(struct qat_ins
 	spin_unlock_bh(&backlog->lock);
 }
 
-static void qat_alg_backlog_req(struct qat_alg_req *req,
-				struct qat_instance_backlog *backlog)
-{
-	INIT_LIST_HEAD(&req->list);
-
-	spin_lock_bh(&backlog->lock);
-	list_add_tail(&req->list, &backlog->list);
-	spin_unlock_bh(&backlog->lock);
-}
-
-static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
+static bool qat_alg_try_to_send(struct qat_alg_req *req)
 {
 	struct qat_instance_backlog *backlog = req->backlog;
 	struct adf_etr_ring_data *tx_ring = req->tx_ring;
 	u32 *fw_req = req->fw_req;
 
 	/* If any request is already backlogged, then add to backlog list */
-	if (!list_empty(&backlog->list))
-		goto enqueue;
+	if (unlikely(!list_empty(&backlog->list)))
+		return false;
 
 	/* If ring is nearly full, then add to backlog list */
-	if (adf_ring_nearly_full(tx_ring))
-		goto enqueue;
+	if (unlikely(adf_ring_nearly_full(tx_ring)))
+		return false;
 
 	/* If adding request to HW ring fails, then add to backlog list */
-	if (adf_send_message(tx_ring, fw_req))
-		goto enqueue;
+	if (unlikely(adf_send_message(tx_ring, fw_req)))
+		return false;
 
-	return -EINPROGRESS;
+	return true;
+}
 
-enqueue:
-	qat_alg_backlog_req(req, backlog);
+static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
+{
+	struct qat_instance_backlog *backlog = req->backlog;
+
+	if (likely(qat_alg_try_to_send(req)))
+		return -EINPROGRESS;
+
+	spin_lock_bh(&backlog->lock);
+	if (unlikely(qat_alg_try_to_send(req))) {
+		spin_unlock_bh(&backlog->lock);
+		return -EINPROGRESS;
+	}
+
+	list_add_tail(&req->list, &backlog->list);
+
+	spin_unlock_bh(&backlog->lock);
 
 	return -EBUSY;
 }
 