// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>

#include "dma.h"

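/*
 * Acquire the engine's "tx" and "rx" DMA slave channels and allocate a
 * single buffer that backs both the result region and the trailing "ignore"
 * region (dma->ignore_buf points QCE_RESULT_BUF_SZ bytes into
 * dma->result_buf).
 */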
int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
	int ret;

	dma->txchan = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(dma->txchan))
		return PTR_ERR(dma->txchan);

	dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(dma->rxchan)) {
		ret = PTR_ERR(dma->rxchan);
		goto error_rx;
	}

	dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
				  GFP_KERNEL);
	if (!dma->result_buf) {
		ret = -ENOMEM;
		goto error_nomem;
	}

	dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;

	return 0;
error_nomem:
	dma_release_channel(dma->rxchan);
error_rx:
	dma_release_channel(dma->txchan);
	return ret;
}

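/* Undo qce_dma_request(): drop both channels and free the result buffer. */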
void qce_dma_release(struct qce_dma_data *dma)
{
	dma_release_channel(dma->txchan);
	dma_release_channel(dma->rxchan);
	kfree(dma->result_buf);
}

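/*
 * Copy the entries of @new_sgl into the first unused slots of @sgt (a slot
 * is unused while no page has been set on it). Returns the last entry filled
 * in, or ERR_PTR(-EINVAL) if the table has no free slot left.
 */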
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
{
	struct scatterlist *sg = sgt->sgl, *sg_last = NULL;

	while (sg) {
		if (!sg_page(sg))
			break;
		sg = sg_next(sg);
	}

	if (!sg)
		return ERR_PTR(-EINVAL);

	while (new_sgl && sg) {
		sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
			    new_sgl->offset);
		sg_last = sg;
		sg = sg_next(sg);
		new_sgl = sg_next(new_sgl);
	}

	return sg_last;
}

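/*
 * Prepare a slave-sg descriptor for @sg on @chan, attach the completion
 * callback and submit it to the channel's pending queue. The transfer does
 * not start until dma_async_issue_pending() is called on the channel.
 */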
static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
			   int nents, unsigned long flags,
			   enum dma_transfer_direction dir,
			   dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (!sg || !nents)
		return -EINVAL;

	desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	desc->callback = cb;
	desc->callback_param = cb_param;
	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}

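/*
 * Queue both halves of a transaction: the rx channel pushes the source data
 * into the crypto engine (DMA_MEM_TO_DEV) and the tx channel pulls the
 * results back out (DMA_DEV_TO_MEM), which is why the completion callback is
 * attached to the tx descriptor only.
 */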
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
		     int rx_nents, struct scatterlist *tx_sg, int tx_nents,
		     dma_async_tx_callback cb, void *cb_param)
{
	struct dma_chan *rxchan = dma->rxchan;
	struct dma_chan *txchan = dma->txchan;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	int ret;

	ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
			      NULL, NULL);
	if (ret)
		return ret;

	return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
			       cb, cb_param);
}

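/* Start execution of all descriptors queued on both channels. */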
void qce_dma_issue_pending(struct qce_dma_data *dma)
{
	dma_async_issue_pending(dma->rxchan);
	dma_async_issue_pending(dma->txchan);
}

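/* Abort any descriptors still in flight on both channels. */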
int qce_dma_terminate_all(struct qce_dma_data *dma)
{
	int ret;

	ret = dmaengine_terminate_all(dma->rxchan);
	return ret ?: dmaengine_terminate_all(dma->txchan);
}
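
/*
 * A minimal sketch of how these helpers chain together for one request.
 * qce, req, the scatterlists, the nents counts and the done() callback are
 * hypothetical caller-side names, not part of this file:
 *
 *	ret = qce_dma_prep_sgs(&qce->dma, req->rx_sg, rx_nents,
 *			       req->tx_sg, tx_nents, done, req);
 *	if (ret)
 *		return ret;
 *	qce_dma_issue_pending(&qce->dma);
 *
 * On a fatal error or timeout the caller would then invoke
 * qce_dma_terminate_all(&qce->dma) to abort both channels.
 */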