
rpmsg: glink: Use spinlock in tx path

Switch the tx_lock to a spinlock so that clients can use rpmsg_trysend()
from atomic context.
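
As an illustration (not part of this commit), here is a minimal sketch of what
this enables on the client side: a hypothetical rpmsg client doing a
non-blocking send from its interrupt handler, where a mutex could not be
taken. Only rpmsg_trysend() is the real API; the handler, device pointer and
payload below are made up.

/* Hypothetical client-side sketch, not part of this commit. */
#include <linux/interrupt.h>
#include <linux/rpmsg.h>

struct demo_client {
	struct rpmsg_device *rpdev;	/* saved in the client's probe() */
};

static irqreturn_t demo_irq_handler(int irq, void *data)
{
	struct demo_client *client = data;
	u32 event = 1;			/* made-up payload */
	int ret;

	/*
	 * rpmsg_trysend() never sleeps; with tx_lock now a spinlock the
	 * glink tx path is safe to enter from atomic context.  If the TX
	 * FIFO is full the call fails instead of blocking.
	 */
	ret = rpmsg_trysend(client->rpdev->ept, &event, sizeof(event));
	if (ret)
		pr_warn_ratelimited("demo: tx fifo full, dropping event\n");

	return IRQ_HANDLED;
}

rpmsg_send(), by contrast, maps to the wait == true path and may still sleep
in the usleep_range() loop, so it remains unusable from atomic context.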

In order to allow clients to sleep while waiting for space in the FIFO,
we release the lock temporarily around the delay. This polling should
eventually be replaced by sending a READ_NOTIF and waiting for the remote
to signal us that space has been made available.
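
For reference, a rough sketch of what that suggested replacement could look
like, assuming a made-up completion (tx_avail_notify) in struct qcom_glink
that the rx path would complete when the remote sends a READ_NOTIF, and a
made-up helper that requests the notification; none of this is in the commit
below.

/*
 * Hypothetical follow-up, not part of this commit: wait for TX FIFO space
 * by requesting a READ_NOTIF instead of polling.  Caller holds tx_lock
 * (taken with spin_lock_irqsave()).
 */
static void qcom_glink_wait_tx_avail(struct qcom_glink *glink,
				     unsigned int tlen, unsigned long *flags)
{
	while (qcom_glink_tx_avail(glink) < tlen) {
		/* Ask the remote to notify us once it reads the FIFO. */
		qcom_glink_send_read_notify(glink);		/* made-up helper */

		/* Sleep without holding tx_lock until the rx path signals. */
		spin_unlock_irqrestore(&glink->tx_lock, *flags);
		wait_for_completion(&glink->tx_avail_notify);	/* made-up field */
		spin_lock_irqsave(&glink->tx_lock, *flags);
	}
}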

Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Commit: 29fc9b3873
Parent: 33e3820dda
Author: Bjorn Andersson <bjorn.andersson@linaro.org>
Date:   2018-02-13 11:04:04 -08:00

--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -113,7 +113,7 @@ struct qcom_glink {
 	spinlock_t rx_lock;
 	struct list_head rx_queue;
 
-	struct mutex tx_lock;
+	spinlock_t tx_lock;
 
 	spinlock_t idr_lock;
 	struct idr lcids;
@@ -288,15 +288,14 @@ static int qcom_glink_tx(struct qcom_glink *glink,
 			 const void *data, size_t dlen, bool wait)
 {
 	unsigned int tlen = hlen + dlen;
-	int ret;
+	unsigned long flags;
+	int ret = 0;
 
 	/* Reject packets that are too big */
 	if (tlen >= glink->tx_pipe->length)
 		return -EINVAL;
 
-	ret = mutex_lock_interruptible(&glink->tx_lock);
-	if (ret)
-		return ret;
+	spin_lock_irqsave(&glink->tx_lock, flags);
 
 	while (qcom_glink_tx_avail(glink) < tlen) {
 		if (!wait) {
@@ -304,7 +303,12 @@ static int qcom_glink_tx(struct qcom_glink *glink,
 			goto out;
 		}
 
+		/* Wait without holding the tx_lock */
+		spin_unlock_irqrestore(&glink->tx_lock, flags);
+
 		usleep_range(10000, 15000);
+
+		spin_lock_irqsave(&glink->tx_lock, flags);
 	}
 
 	qcom_glink_tx_write(glink, hdr, hlen, data, dlen);
@@ -313,7 +317,7 @@ static int qcom_glink_tx(struct qcom_glink *glink,
 	mbox_client_txdone(glink->mbox_chan, 0);
 
 out:
-	mutex_unlock(&glink->tx_lock);
+	spin_unlock_irqrestore(&glink->tx_lock, flags);
 
 	return ret;
 }
@@ -1567,7 +1571,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
 	glink->features = features;
 	glink->intentless = intentless;
 
-	mutex_init(&glink->tx_lock);
+	spin_lock_init(&glink->tx_lock);
 	spin_lock_init(&glink->rx_lock);
 	INIT_LIST_HEAD(&glink->rx_queue);
 	INIT_WORK(&glink->rx_work, qcom_glink_work);