mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-24 17:23:25 -05:00
[NETLINK]: Fix unicast timeouts
Commit ed6dcf4a in the history.git tree broke netlink_unicast timeouts by moving the schedule_timeout() call to a new function that doesn't propagate the remaining timeout back to the caller. This means on each retry we start with the full timeout again. ipc/mqueue.c seems to actually want to wait indefinitely so this behaviour is retained. Signed-off-by: Patrick McHardy <kaber@trash.net> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
230140cffa
commit
c3d8d1e30c
3 changed files with 10 additions and 8 deletions
|
@@ -192,7 +192,7 @@ extern int netlink_unregister_notifier(struct notifier_block *nb);
|
||||||
/* finegrained unicast helpers: */
|
/* finegrained unicast helpers: */
|
||||||
struct sock *netlink_getsockbyfilp(struct file *filp);
|
struct sock *netlink_getsockbyfilp(struct file *filp);
|
||||||
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
|
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
|
||||||
long timeo, struct sock *ssk);
|
long *timeo, struct sock *ssk);
|
||||||
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
|
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
|
||||||
int netlink_sendskb(struct sock *sk, struct sk_buff *skb);
|
int netlink_sendskb(struct sock *sk, struct sk_buff *skb);
|
||||||
|
|
||||||
|
|
|
@@ -1010,6 +1010,8 @@ asmlinkage long sys_mq_notify(mqd_t mqdes,
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
if (notification.sigev_notify == SIGEV_THREAD) {
|
if (notification.sigev_notify == SIGEV_THREAD) {
|
||||||
|
long timeo;
|
||||||
|
|
||||||
/* create the notify skb */
|
/* create the notify skb */
|
||||||
nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
|
nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
|
@@ -1038,8 +1040,8 @@ retry:
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = netlink_attachskb(sock, nc, 0,
|
timeo = MAX_SCHEDULE_TIMEOUT;
|
||||||
MAX_SCHEDULE_TIMEOUT, NULL);
|
ret = netlink_attachskb(sock, nc, 0, &timeo, NULL);
|
||||||
if (ret == 1)
|
if (ret == 1)
|
||||||
goto retry;
|
goto retry;
|
||||||
if (ret) {
|
if (ret) {
|
||||||
|
|
|
@@ -752,7 +752,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
|
||||||
* 1: repeat lookup - reference dropped while waiting for socket memory.
|
* 1: repeat lookup - reference dropped while waiting for socket memory.
|
||||||
*/
|
*/
|
||||||
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
|
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
|
||||||
long timeo, struct sock *ssk)
|
long *timeo, struct sock *ssk)
|
||||||
{
|
{
|
||||||
struct netlink_sock *nlk;
|
struct netlink_sock *nlk;
|
||||||
|
|
||||||
|
@@ -761,7 +761,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
|
||||||
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
|
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
|
||||||
test_bit(0, &nlk->state)) {
|
test_bit(0, &nlk->state)) {
|
||||||
DECLARE_WAITQUEUE(wait, current);
|
DECLARE_WAITQUEUE(wait, current);
|
||||||
if (!timeo) {
|
if (!*timeo) {
|
||||||
if (!ssk || netlink_is_kernel(ssk))
|
if (!ssk || netlink_is_kernel(ssk))
|
||||||
netlink_overrun(sk);
|
netlink_overrun(sk);
|
||||||
sock_put(sk);
|
sock_put(sk);
|
||||||
|
@@ -775,7 +775,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
|
||||||
if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
|
if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
|
||||||
test_bit(0, &nlk->state)) &&
|
test_bit(0, &nlk->state)) &&
|
||||||
!sock_flag(sk, SOCK_DEAD))
|
!sock_flag(sk, SOCK_DEAD))
|
||||||
timeo = schedule_timeout(timeo);
|
*timeo = schedule_timeout(*timeo);
|
||||||
|
|
||||||
__set_current_state(TASK_RUNNING);
|
__set_current_state(TASK_RUNNING);
|
||||||
remove_wait_queue(&nlk->wait, &wait);
|
remove_wait_queue(&nlk->wait, &wait);
|
||||||
|
@@ -783,7 +783,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
|
||||||
|
|
||||||
if (signal_pending(current)) {
|
if (signal_pending(current)) {
|
||||||
kfree_skb(skb);
|
kfree_skb(skb);
|
||||||
return sock_intr_errno(timeo);
|
return sock_intr_errno(*timeo);
|
||||||
}
|
}
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
@@ -877,7 +877,7 @@ retry:
|
||||||
if (netlink_is_kernel(sk))
|
if (netlink_is_kernel(sk))
|
||||||
return netlink_unicast_kernel(sk, skb);
|
return netlink_unicast_kernel(sk, skb);
|
||||||
|
|
||||||
err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
|
err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
|
||||||
if (err == 1)
|
if (err == 1)
|
||||||
goto retry;
|
goto retry;
|
||||||
if (err)
|
if (err)
|
||||||
|
|
Loading…
Add table
Reference in a new issue