From: Chris Wright <chrisw@osdl.org>

Add a user_struct to the sigqueue structure.  Charge sigqueue allocation and
destruction to the user_struct rather than a global pool.  This per user
rlimit accounting obsoletes the global queued_signals accounting.


---

 25-akpm/include/linux/signal.h |    1 +
 25-akpm/kernel/signal.c        |   17 +++++++++++------
 2 files changed, 12 insertions(+), 6 deletions(-)

diff -puN include/linux/signal.h~rlim-enforce-rlimits-on-queued-signals include/linux/signal.h
--- 25/include/linux/signal.h~rlim-enforce-rlimits-on-queued-signals	Tue May 11 15:21:15 2004
+++ 25-akpm/include/linux/signal.h	Tue May 11 15:21:15 2004
@@ -19,6 +19,7 @@ struct sigqueue {
 	spinlock_t *lock;
 	int flags;
 	siginfo_t info;
+	struct user_struct *user;
 };
 
 /* flags values. */
diff -puN kernel/signal.c~rlim-enforce-rlimits-on-queued-signals kernel/signal.c
--- 25/kernel/signal.c~rlim-enforce-rlimits-on-queued-signals	Tue May 11 15:21:15 2004
+++ 25-akpm/kernel/signal.c	Tue May 11 15:21:15 2004
@@ -264,17 +264,19 @@ next_signal(struct sigpending *pending, 
 	return sig;
 }
 
-struct sigqueue *__sigqueue_alloc(void)
+static struct sigqueue *__sigqueue_alloc(void)
 {
 	struct sigqueue *q = 0;
 
-	if (atomic_read(&nr_queued_signals) < max_queued_signals)
+	if (atomic_read(&current->user->sigpending) <
+			current->rlim[RLIMIT_SIGPENDING].rlim_cur)
 		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
 	if (q) {
-		atomic_inc(&nr_queued_signals);
 		INIT_LIST_HEAD(&q->list);
 		q->flags = 0;
 		q->lock = 0;
+		q->user = get_uid(current->user);
+		atomic_inc(&q->user->sigpending);
 	}
 	return(q);
 }
@@ -283,8 +285,9 @@ static inline void __sigqueue_free(struc
 {
 	if (q->flags & SIGQUEUE_PREALLOC)
 		return;
+	atomic_dec(&q->user->sigpending);
+	free_uid(q->user);
 	kmem_cache_free(sigqueue_cachep, q);
-	atomic_dec(&nr_queued_signals);
 }
 
 static void flush_sigqueue(struct sigpending *queue)
@@ -719,12 +722,14 @@ static int send_signal(int sig, struct s
 	   make sure at least one signal gets delivered and don't
 	   pass on the info struct.  */
 
-	if (atomic_read(&nr_queued_signals) < max_queued_signals)
+	if (atomic_read(&t->user->sigpending) <
+			t->rlim[RLIMIT_SIGPENDING].rlim_cur)
 		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
 
 	if (q) {
-		atomic_inc(&nr_queued_signals);
 		q->flags = 0;
+		q->user = get_uid(t->user);
+		atomic_inc(&q->user->sigpending);
 		list_add_tail(&q->list, &signals->list);
 		switch ((unsigned long) info) {
 		case 0:

_