/* drivers/misc/uid_sys_stats.c
 *
 * Copyright (C) 2014 - 2015 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rtmutex.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define UID_HASH_BITS	10
DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);

static DEFINE_RT_MUTEX(uid_lock);
static struct proc_dir_entry *cpu_parent;
static struct proc_dir_entry *io_parent;
static struct proc_dir_entry *proc_parent;

struct io_stats {
	u64 read_bytes;
	u64 write_bytes;
	u64 rchar;
	u64 wchar;
	u64 fsync;
};

#define UID_STATE_FOREGROUND	0
#define UID_STATE_BACKGROUND	1
#define UID_STATE_BUCKET_SIZE	2

#define UID_STATE_TOTAL_CURR	2
#define UID_STATE_TOTAL_LAST	3
#define UID_STATE_DEAD_TASKS	4
#define UID_STATE_SIZE		5
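
/*
 * Layout of the io[] arrays indexed by the states above:
 * FOREGROUND and BACKGROUND are the exported per-state buckets;
 * TOTAL_CURR is a snapshot of the live tasks' counters taken at
 * update time, TOTAL_LAST is the snapshot from the previous update,
 * and DEAD_TASKS accumulates the counters of tasks that exited in
 * between.  compute_io_bucket_stats() folds
 * (TOTAL_CURR + DEAD_TASKS - TOTAL_LAST) into the bucket for the
 * uid's current state.
 */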

#define MAX_TASK_COMM_LEN 256

struct task_entry {
	char comm[MAX_TASK_COMM_LEN];
	pid_t pid;
	struct io_stats io[UID_STATE_SIZE];
	struct hlist_node hash;
};

struct uid_entry {
	uid_t uid;
	u64 utime;
	u64 stime;
	u64 active_utime;
	u64 active_stime;
	int state;
	struct io_stats io[UID_STATE_SIZE];
	struct hlist_node hash;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
	DECLARE_HASHTABLE(task_entries, UID_HASH_BITS);
#endif
};

static u64 compute_write_bytes(struct task_struct *task)
{
	if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes)
		return 0;

	return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
}
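
/*
 * compute_write_bytes() example: with ioac.write_bytes == 8192 and
 * ioac.cancelled_write_bytes == 4096 (e.g. after a truncate), the
 * result is 4096; if cancelled bytes exceed written bytes it clamps
 * to 0 rather than underflowing the unsigned subtraction.
 */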

static void compute_io_bucket_stats(struct io_stats *io_bucket,
					struct io_stats *io_curr,
					struct io_stats *io_last,
					struct io_stats *io_dead)
{
	/*
	 * Tasks can switch to another uid group, but their io_last in
	 * the previous uid group may still be positive.  Therefore,
	 * guard against underflow before each update.
	 */
	int64_t delta;

	delta = io_curr->read_bytes + io_dead->read_bytes -
		io_last->read_bytes;
	io_bucket->read_bytes += delta > 0 ? delta : 0;
	delta = io_curr->write_bytes + io_dead->write_bytes -
		io_last->write_bytes;
	io_bucket->write_bytes += delta > 0 ? delta : 0;
	delta = io_curr->rchar + io_dead->rchar - io_last->rchar;
	io_bucket->rchar += delta > 0 ? delta : 0;
	delta = io_curr->wchar + io_dead->wchar - io_last->wchar;
	io_bucket->wchar += delta > 0 ? delta : 0;
	delta = io_curr->fsync + io_dead->fsync - io_last->fsync;
	io_bucket->fsync += delta > 0 ? delta : 0;

	io_last->read_bytes = io_curr->read_bytes;
	io_last->write_bytes = io_curr->write_bytes;
	io_last->rchar = io_curr->rchar;
	io_last->wchar = io_curr->wchar;
	io_last->fsync = io_curr->fsync;

	memset(io_dead, 0, sizeof(struct io_stats));
}
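
/*
 * compute_io_bucket_stats() worked example: if a uid's live tasks
 * currently show rchar == 100 (io_curr), tasks that exited since the
 * last update contributed rchar == 20 (io_dead), and the previous
 * snapshot was rchar == 90 (io_last), then delta = 100 + 20 - 90 = 30
 * is added to the bucket, io_last becomes 100 and io_dead is cleared.
 */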

#ifdef CONFIG_UID_SYS_STATS_DEBUG
static void get_full_task_comm(struct task_entry *task_entry,
		struct task_struct *task)
{
	int i = 0, offset = 0, len = 0;
	/* save one byte for terminating null character */
	int unused_len = MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1;
	char buf[MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1];
	struct mm_struct *mm = task->mm;

	/* fill the first TASK_COMM_LEN bytes with the thread name */
	__get_task_comm(task_entry->comm, TASK_COMM_LEN, task);
	i = strlen(task_entry->comm);
	while (i < TASK_COMM_LEN)
		task_entry->comm[i++] = ' ';

	/* next the executable file name */
	if (mm) {
		mmap_write_lock(mm);
		if (mm->exe_file) {
			char *pathname = d_path(&mm->exe_file->f_path, buf,
					unused_len);

			if (!IS_ERR(pathname)) {
				len = strlcpy(task_entry->comm + i, pathname,
						unused_len);
				i += len;
				task_entry->comm[i++] = ' ';
				unused_len--;
			}
		}
		mmap_write_unlock(mm);
	}
	unused_len -= len;

	/*
	 * Fill the rest with the command line arguments, replacing each
	 * null or newline character between args in argv with whitespace.
	 */
	len = get_cmdline(task, buf, unused_len);
	while (offset < len) {
		if (buf[offset] != '\0' && buf[offset] != '\n')
			task_entry->comm[i++] = buf[offset];
		else
			task_entry->comm[i++] = ' ';
		offset++;
	}

	/*
	 * Get rid of trailing whitespace, e.g. when an arg was memset to
	 * zero before being reset in userspace.
	 */
	while (task_entry->comm[i-1] == ' ')
		i--;
	task_entry->comm[i] = '\0';
}
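
/*
 * get_full_task_comm() produces, schematically (values illustrative):
 *
 *   "<thread name padded to TASK_COMM_LEN> <exe path> <argv joined by spaces>"
 *
 * e.g. "mythread         /system/bin/mytool --verbose 1"
 */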

static struct task_entry *find_task_entry(struct uid_entry *uid_entry,
		struct task_struct *task)
{
	struct task_entry *task_entry;

	hash_for_each_possible(uid_entry->task_entries, task_entry, hash,
			task->pid) {
		if (task->pid == task_entry->pid) {
			/* if thread name changed, update the entire command */
			int len = strnchr(task_entry->comm, ' ', TASK_COMM_LEN)
				- task_entry->comm;

			if (strncmp(task_entry->comm, task->comm, len))
				get_full_task_comm(task_entry, task);
			return task_entry;
		}
	}
	return NULL;
}

static struct task_entry *find_or_register_task(struct uid_entry *uid_entry,
		struct task_struct *task)
{
	struct task_entry *task_entry;
	pid_t pid = task->pid;

	task_entry = find_task_entry(uid_entry, task);
	if (task_entry)
		return task_entry;

	task_entry = kzalloc(sizeof(struct task_entry), GFP_ATOMIC);
	if (!task_entry)
		return NULL;

	get_full_task_comm(task_entry, task);

	task_entry->pid = pid;
	hash_add(uid_entry->task_entries, &task_entry->hash, (unsigned int)pid);

	return task_entry;
}

static void remove_uid_tasks(struct uid_entry *uid_entry)
{
	struct task_entry *task_entry;
	unsigned long bkt_task;
	struct hlist_node *tmp_task;

	hash_for_each_safe(uid_entry->task_entries, bkt_task,
			tmp_task, task_entry, hash) {
		hash_del(&task_entry->hash);
		kfree(task_entry);
	}
}

static void set_io_uid_tasks_zero(struct uid_entry *uid_entry)
{
	struct task_entry *task_entry;
	unsigned long bkt_task;

	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
		memset(&task_entry->io[UID_STATE_TOTAL_CURR], 0,
			sizeof(struct io_stats));
	}
}

static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
		struct task_struct *task, int slot)
{
	struct task_entry *task_entry = find_or_register_task(uid_entry, task);
	struct io_stats *task_io_slot;

	/* kzalloc in find_or_register_task() can fail; skip the sample */
	if (!task_entry)
		return;

	task_io_slot = &task_entry->io[slot];
	task_io_slot->read_bytes += task->ioac.read_bytes;
	task_io_slot->write_bytes += compute_write_bytes(task);
	task_io_slot->rchar += task->ioac.rchar;
	task_io_slot->wchar += task->ioac.wchar;
	task_io_slot->fsync += task->ioac.syscfs;
}

static void compute_io_uid_tasks(struct uid_entry *uid_entry)
{
	struct task_entry *task_entry;
	unsigned long bkt_task;

	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
		compute_io_bucket_stats(&task_entry->io[uid_entry->state],
					&task_entry->io[UID_STATE_TOTAL_CURR],
					&task_entry->io[UID_STATE_TOTAL_LAST],
					&task_entry->io[UID_STATE_DEAD_TASKS]);
	}
}

static void show_io_uid_tasks(struct seq_file *m, struct uid_entry *uid_entry)
{
	struct task_entry *task_entry;
	unsigned long bkt_task;

	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
		/* Separated by comma because space exists in task comm */
		seq_printf(m, "task,%s,%lu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n",
			task_entry->comm,
			(unsigned long)task_entry->pid,
			task_entry->io[UID_STATE_FOREGROUND].rchar,
			task_entry->io[UID_STATE_FOREGROUND].wchar,
			task_entry->io[UID_STATE_FOREGROUND].read_bytes,
			task_entry->io[UID_STATE_FOREGROUND].write_bytes,
			task_entry->io[UID_STATE_BACKGROUND].rchar,
			task_entry->io[UID_STATE_BACKGROUND].wchar,
			task_entry->io[UID_STATE_BACKGROUND].read_bytes,
			task_entry->io[UID_STATE_BACKGROUND].write_bytes,
			task_entry->io[UID_STATE_FOREGROUND].fsync,
			task_entry->io[UID_STATE_BACKGROUND].fsync);
	}
}
#else
static void remove_uid_tasks(struct uid_entry *uid_entry) {}
static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {}
static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
		struct task_struct *task, int slot) {}
static void compute_io_uid_tasks(struct uid_entry *uid_entry) {}
static void show_io_uid_tasks(struct seq_file *m,
		struct uid_entry *uid_entry) {}
#endif

static struct uid_entry *find_uid_entry(uid_t uid)
{
	struct uid_entry *uid_entry;

	hash_for_each_possible(hash_table, uid_entry, hash, uid) {
		if (uid_entry->uid == uid)
			return uid_entry;
	}
	return NULL;
}

static struct uid_entry *find_or_register_uid(uid_t uid)
{
	struct uid_entry *uid_entry;

	uid_entry = find_uid_entry(uid);
	if (uid_entry)
		return uid_entry;

	uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
	if (!uid_entry)
		return NULL;

	uid_entry->uid = uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
	hash_init(uid_entry->task_entries);
#endif
	hash_add(hash_table, &uid_entry->hash, uid);

	return uid_entry;
}

static int uid_cputime_show(struct seq_file *m, void *v)
{
	struct uid_entry *uid_entry = NULL;
	struct task_struct *task, *temp;
	struct user_namespace *user_ns = current_user_ns();
	u64 utime;
	u64 stime;
	unsigned long bkt;
	uid_t uid;

	rt_mutex_lock(&uid_lock);

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		uid_entry->active_stime = 0;
		uid_entry->active_utime = 0;
	}

	rcu_read_lock();
	do_each_thread(temp, task) {
		uid = from_kuid_munged(user_ns, task_uid(task));
		if (!uid_entry || uid_entry->uid != uid)
			uid_entry = find_or_register_uid(uid);
		if (!uid_entry) {
			rcu_read_unlock();
			rt_mutex_unlock(&uid_lock);
			pr_err("%s: failed to find the uid_entry for uid %d\n",
				__func__, uid);
			return -ENOMEM;
		}
		/* avoid double accounting of dying threads */
		if (!(task->flags & PF_EXITING)) {
			task_cputime_adjusted(task, &utime, &stime);
			uid_entry->active_utime += utime;
			uid_entry->active_stime += stime;
		}
	} while_each_thread(temp, task);
	rcu_read_unlock();

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		u64 total_utime = uid_entry->utime +
					uid_entry->active_utime;
		u64 total_stime = uid_entry->stime +
					uid_entry->active_stime;

		seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
			ktime_to_us(total_utime), ktime_to_us(total_stime));
	}

	rt_mutex_unlock(&uid_lock);
	return 0;
}

static int uid_cputime_open(struct inode *inode, struct file *file)
{
	return single_open(file, uid_cputime_show, PDE_DATA(inode));
}

static const struct proc_ops uid_cputime_fops = {
	.proc_open	= uid_cputime_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};
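
/*
 * Reading /proc/uid_cputime/show_uid_stat produces one line per uid,
 * "<uid>: <utime> <stime>" with both times in microseconds, e.g.
 * (values illustrative):
 *
 *   0: 123456 654321
 *   1000: 9876543 1234567
 */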

static int uid_remove_open(struct inode *inode, struct file *file)
{
	return single_open(file, NULL, NULL);
}

static ssize_t uid_remove_write(struct file *file,
			const char __user *buffer, size_t count, loff_t *ppos)
{
	struct uid_entry *uid_entry;
	struct hlist_node *tmp;
	char uids[128];
	char *start_uid, *end_uid = NULL;
	long int uid_start = 0, uid_end = 0;

	if (count >= sizeof(uids))
		count = sizeof(uids) - 1;

	if (copy_from_user(uids, buffer, count))
		return -EFAULT;

	uids[count] = '\0';
	end_uid = uids;
	start_uid = strsep(&end_uid, "-");

	if (!start_uid || !end_uid)
		return -EINVAL;

	if (kstrtol(start_uid, 10, &uid_start) != 0 ||
		kstrtol(end_uid, 10, &uid_end) != 0) {
		return -EINVAL;
	}

	rt_mutex_lock(&uid_lock);

	for (; uid_start <= uid_end; uid_start++) {
		hash_for_each_possible_safe(hash_table, uid_entry, tmp,
						hash, (uid_t)uid_start) {
			if (uid_start == uid_entry->uid) {
				remove_uid_tasks(uid_entry);
				hash_del(&uid_entry->hash);
				kfree(uid_entry);
			}
		}
	}

	rt_mutex_unlock(&uid_lock);
	return count;
}

static const struct proc_ops uid_remove_fops = {
	.proc_open	= uid_remove_open,
	.proc_release	= single_release,
	.proc_write	= uid_remove_write,
};
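
/*
 * Usage: write an inclusive "start-end" uid range to
 * /proc/uid_cputime/remove_uid_range, e.g.
 *
 *   echo "1000-1005" > /proc/uid_cputime/remove_uid_range
 *
 * A single uid must still be written as a range ("1000-1000"); the
 * parser requires the '-' separator.
 */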

static void add_uid_io_stats(struct uid_entry *uid_entry,
			struct task_struct *task, int slot)
{
	struct io_stats *io_slot = &uid_entry->io[slot];

	/* avoid double accounting of dying threads */
	if (slot != UID_STATE_DEAD_TASKS && (task->flags & PF_EXITING))
		return;

	io_slot->read_bytes += task->ioac.read_bytes;
	io_slot->write_bytes += compute_write_bytes(task);
	io_slot->rchar += task->ioac.rchar;
	io_slot->wchar += task->ioac.wchar;
	io_slot->fsync += task->ioac.syscfs;

	add_uid_tasks_io_stats(uid_entry, task, slot);
}

static void update_io_stats_all_locked(void)
{
	struct uid_entry *uid_entry = NULL;
	struct task_struct *task, *temp;
	struct user_namespace *user_ns = current_user_ns();
	unsigned long bkt;
	uid_t uid;

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
			sizeof(struct io_stats));
		set_io_uid_tasks_zero(uid_entry);
	}

	rcu_read_lock();
	do_each_thread(temp, task) {
		uid = from_kuid_munged(user_ns, task_uid(task));
		if (!uid_entry || uid_entry->uid != uid)
			uid_entry = find_or_register_uid(uid);
		if (!uid_entry)
			continue;
		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
	} while_each_thread(temp, task);
	rcu_read_unlock();

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
					&uid_entry->io[UID_STATE_TOTAL_CURR],
					&uid_entry->io[UID_STATE_TOTAL_LAST],
					&uid_entry->io[UID_STATE_DEAD_TASKS]);
		compute_io_uid_tasks(uid_entry);
	}
}

static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
{
	struct task_struct *task, *temp;
	struct user_namespace *user_ns = current_user_ns();

	memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
		sizeof(struct io_stats));
	set_io_uid_tasks_zero(uid_entry);

	rcu_read_lock();
	do_each_thread(temp, task) {
		if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid)
			continue;
		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
	} while_each_thread(temp, task);
	rcu_read_unlock();

	compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
				&uid_entry->io[UID_STATE_TOTAL_CURR],
				&uid_entry->io[UID_STATE_TOTAL_LAST],
				&uid_entry->io[UID_STATE_DEAD_TASKS]);
	compute_io_uid_tasks(uid_entry);
}

static int uid_io_show(struct seq_file *m, void *v)
{
	struct uid_entry *uid_entry;
	unsigned long bkt;

	rt_mutex_lock(&uid_lock);

	update_io_stats_all_locked();

	hash_for_each(hash_table, bkt, uid_entry, hash) {
		seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
			uid_entry->uid,
			uid_entry->io[UID_STATE_FOREGROUND].rchar,
			uid_entry->io[UID_STATE_FOREGROUND].wchar,
			uid_entry->io[UID_STATE_FOREGROUND].read_bytes,
			uid_entry->io[UID_STATE_FOREGROUND].write_bytes,
			uid_entry->io[UID_STATE_BACKGROUND].rchar,
			uid_entry->io[UID_STATE_BACKGROUND].wchar,
			uid_entry->io[UID_STATE_BACKGROUND].read_bytes,
			uid_entry->io[UID_STATE_BACKGROUND].write_bytes,
			uid_entry->io[UID_STATE_FOREGROUND].fsync,
			uid_entry->io[UID_STATE_BACKGROUND].fsync);

		show_io_uid_tasks(m, uid_entry);
	}

	rt_mutex_unlock(&uid_lock);
	return 0;
}

static int uid_io_open(struct inode *inode, struct file *file)
{
	return single_open(file, uid_io_show, PDE_DATA(inode));
}

static const struct proc_ops uid_io_fops = {
	.proc_open	= uid_io_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};
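
/*
 * Each uid line of /proc/uid_io/stats is, in order:
 *
 *   <uid> <fg rchar> <fg wchar> <fg read_bytes> <fg write_bytes>
 *   <bg rchar> <bg wchar> <bg read_bytes> <bg write_bytes>
 *   <fg fsync> <bg fsync>
 *
 * all on one line.  With CONFIG_UID_SYS_STATS_DEBUG set, per-task
 * "task,..." lines follow each uid line (see show_io_uid_tasks()).
 */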

static int uid_procstat_open(struct inode *inode, struct file *file)
{
	return single_open(file, NULL, NULL);
}

static ssize_t uid_procstat_write(struct file *file,
			const char __user *buffer, size_t count, loff_t *ppos)
{
	struct uid_entry *uid_entry;
	uid_t uid;
	int argc, state;
	char input[128];

	if (count >= sizeof(input))
		return -EINVAL;

	if (copy_from_user(input, buffer, count))
		return -EFAULT;

	input[count] = '\0';

	argc = sscanf(input, "%u %d", &uid, &state);
	if (argc != 2)
		return -EINVAL;

	if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND)
		return -EINVAL;

	rt_mutex_lock(&uid_lock);

	uid_entry = find_or_register_uid(uid);
	if (!uid_entry) {
		rt_mutex_unlock(&uid_lock);
		return -EINVAL;
	}

	if (uid_entry->state == state) {
		rt_mutex_unlock(&uid_lock);
		return count;
	}

	update_io_stats_uid_locked(uid_entry);

	uid_entry->state = state;

	rt_mutex_unlock(&uid_lock);

	return count;
}

static const struct proc_ops uid_procstat_fops = {
	.proc_open	= uid_procstat_open,
	.proc_release	= single_release,
	.proc_write	= uid_procstat_write,
};
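
/*
 * Usage: write "<uid> <state>" to /proc/uid_procstat/set, where state
 * is 0 (UID_STATE_FOREGROUND) or 1 (UID_STATE_BACKGROUND), e.g.
 *
 *   echo "1000 1" > /proc/uid_procstat/set
 *
 * A state change first folds the I/O accumulated so far into the old
 * state's bucket, so counters stay attributed to the state in which
 * they occurred.
 */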

static int process_notifier(struct notifier_block *self,
			unsigned long cmd, void *v)
{
	struct task_struct *task = v;
	struct uid_entry *uid_entry;
	u64 utime, stime;
	uid_t uid;

	if (!task)
		return NOTIFY_OK;

	rt_mutex_lock(&uid_lock);
	uid = from_kuid_munged(current_user_ns(), task_uid(task));
	uid_entry = find_or_register_uid(uid);
	if (!uid_entry) {
		pr_err("%s: failed to find uid %d\n", __func__, uid);
		goto exit;
	}

	task_cputime_adjusted(task, &utime, &stime);
	uid_entry->utime += utime;
	uid_entry->stime += stime;

	add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);

exit:
	rt_mutex_unlock(&uid_lock);
	return NOTIFY_OK;
}

static struct notifier_block process_notifier_block = {
	.notifier_call	= process_notifier,
};

static int __init proc_uid_sys_stats_init(void)
{
	hash_init(hash_table);

	cpu_parent = proc_mkdir("uid_cputime", NULL);
	if (!cpu_parent) {
		pr_err("%s: failed to create uid_cputime proc entry\n",
			__func__);
		goto err;
	}

	proc_create_data("remove_uid_range", 0222, cpu_parent,
		&uid_remove_fops, NULL);
	proc_create_data("show_uid_stat", 0444, cpu_parent,
		&uid_cputime_fops, NULL);

	io_parent = proc_mkdir("uid_io", NULL);
	if (!io_parent) {
		pr_err("%s: failed to create uid_io proc entry\n",
			__func__);
		goto err;
	}

	proc_create_data("stats", 0444, io_parent,
		&uid_io_fops, NULL);

	proc_parent = proc_mkdir("uid_procstat", NULL);
	if (!proc_parent) {
		pr_err("%s: failed to create uid_procstat proc entry\n",
			__func__);
		goto err;
	}

	proc_create_data("set", 0222, proc_parent,
		&uid_procstat_fops, NULL);

	profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);

	return 0;

err:
	remove_proc_subtree("uid_cputime", NULL);
	remove_proc_subtree("uid_io", NULL);
	remove_proc_subtree("uid_procstat", NULL);
	return -ENOMEM;
}

early_initcall(proc_uid_sys_stats_init);