// SPDX-License-Identifier: GPL-2.0-only
/*
 * Read/write thread of a guest agent for virtio-trace
 *
 * Copyright (C) 2012 Hitachi, Ltd.
 * Created by Yoshihiro Yunomae <yoshihiro.yunomae.ez@hitachi.com>
 *            Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
 */

10*4882a593Smuzhiyun #define _GNU_SOURCE
11*4882a593Smuzhiyun #include <fcntl.h>
12*4882a593Smuzhiyun #include <stdio.h>
13*4882a593Smuzhiyun #include <stdlib.h>
14*4882a593Smuzhiyun #include <unistd.h>
15*4882a593Smuzhiyun #include <sys/syscall.h>
16*4882a593Smuzhiyun #include "trace-agent.h"
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #define READ_WAIT_USEC 100000
19*4882a593Smuzhiyun
rw_thread_info_new(void)20*4882a593Smuzhiyun void *rw_thread_info_new(void)
21*4882a593Smuzhiyun {
22*4882a593Smuzhiyun struct rw_thread_info *rw_ti;
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun rw_ti = zalloc(sizeof(struct rw_thread_info));
25*4882a593Smuzhiyun if (rw_ti == NULL) {
26*4882a593Smuzhiyun pr_err("rw_thread_info zalloc error\n");
27*4882a593Smuzhiyun exit(EXIT_FAILURE);
28*4882a593Smuzhiyun }
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun rw_ti->cpu_num = -1;
31*4882a593Smuzhiyun rw_ti->in_fd = -1;
32*4882a593Smuzhiyun rw_ti->out_fd = -1;
33*4882a593Smuzhiyun rw_ti->read_pipe = -1;
34*4882a593Smuzhiyun rw_ti->write_pipe = -1;
35*4882a593Smuzhiyun rw_ti->pipe_size = PIPE_INIT;
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun return rw_ti;
38*4882a593Smuzhiyun }
39*4882a593Smuzhiyun
rw_thread_init(int cpu,const char * in_path,const char * out_path,bool stdout_flag,unsigned long pipe_size,struct rw_thread_info * rw_ti)40*4882a593Smuzhiyun void *rw_thread_init(int cpu, const char *in_path, const char *out_path,
41*4882a593Smuzhiyun bool stdout_flag, unsigned long pipe_size,
42*4882a593Smuzhiyun struct rw_thread_info *rw_ti)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun int data_pipe[2];
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun rw_ti->cpu_num = cpu;
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun /* set read(input) fd */
49*4882a593Smuzhiyun rw_ti->in_fd = open(in_path, O_RDONLY);
50*4882a593Smuzhiyun if (rw_ti->in_fd == -1) {
51*4882a593Smuzhiyun pr_err("Could not open in_fd (CPU:%d)\n", cpu);
52*4882a593Smuzhiyun goto error;
53*4882a593Smuzhiyun }
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun /* set write(output) fd */
56*4882a593Smuzhiyun if (!stdout_flag) {
57*4882a593Smuzhiyun /* virtio-serial output mode */
58*4882a593Smuzhiyun rw_ti->out_fd = open(out_path, O_WRONLY);
59*4882a593Smuzhiyun if (rw_ti->out_fd == -1) {
60*4882a593Smuzhiyun pr_err("Could not open out_fd (CPU:%d)\n", cpu);
61*4882a593Smuzhiyun goto error;
62*4882a593Smuzhiyun }
63*4882a593Smuzhiyun } else
64*4882a593Smuzhiyun /* stdout mode */
65*4882a593Smuzhiyun rw_ti->out_fd = STDOUT_FILENO;
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun if (pipe2(data_pipe, O_NONBLOCK) < 0) {
68*4882a593Smuzhiyun pr_err("Could not create pipe in rw-thread(%d)\n", cpu);
69*4882a593Smuzhiyun goto error;
70*4882a593Smuzhiyun }
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun /*
73*4882a593Smuzhiyun * Size of pipe is 64kB in default based on fs/pipe.c.
74*4882a593Smuzhiyun * To read/write trace data speedy, pipe size is changed.
75*4882a593Smuzhiyun */
76*4882a593Smuzhiyun if (fcntl(*data_pipe, F_SETPIPE_SZ, pipe_size) < 0) {
77*4882a593Smuzhiyun pr_err("Could not change pipe size in rw-thread(%d)\n", cpu);
78*4882a593Smuzhiyun goto error;
79*4882a593Smuzhiyun }
80*4882a593Smuzhiyun
81*4882a593Smuzhiyun rw_ti->read_pipe = data_pipe[1];
82*4882a593Smuzhiyun rw_ti->write_pipe = data_pipe[0];
83*4882a593Smuzhiyun rw_ti->pipe_size = pipe_size;
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun return NULL;
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun error:
88*4882a593Smuzhiyun exit(EXIT_FAILURE);
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun /* Bind a thread to a cpu */
/* Pin the calling thread to CPU cpu_num; failure is logged, not fatal. */
static void bind_cpu(int cpu_num)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu_num, &mask);

	/* pid 0 selects the calling thread itself */
	if (sched_setaffinity(0, sizeof(mask), &mask) < 0)
		pr_err("Could not set CPU#%d affinity\n", (int)cpu_num);
}
103*4882a593Smuzhiyun
/*
 * Main loop of one per-cpu read/write thread.
 *
 * thread_info is a struct rw_thread_info * prepared by rw_thread_init().
 * After binding itself to its CPU, the thread repeatedly splices trace
 * data from in_fd into the intermediate pipe (read_pipe end), then from
 * the pipe (write_pipe end) out to out_fd, until global_sig_receive is
 * set.  Any splice error terminates the whole process.
 * Returns NULL on normal termination.
 */
static void *rw_thread_main(void *thread_info)
{
	ssize_t rlen, wlen;
	ssize_t ret;
	struct rw_thread_info *ts = (struct rw_thread_info *)thread_info;

	bind_cpu(ts->cpu_num);

	while (1) {
		/* Wait for a read order of trace data by Host OS */
		if (!global_run_operation) {
			/*
			 * NOTE(review): global_run_operation is tested
			 * outside mutex_notify, so a wakeup signaled between
			 * the test and pthread_cond_wait() could be missed
			 * until the next signal — TODO confirm intended.
			 */
			pthread_mutex_lock(&mutex_notify);
			pthread_cond_wait(&cond_wakeup, &mutex_notify);
			pthread_mutex_unlock(&mutex_notify);
		}

		if (global_sig_receive)
			break;

		/*
		 * Each thread read trace_pipe_raw of each cpu bounding the
		 * thread, so contention of multi-threads does not occur.
		 */
		rlen = splice(ts->in_fd, NULL, ts->read_pipe, NULL,
				ts->pipe_size, SPLICE_F_MOVE | SPLICE_F_MORE);

		if (rlen < 0) {
			pr_err("Splice_read in rw-thread(%d)\n", ts->cpu_num);
			goto error;
		} else if (rlen == 0) {
			/*
			 * If trace data do not exist or are unreadable not
			 * for exceeding the page size, splice_read returns
			 * NULL. Then, this waits for being filled the data in a
			 * ring-buffer.
			 */
			usleep(READ_WAIT_USEC);
			pr_debug("Read retry(cpu:%d)\n", ts->cpu_num);
			continue;
		}

		wlen = 0;

		/* Drain everything just spliced in; may take several writes. */
		do {
			ret = splice(ts->write_pipe, NULL, ts->out_fd, NULL,
					rlen - wlen,
					SPLICE_F_MOVE | SPLICE_F_MORE);

			if (ret < 0) {
				pr_err("Splice_write in rw-thread(%d)\n",
						ts->cpu_num);
				goto error;
			} else if (ret == 0)
				/*
				 * When host reader is not in time for reading
				 * trace data, guest will be stopped. This is
				 * because char dev in QEMU is not supported
				 * non-blocking mode. Then, writer might be
				 * sleep in that case.
				 * This sleep will be removed by supporting
				 * non-blocking mode.
				 */
				sleep(1);
			wlen += ret;
		} while (wlen < rlen);
	}

	return NULL;

error:
	exit(EXIT_FAILURE);
}
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun
rw_thread_run(struct rw_thread_info * rw_ti)178*4882a593Smuzhiyun pthread_t rw_thread_run(struct rw_thread_info *rw_ti)
179*4882a593Smuzhiyun {
180*4882a593Smuzhiyun int ret;
181*4882a593Smuzhiyun pthread_t rw_thread_per_cpu;
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun ret = pthread_create(&rw_thread_per_cpu, NULL, rw_thread_main, rw_ti);
184*4882a593Smuzhiyun if (ret != 0) {
185*4882a593Smuzhiyun pr_err("Could not create a rw thread(%d)\n", rw_ti->cpu_num);
186*4882a593Smuzhiyun exit(EXIT_FAILURE);
187*4882a593Smuzhiyun }
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun return rw_thread_per_cpu;
190*4882a593Smuzhiyun }
191