1# This file is part of pybootchartgui. 2 3# pybootchartgui is free software: you can redistribute it and/or modify 4# it under the terms of the GNU General Public License as published by 5# the Free Software Foundation, either version 3 of the License, or 6# (at your option) any later version. 7 8# pybootchartgui is distributed in the hope that it will be useful, 9# but WITHOUT ANY WARRANTY; without even the implied warranty of 10# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11# GNU General Public License for more details. 12 13# You should have received a copy of the GNU General Public License 14# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>. 15 16import os 17import string 18import re 19import sys 20import tarfile 21import time 22from collections import defaultdict 23from functools import reduce 24 25from .samples import * 26from .process_tree import ProcessTree 27 28if sys.version_info >= (3, 0): 29 long = int 30 31# Parsing produces as its end result a 'Trace' 32 33class Trace: 34 def __init__(self, writer, paths, options): 35 self.processes = {} 36 self.start = {} 37 self.end = {} 38 self.min = None 39 self.max = None 40 self.headers = None 41 self.disk_stats = [] 42 self.ps_stats = None 43 self.taskstats = None 44 self.cpu_stats = [] 45 self.cmdline = None 46 self.kernel = None 47 self.kernel_tree = None 48 self.filename = None 49 self.parent_map = None 50 self.mem_stats = [] 51 self.monitor_disk = None 52 self.times = [] # Always empty, but expected by draw.py when drawing system charts. 53 54 if len(paths): 55 parse_paths (writer, self, paths) 56 if not self.valid(): 57 raise ParseError("empty state: '%s' does not contain a valid bootchart" % ", ".join(paths)) 58 59 if options.full_time: 60 self.min = min(self.start.keys()) 61 self.max = max(self.end.keys()) 62 63 64 # Rendering system charts depends on start and end 65 # time. Provide them where the original drawing code expects 66 # them, i.e. in proc_tree. 67 class BitbakeProcessTree: 68 def __init__(self, start_time, end_time): 69 self.start_time = start_time 70 self.end_time = end_time 71 self.duration = self.end_time - self.start_time 72 self.proc_tree = BitbakeProcessTree(min(self.start.keys()), 73 max(self.end.keys())) 74 75 76 return 77 78 # Turn that parsed information into something more useful 79 # link processes into a tree of pointers, calculate statistics 80 self.compile(writer) 81 82 # Crop the chart to the end of the first idle period after the given 83 # process 84 if options.crop_after: 85 idle = self.crop (writer, options.crop_after) 86 else: 87 idle = None 88 89 # Annotate other times as the first start point of given process lists 90 self.times = [ idle ] 91 if options.annotate: 92 for procnames in options.annotate: 93 names = [x[:15] for x in procnames.split(",")] 94 for proc in self.ps_stats.process_map.values(): 95 if proc.cmd in names: 96 self.times.append(proc.start_time) 97 break 98 else: 99 self.times.append(None) 100 101 self.proc_tree = ProcessTree(writer, self.kernel, self.ps_stats, 102 self.ps_stats.sample_period, 103 self.headers.get("profile.process"), 104 options.prune, idle, self.taskstats, 105 self.parent_map is not None) 106 107 if self.kernel is not None: 108 self.kernel_tree = ProcessTree(writer, self.kernel, None, 0, 109 self.headers.get("profile.process"), 110 False, None, None, True) 111 112 def valid(self): 113 return len(self.processes) != 0 114 return self.headers != None and self.disk_stats != None and \ 115 self.ps_stats != None and self.cpu_stats != None 116 117 def add_process(self, process, start, end): 118 self.processes[process] = [start, end] 119 if start not in self.start: 120 self.start[start] = [] 121 if process not in self.start[start]: 122 self.start[start].append(process) 123 if end not in self.end: 124 self.end[end] = [] 125 if process not in self.end[end]: 126 self.end[end].append(process) 127 128 def compile(self, writer): 129 130 def find_parent_id_for(pid): 131 if pid == 0: 132 return 0 133 ppid = self.parent_map.get(pid) 134 if ppid: 135 # many of these double forks are so short lived 136 # that we have no samples, or process info for them 137 # so climb the parent hierarcy to find one 138 if int (ppid * 1000) not in self.ps_stats.process_map: 139# print "Pid '%d' short lived with no process" % ppid 140 ppid = find_parent_id_for (ppid) 141# else: 142# print "Pid '%d' has an entry" % ppid 143 else: 144# print "Pid '%d' missing from pid map" % pid 145 return 0 146 return ppid 147 148 # merge in the cmdline data 149 if self.cmdline is not None: 150 for proc in self.ps_stats.process_map.values(): 151 rpid = int (proc.pid // 1000) 152 if rpid in self.cmdline: 153 cmd = self.cmdline[rpid] 154 proc.exe = cmd['exe'] 155 proc.args = cmd['args'] 156# else: 157# print "proc %d '%s' not in cmdline" % (rpid, proc.exe) 158 159 # re-parent any stray orphans if we can 160 if self.parent_map is not None: 161 for process in self.ps_stats.process_map.values(): 162 ppid = find_parent_id_for (int(process.pid // 1000)) 163 if ppid: 164 process.ppid = ppid * 1000 165 166 # stitch the tree together with pointers 167 for process in self.ps_stats.process_map.values(): 168 process.set_parent (self.ps_stats.process_map) 169 170 # count on fingers variously 171 for process in self.ps_stats.process_map.values(): 172 process.calc_stats (self.ps_stats.sample_period) 173 174 def crop(self, writer, crop_after): 175 176 def is_idle_at(util, start, j): 177 k = j + 1 178 while k < len(util) and util[k][0] < start + 300: 179 k += 1 180 k = min(k, len(util)-1) 181 182 if util[j][1] >= 0.25: 183 return False 184 185 avgload = sum(u[1] for u in util[j:k+1]) / (k-j+1) 186 if avgload < 0.25: 187 return True 188 else: 189 return False 190 def is_idle(util, start): 191 for j in range(0, len(util)): 192 if util[j][0] < start: 193 continue 194 return is_idle_at(util, start, j) 195 else: 196 return False 197 198 names = [x[:15] for x in crop_after.split(",")] 199 for proc in self.ps_stats.process_map.values(): 200 if proc.cmd in names or proc.exe in names: 201 writer.info("selected proc '%s' from list (start %d)" 202 % (proc.cmd, proc.start_time)) 203 break 204 if proc is None: 205 writer.warn("no selected crop proc '%s' in list" % crop_after) 206 207 208 cpu_util = [(sample.time, sample.user + sample.sys + sample.io) for sample in self.cpu_stats] 209 disk_util = [(sample.time, sample.util) for sample in self.disk_stats] 210 211 idle = None 212 for i in range(0, len(cpu_util)): 213 if cpu_util[i][0] < proc.start_time: 214 continue 215 if is_idle_at(cpu_util, cpu_util[i][0], i) \ 216 and is_idle(disk_util, cpu_util[i][0]): 217 idle = cpu_util[i][0] 218 break 219 220 if idle is None: 221 writer.warn ("not idle after proc '%s'" % crop_after) 222 return None 223 224 crop_at = idle + 300 225 writer.info ("cropping at time %d" % crop_at) 226 while len (self.cpu_stats) \ 227 and self.cpu_stats[-1].time > crop_at: 228 self.cpu_stats.pop() 229 while len (self.disk_stats) \ 230 and self.disk_stats[-1].time > crop_at: 231 self.disk_stats.pop() 232 233 self.ps_stats.end_time = crop_at 234 235 cropped_map = {} 236 for key, value in self.ps_stats.process_map.items(): 237 if (value.start_time <= crop_at): 238 cropped_map[key] = value 239 240 for proc in cropped_map.values(): 241 proc.duration = min (proc.duration, crop_at - proc.start_time) 242 while len (proc.samples) \ 243 and proc.samples[-1].time > crop_at: 244 proc.samples.pop() 245 246 self.ps_stats.process_map = cropped_map 247 248 return idle 249 250 251 252class ParseError(Exception): 253 """Represents errors during parse of the bootchart.""" 254 def __init__(self, value): 255 self.value = value 256 257 def __str__(self): 258 return self.value 259 260def _parse_headers(file): 261 """Parses the headers of the bootchart.""" 262 def parse(acc, line): 263 (headers, last) = acc 264 if '=' in line: 265 last, value = map (lambda x: x.strip(), line.split('=', 1)) 266 else: 267 value = line.strip() 268 headers[last] += value 269 return headers, last 270 return reduce(parse, file.read().split('\n'), (defaultdict(str),''))[0] 271 272def _parse_timed_blocks(file): 273 """Parses (ie., splits) a file into so-called timed-blocks. A 274 timed-block consists of a timestamp on a line by itself followed 275 by zero or more lines of data for that point in time.""" 276 def parse(block): 277 lines = block.split('\n') 278 if not lines: 279 raise ParseError('expected a timed-block consisting a timestamp followed by data lines') 280 try: 281 return (int(lines[0]), lines[1:]) 282 except ValueError: 283 raise ParseError("expected a timed-block, but timestamp '%s' is not an integer" % lines[0]) 284 blocks = file.read().split('\n\n') 285 return [parse(block) for block in blocks if block.strip() and not block.endswith(' not running\n')] 286 287def _parse_proc_ps_log(writer, file): 288 """ 289 * See proc(5) for details. 290 * 291 * {pid, comm, state, ppid, pgrp, session, tty_nr, tpgid, flags, minflt, cminflt, majflt, cmajflt, utime, stime, 292 * cutime, cstime, priority, nice, 0, itrealvalue, starttime, vsize, rss, rlim, startcode, endcode, startstack, 293 * kstkesp, kstkeip} 294 """ 295 processMap = {} 296 ltime = 0 297 timed_blocks = _parse_timed_blocks(file) 298 for time, lines in timed_blocks: 299 for line in lines: 300 if not line: continue 301 tokens = line.split(' ') 302 if len(tokens) < 21: 303 continue 304 305 offset = [index for index, token in enumerate(tokens[1:]) if token[-1] == ')'][0] 306 pid, cmd, state, ppid = int(tokens[0]), ' '.join(tokens[1:2+offset]), tokens[2+offset], int(tokens[3+offset]) 307 userCpu, sysCpu, stime = int(tokens[13+offset]), int(tokens[14+offset]), int(tokens[21+offset]) 308 309 # magic fixed point-ness ... 310 pid *= 1000 311 ppid *= 1000 312 if pid in processMap: 313 process = processMap[pid] 314 process.cmd = cmd.strip('()') # why rename after latest name?? 315 else: 316 process = Process(writer, pid, cmd.strip('()'), ppid, min(time, stime)) 317 processMap[pid] = process 318 319 if process.last_user_cpu_time is not None and process.last_sys_cpu_time is not None and ltime is not None: 320 userCpuLoad, sysCpuLoad = process.calc_load(userCpu, sysCpu, max(1, time - ltime)) 321 cpuSample = CPUSample('null', userCpuLoad, sysCpuLoad, 0.0) 322 process.samples.append(ProcessSample(time, state, cpuSample)) 323 324 process.last_user_cpu_time = userCpu 325 process.last_sys_cpu_time = sysCpu 326 ltime = time 327 328 if len (timed_blocks) < 2: 329 return None 330 331 startTime = timed_blocks[0][0] 332 avgSampleLength = (ltime - startTime)/(len (timed_blocks) - 1) 333 334 return ProcessStats (writer, processMap, len (timed_blocks), avgSampleLength, startTime, ltime) 335 336def _parse_taskstats_log(writer, file): 337 """ 338 * See bootchart-collector.c for details. 339 * 340 * { pid, ppid, comm, cpu_run_real_total, blkio_delay_total, swapin_delay_total } 341 * 342 """ 343 processMap = {} 344 pidRewrites = {} 345 ltime = None 346 timed_blocks = _parse_timed_blocks(file) 347 for time, lines in timed_blocks: 348 # we have no 'stime' from taskstats, so prep 'init' 349 if ltime is None: 350 process = Process(writer, 1, '[init]', 0, 0) 351 processMap[1000] = process 352 ltime = time 353# continue 354 for line in lines: 355 if not line: continue 356 tokens = line.split(' ') 357 if len(tokens) != 6: 358 continue 359 360 opid, ppid, cmd = int(tokens[0]), int(tokens[1]), tokens[2] 361 cpu_ns, blkio_delay_ns, swapin_delay_ns = long(tokens[-3]), long(tokens[-2]), long(tokens[-1]), 362 363 # make space for trees of pids 364 opid *= 1000 365 ppid *= 1000 366 367 # when the process name changes, we re-write the pid. 368 if opid in pidRewrites: 369 pid = pidRewrites[opid] 370 else: 371 pid = opid 372 373 cmd = cmd.strip('(').strip(')') 374 if pid in processMap: 375 process = processMap[pid] 376 if process.cmd != cmd: 377 pid += 1 378 pidRewrites[opid] = pid 379# print "process mutation ! '%s' vs '%s' pid %s -> pid %s\n" % (process.cmd, cmd, opid, pid) 380 process = process.split (writer, pid, cmd, ppid, time) 381 processMap[pid] = process 382 else: 383 process.cmd = cmd; 384 else: 385 process = Process(writer, pid, cmd, ppid, time) 386 processMap[pid] = process 387 388 delta_cpu_ns = (float) (cpu_ns - process.last_cpu_ns) 389 delta_blkio_delay_ns = (float) (blkio_delay_ns - process.last_blkio_delay_ns) 390 delta_swapin_delay_ns = (float) (swapin_delay_ns - process.last_swapin_delay_ns) 391 392 # make up some state data ... 393 if delta_cpu_ns > 0: 394 state = "R" 395 elif delta_blkio_delay_ns + delta_swapin_delay_ns > 0: 396 state = "D" 397 else: 398 state = "S" 399 400 # retain the ns timing information into a CPUSample - that tries 401 # with the old-style to be a %age of CPU used in this time-slice. 402 if delta_cpu_ns + delta_blkio_delay_ns + delta_swapin_delay_ns > 0: 403# print "proc %s cpu_ns %g delta_cpu %g" % (cmd, cpu_ns, delta_cpu_ns) 404 cpuSample = CPUSample('null', delta_cpu_ns, 0.0, 405 delta_blkio_delay_ns, 406 delta_swapin_delay_ns) 407 process.samples.append(ProcessSample(time, state, cpuSample)) 408 409 process.last_cpu_ns = cpu_ns 410 process.last_blkio_delay_ns = blkio_delay_ns 411 process.last_swapin_delay_ns = swapin_delay_ns 412 ltime = time 413 414 if len (timed_blocks) < 2: 415 return None 416 417 startTime = timed_blocks[0][0] 418 avgSampleLength = (ltime - startTime)/(len(timed_blocks)-1) 419 420 return ProcessStats (writer, processMap, len (timed_blocks), avgSampleLength, startTime, ltime) 421 422def _parse_proc_stat_log(file): 423 samples = [] 424 ltimes = None 425 for time, lines in _parse_timed_blocks(file): 426 # skip emtpy lines 427 if not lines: 428 continue 429 # CPU times {user, nice, system, idle, io_wait, irq, softirq} 430 tokens = lines[0].split() 431 times = [ int(token) for token in tokens[1:] ] 432 if ltimes: 433 user = float((times[0] + times[1]) - (ltimes[0] + ltimes[1])) 434 system = float((times[2] + times[5] + times[6]) - (ltimes[2] + ltimes[5] + ltimes[6])) 435 idle = float(times[3] - ltimes[3]) 436 iowait = float(times[4] - ltimes[4]) 437 438 aSum = max(user + system + idle + iowait, 1) 439 samples.append( CPUSample(time, user/aSum, system/aSum, iowait/aSum) ) 440 441 ltimes = times 442 # skip the rest of statistics lines 443 return samples 444 445def _parse_reduced_log(file, sample_class): 446 samples = [] 447 for time, lines in _parse_timed_blocks(file): 448 samples.append(sample_class(time, *[float(x) for x in lines[0].split()])) 449 return samples 450 451def _parse_proc_disk_stat_log(file): 452 """ 453 Parse file for disk stats, but only look at the whole device, eg. sda, 454 not sda1, sda2 etc. The format of relevant lines should be: 455 {major minor name rio rmerge rsect ruse wio wmerge wsect wuse running use aveq} 456 """ 457 disk_regex_re = re.compile ('^([hsv]d.|mtdblock\d|mmcblk\d|cciss/c\d+d\d+.*)$') 458 459 # this gets called an awful lot. 460 def is_relevant_line(linetokens): 461 if len(linetokens) != 14: 462 return False 463 disk = linetokens[2] 464 return disk_regex_re.match(disk) 465 466 disk_stat_samples = [] 467 468 for time, lines in _parse_timed_blocks(file): 469 sample = DiskStatSample(time) 470 relevant_tokens = [linetokens for linetokens in map (lambda x: x.split(),lines) if is_relevant_line(linetokens)] 471 472 for tokens in relevant_tokens: 473 disk, rsect, wsect, use = tokens[2], int(tokens[5]), int(tokens[9]), int(tokens[12]) 474 sample.add_diskdata([rsect, wsect, use]) 475 476 disk_stat_samples.append(sample) 477 478 disk_stats = [] 479 for sample1, sample2 in zip(disk_stat_samples[:-1], disk_stat_samples[1:]): 480 interval = sample1.time - sample2.time 481 if interval == 0: 482 interval = 1 483 sums = [ a - b for a, b in zip(sample1.diskdata, sample2.diskdata) ] 484 readTput = sums[0] / 2.0 * 100.0 / interval 485 writeTput = sums[1] / 2.0 * 100.0 / interval 486 util = float( sums[2] ) / 10 / interval 487 util = max(0.0, min(1.0, util)) 488 disk_stats.append(DiskSample(sample2.time, readTput, writeTput, util)) 489 490 return disk_stats 491 492def _parse_reduced_proc_meminfo_log(file): 493 """ 494 Parse file for global memory statistics with 495 'MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree' values 496 (in that order) directly stored on one line. 497 """ 498 used_values = ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree',) 499 500 mem_stats = [] 501 for time, lines in _parse_timed_blocks(file): 502 sample = MemSample(time) 503 for name, value in zip(used_values, lines[0].split()): 504 sample.add_value(name, int(value)) 505 506 if sample.valid(): 507 mem_stats.append(DrawMemSample(sample)) 508 509 return mem_stats 510 511def _parse_proc_meminfo_log(file): 512 """ 513 Parse file for global memory statistics. 514 The format of relevant lines should be: ^key: value( unit)? 515 """ 516 used_values = ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree',) 517 518 mem_stats = [] 519 meminfo_re = re.compile(r'([^ \t:]+):\s*(\d+).*') 520 521 for time, lines in _parse_timed_blocks(file): 522 sample = MemSample(time) 523 524 for line in lines: 525 match = meminfo_re.match(line) 526 if not match: 527 raise ParseError("Invalid meminfo line \"%s\"" % line) 528 sample.add_value(match.group(1), int(match.group(2))) 529 530 if sample.valid(): 531 mem_stats.append(DrawMemSample(sample)) 532 533 return mem_stats 534 535def _parse_monitor_disk_log(file): 536 """ 537 Parse file with information about amount of diskspace used. 538 The format of relevant lines should be: ^volume path: number-of-bytes? 539 """ 540 disk_stats = [] 541 diskinfo_re = re.compile(r'^(.+):\s*(\d+)$') 542 543 for time, lines in _parse_timed_blocks(file): 544 sample = DiskSpaceSample(time) 545 546 for line in lines: 547 match = diskinfo_re.match(line) 548 if not match: 549 raise ParseError("Invalid monitor_disk line \"%s\"" % line) 550 sample.add_value(match.group(1), int(match.group(2))) 551 552 if sample.valid(): 553 disk_stats.append(sample) 554 555 return disk_stats 556 557 558# if we boot the kernel with: initcall_debug printk.time=1 we can 559# get all manner of interesting data from the dmesg output 560# We turn this into a pseudo-process tree: each event is 561# characterised by a 562# we don't try to detect a "kernel finished" state - since the kernel 563# continues to do interesting things after init is called. 564# 565# sample input: 566# [ 0.000000] ACPI: FACP 3f4fc000 000F4 (v04 INTEL Napa 00000001 MSFT 01000013) 567# ... 568# [ 0.039993] calling migration_init+0x0/0x6b @ 1 569# [ 0.039993] initcall migration_init+0x0/0x6b returned 1 after 0 usecs 570def _parse_dmesg(writer, file): 571 timestamp_re = re.compile ("^\[\s*(\d+\.\d+)\s*]\s+(.*)$") 572 split_re = re.compile ("^(\S+)\s+([\S\+_-]+) (.*)$") 573 processMap = {} 574 idx = 0 575 inc = 1.0 / 1000000 576 kernel = Process(writer, idx, "k-boot", 0, 0.1) 577 processMap['k-boot'] = kernel 578 base_ts = False 579 max_ts = 0 580 for line in file.read().split('\n'): 581 t = timestamp_re.match (line) 582 if t is None: 583# print "duff timestamp " + line 584 continue 585 586 time_ms = float (t.group(1)) * 1000 587 # looks like we may have a huge diff after the clock 588 # has been set up. This could lead to huge graph: 589 # so huge we will be killed by the OOM. 590 # So instead of using the plain timestamp we will 591 # use a delta to first one and skip the first one 592 # for convenience 593 if max_ts == 0 and not base_ts and time_ms > 1000: 594 base_ts = time_ms 595 continue 596 max_ts = max(time_ms, max_ts) 597 if base_ts: 598# print "fscked clock: used %f instead of %f" % (time_ms - base_ts, time_ms) 599 time_ms -= base_ts 600 m = split_re.match (t.group(2)) 601 602 if m is None: 603 continue 604# print "match: '%s'" % (m.group(1)) 605 type = m.group(1) 606 func = m.group(2) 607 rest = m.group(3) 608 609 if t.group(2).startswith ('Write protecting the') or \ 610 t.group(2).startswith ('Freeing unused kernel memory'): 611 kernel.duration = time_ms / 10 612 continue 613 614# print "foo: '%s' '%s' '%s'" % (type, func, rest) 615 if type == "calling": 616 ppid = kernel.pid 617 p = re.match ("\@ (\d+)", rest) 618 if p is not None: 619 ppid = float (p.group(1)) // 1000 620# print "match: '%s' ('%g') at '%s'" % (func, ppid, time_ms) 621 name = func.split ('+', 1) [0] 622 idx += inc 623 processMap[func] = Process(writer, ppid + idx, name, ppid, time_ms / 10) 624 elif type == "initcall": 625# print "finished: '%s' at '%s'" % (func, time_ms) 626 if func in processMap: 627 process = processMap[func] 628 process.duration = (time_ms / 10) - process.start_time 629 else: 630 print("corrupted init call for %s" % (func)) 631 632 elif type == "async_waiting" or type == "async_continuing": 633 continue # ignore 634 635 return processMap.values() 636 637# 638# Parse binary pacct accounting file output if we have one 639# cf. /usr/include/linux/acct.h 640# 641def _parse_pacct(writer, file): 642 # read LE int32 643 def _read_le_int32(file): 644 byts = file.read(4) 645 return (ord(byts[0])) | (ord(byts[1]) << 8) | \ 646 (ord(byts[2]) << 16) | (ord(byts[3]) << 24) 647 648 parent_map = {} 649 parent_map[0] = 0 650 while file.read(1) != "": # ignore flags 651 ver = file.read(1) 652 if ord(ver) < 3: 653 print("Invalid version 0x%x" % (ord(ver))) 654 return None 655 656 file.seek (14, 1) # user, group etc. 657 pid = _read_le_int32 (file) 658 ppid = _read_le_int32 (file) 659# print "Parent of %d is %d" % (pid, ppid) 660 parent_map[pid] = ppid 661 file.seek (4 + 4 + 16, 1) # timings 662 file.seek (16, 1) # acct_comm 663 return parent_map 664 665def _parse_paternity_log(writer, file): 666 parent_map = {} 667 parent_map[0] = 0 668 for line in file.read().split('\n'): 669 if not line: 670 continue 671 elems = line.split(' ') # <Child> <Parent> 672 if len (elems) >= 2: 673# print "paternity of %d is %d" % (int(elems[0]), int(elems[1])) 674 parent_map[int(elems[0])] = int(elems[1]) 675 else: 676 print("Odd paternity line '%s'" % (line)) 677 return parent_map 678 679def _parse_cmdline_log(writer, file): 680 cmdLines = {} 681 for block in file.read().split('\n\n'): 682 lines = block.split('\n') 683 if len (lines) >= 3: 684# print "Lines '%s'" % (lines[0]) 685 pid = int (lines[0]) 686 values = {} 687 values['exe'] = lines[1].lstrip(':') 688 args = lines[2].lstrip(':').split('\0') 689 args.pop() 690 values['args'] = args 691 cmdLines[pid] = values 692 return cmdLines 693 694def _parse_bitbake_buildstats(writer, state, filename, file): 695 paths = filename.split("/") 696 task = paths[-1] 697 pn = paths[-2] 698 start = None 699 end = None 700 for line in file: 701 if line.startswith("Started:"): 702 start = int(float(line.split()[-1])) 703 elif line.startswith("Ended:"): 704 end = int(float(line.split()[-1])) 705 if start and end: 706 state.add_process(pn + ":" + task, start, end) 707 708def get_num_cpus(headers): 709 """Get the number of CPUs from the system.cpu header property. As the 710 CPU utilization graphs are relative, the number of CPUs currently makes 711 no difference.""" 712 if headers is None: 713 return 1 714 if headers.get("system.cpu.num"): 715 return max (int (headers.get("system.cpu.num")), 1) 716 cpu_model = headers.get("system.cpu") 717 if cpu_model is None: 718 return 1 719 mat = re.match(".*\\((\\d+)\\)", cpu_model) 720 if mat is None: 721 return 1 722 return max (int(mat.group(1)), 1) 723 724def _do_parse(writer, state, filename, file): 725 writer.info("parsing '%s'" % filename) 726 t1 = time.process_time() 727 name = os.path.basename(filename) 728 if name == "proc_diskstats.log": 729 state.disk_stats = _parse_proc_disk_stat_log(file) 730 elif name == "reduced_proc_diskstats.log": 731 state.disk_stats = _parse_reduced_log(file, DiskSample) 732 elif name == "proc_stat.log": 733 state.cpu_stats = _parse_proc_stat_log(file) 734 elif name == "reduced_proc_stat.log": 735 state.cpu_stats = _parse_reduced_log(file, CPUSample) 736 elif name == "proc_meminfo.log": 737 state.mem_stats = _parse_proc_meminfo_log(file) 738 elif name == "reduced_proc_meminfo.log": 739 state.mem_stats = _parse_reduced_proc_meminfo_log(file) 740 elif name == "cmdline2.log": 741 state.cmdline = _parse_cmdline_log(writer, file) 742 elif name == "monitor_disk.log": 743 state.monitor_disk = _parse_monitor_disk_log(file) 744 elif not filename.endswith('.log'): 745 _parse_bitbake_buildstats(writer, state, filename, file) 746 t2 = time.process_time() 747 writer.info(" %s seconds" % str(t2-t1)) 748 return state 749 750def parse_file(writer, state, filename): 751 if state.filename is None: 752 state.filename = filename 753 basename = os.path.basename(filename) 754 with open(filename, "r") as file: 755 return _do_parse(writer, state, filename, file) 756 757def parse_paths(writer, state, paths): 758 for path in paths: 759 if state.filename is None: 760 state.filename = path 761 root, extension = os.path.splitext(path) 762 if not(os.path.exists(path)): 763 writer.warn("warning: path '%s' does not exist, ignoring." % path) 764 continue 765 #state.filename = path 766 if os.path.isdir(path): 767 files = sorted([os.path.join(path, f) for f in os.listdir(path)]) 768 state = parse_paths(writer, state, files) 769 elif extension in [".tar", ".tgz", ".gz"]: 770 if extension == ".gz": 771 root, extension = os.path.splitext(root) 772 if extension != ".tar": 773 writer.warn("warning: can only handle zipped tar files, not zipped '%s'-files; ignoring" % extension) 774 continue 775 tf = None 776 try: 777 writer.status("parsing '%s'" % path) 778 tf = tarfile.open(path, 'r:*') 779 for name in tf.getnames(): 780 state = _do_parse(writer, state, name, tf.extractfile(name)) 781 except tarfile.ReadError as error: 782 raise ParseError("error: could not read tarfile '%s': %s." % (path, error)) 783 finally: 784 if tf != None: 785 tf.close() 786 else: 787 state = parse_file(writer, state, path) 788 return state 789 790def split_res(res, options): 791 """ Split the res into n pieces """ 792 res_list = [] 793 if options.num > 1: 794 s_list = sorted(res.start.keys()) 795 frag_size = len(s_list) / float(options.num) 796 # Need the top value 797 if frag_size > int(frag_size): 798 frag_size = int(frag_size + 1) 799 else: 800 frag_size = int(frag_size) 801 802 start = 0 803 end = frag_size 804 while start < end: 805 state = Trace(None, [], None) 806 if options.full_time: 807 state.min = min(res.start.keys()) 808 state.max = max(res.end.keys()) 809 for i in range(start, end): 810 # Add this line for reference 811 #state.add_process(pn + ":" + task, start, end) 812 for p in res.start[s_list[i]]: 813 state.add_process(p, s_list[i], res.processes[p][1]) 814 start = end 815 end = end + frag_size 816 if end > len(s_list): 817 end = len(s_list) 818 res_list.append(state) 819 else: 820 res_list.append(res) 821 return res_list 822