mirror of https://github.com/xemu-project/xemu.git
simpletrace: refactor to separate responsibilities
Moved event_mapping and event_id_to_name down one level in the function call stack to keep variable instantiation and usage closer together (`process` and `run` have no use for the variables; `read_trace_records` does). Instead of passing event_mapping and event_id_to_name to the bottom of the call stack, their use is moved into `read_trace_records`. This separates responsibility and ownership of the information.

`read_record` now just reads the raw arguments from the file object by knowing the total number of bytes. Parsing the payload into specific arguments is moved up to `read_trace_records`. The special handling of dropped events is removed, as they can now be handled by the general code.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Mads Ynddal <m.ynddal@samsung.com>
Message-id: 20230926103436.25700-10-mads@ynddal.dk
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
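To illustrate the split, here is a minimal standalone sketch of the argument-decoding step that this patch moves into `read_trace_records`: `read_record` now only slices the raw payload out of the file, and the per-argument parsing happens afterwards with `struct.unpack_from`. The helper name `decode_args`, the simplified `is_string` stand-in, and the example payload are illustrative only and not part of the patch.

import struct

def is_string(arg_type):
    # Simplified stand-in for simpletrace's is_string() helper.
    return arg_type in ('const char *', 'char *')

def decode_args(arg_types, args_payload):
    """Decode a raw argument payload the way the refactored
    read_trace_records() does: a string argument is a 4-byte length
    followed by its bytes; every other argument is a 64-bit value."""
    offset = 0
    args = []
    for arg_type in arg_types:
        if is_string(arg_type):
            (length,) = struct.unpack_from('=L', args_payload, offset=offset)
            offset += 4
            args.append(args_payload[offset:offset + length])
            offset += length
        else:
            (value,) = struct.unpack_from('=Q', args_payload, offset=offset)
            offset += 8
            args.append(value)
    return tuple(args)

# Example payload: one string argument ("abc") followed by one integer (42).
payload = struct.pack('=L', 3) + b'abc' + struct.pack('=Q', 42)
print(decode_args(['const char *', 'uint64_t'], payload))  # (b'abc', 42)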
This commit is contained in:
parent 87617b9ae6
commit 6f53641a98
scripts/simpletrace.py:

@@ -31,6 +31,7 @@ record_type_event = 1
 log_header_fmt = '=QQQ'
 rec_header_fmt = '=QQII'
+rec_header_fmt_len = struct.calcsize(rec_header_fmt)
 
 
 class SimpleException(Exception):
     pass
@@ -43,35 +44,6 @@ def read_header(fobj, hfmt):
         raise SimpleException('Error reading header. Wrong filetype provided?')
     return struct.unpack(hfmt, hdr)
 
 
-def get_record(event_mapping, event_id_to_name, rechdr, fobj):
-    """Deserialize a trace record from a file into a tuple
-       (name, timestamp, pid, arg1, ..., arg6)."""
-    event_id, timestamp_ns, length, pid = rechdr
-    if event_id != dropped_event_id:
-        name = event_id_to_name[event_id]
-        try:
-            event = event_mapping[name]
-        except KeyError as e:
-            raise SimpleException(
-                f'{e} event is logged but is not declared in the trace events'
-                'file, try using trace-events-all instead.'
-            )
-
-        rec = (name, timestamp_ns, pid)
-        for type, name in event.args:
-            if is_string(type):
-                l = fobj.read(4)
-                (len,) = struct.unpack('=L', l)
-                s = fobj.read(len)
-                rec = rec + (s,)
-            else:
-                (value,) = struct.unpack('=Q', fobj.read(8))
-                rec = rec + (value,)
-    else:
-        (dropped_count,) = struct.unpack('=Q', fobj.read(8))
-        rec = ("dropped", timestamp_ns, pid, dropped_count)
-    return rec
-
-
 def get_mapping(fobj):
     (event_id, ) = struct.unpack('=Q', fobj.read(8))
     (len, ) = struct.unpack('=L', fobj.read(4))
@@ -79,10 +51,11 @@ def get_mapping(fobj):
 
     return (event_id, name)
 
 
-def read_record(event_mapping, event_id_to_name, fobj):
-    """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6)."""
-    rechdr = read_header(fobj, rec_header_fmt)
-    return get_record(event_mapping, event_id_to_name, rechdr, fobj)
+def read_record(fobj):
+    """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, args)."""
+    event_id, timestamp_ns, record_length, record_pid = read_header(fobj, rec_header_fmt)
+    args_payload = fobj.read(record_length - rec_header_fmt_len)
+    return (event_id, timestamp_ns, record_pid, args_payload)
 
 
 def read_trace_header(fobj):
     """Read and verify trace file header"""
@@ -97,17 +70,28 @@ def read_trace_header(fobj):
     if log_version != 4:
         raise ValueError(f'Log format {log_version} not supported with this QEMU release!')
 
 
-def read_trace_records(event_mapping, event_id_to_name, fobj):
-    """Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, ..., arg6).
-
-    Note that `event_id_to_name` is modified if the file contains mapping records.
+def read_trace_records(events, fobj, read_header):
+    """Deserialize trace records from a file, yielding record tuples (event, event_num, timestamp, pid, arg1, ..., arg6).
 
     Args:
         event_mapping (str -> Event): events dict, indexed by name
-        event_id_to_name (int -> str): event names dict, indexed by event ID
        fobj (file): input file
+        read_header (bool): whether headers were read from fobj
 
     """
+    frameinfo = inspect.getframeinfo(inspect.currentframe())
+    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)",
+                                frameinfo.lineno + 1, frameinfo.filename)
+
+    event_mapping = {e.name: e for e in events}
+    event_mapping["dropped"] = dropped_event
+    event_id_to_name = {dropped_event_id: "dropped"}
+
+    # If there is no header assume event ID mapping matches events list
+    if not read_header:
+        for event_id, event in enumerate(events):
+            event_id_to_name[event_id] = event.name
+
     while True:
         t = fobj.read(8)
         if len(t) == 0:
@@ -115,12 +99,35 @@ def read_trace_records(event_mapping, event_id_to_name, fobj):
 
         (rectype, ) = struct.unpack('=Q', t)
         if rectype == record_type_mapping:
-            event_id, name = get_mapping(fobj)
-            event_id_to_name[event_id] = name
+            event_id, event_name = get_mapping(fobj)
+            event_id_to_name[event_id] = event_name
         else:
-            rec = read_record(event_mapping, event_id_to_name, fobj)
-
-            yield rec
+            event_id, timestamp_ns, pid, args_payload = read_record(fobj)
+            event_name = event_id_to_name[event_id]
+
+            try:
+                event = event_mapping[event_name]
+            except KeyError as e:
+                raise SimpleException(
+                    f'{e} event is logged but is not declared in the trace events'
+                    'file, try using trace-events-all instead.'
+                )
+
+            offset = 0
+            args = []
+            for type, _ in event.args:
+                if is_string(type):
+                    (length,) = struct.unpack_from('=L', args_payload, offset=offset)
+                    offset += 4
+                    s = args_payload[offset:offset+length]
+                    offset += length
+                    args.append(s)
+                else:
+                    (value,) = struct.unpack_from('=Q', args_payload, offset=offset)
+                    offset += 8
+                    args.append(value)
+
+            yield (event_mapping[event_name], event_name, timestamp_ns, pid) + tuple(args)
 
 
 class Analyzer:
     """A trace file analyzer which processes trace records.
@@ -202,20 +209,6 @@ def process(events, log, analyzer, read_header=True):
     if read_header:
         read_trace_header(log)
 
-    frameinfo = inspect.getframeinfo(inspect.currentframe())
-    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)",
-                                frameinfo.lineno + 1, frameinfo.filename)
-    event_mapping = {"dropped": dropped_event}
-    event_id_to_name = {dropped_event_id: "dropped"}
-
-    for event in events_list:
-        event_mapping[event.name] = event
-
-    # If there is no header assume event ID mapping matches events list
-    if not read_header:
-        for event_id, event in enumerate(events_list):
-            event_id_to_name[event_id] = event.name
-
     def build_fn(analyzer, event):
         if isinstance(event, str):
             return analyzer.catchall
@@ -238,12 +231,10 @@ def process(events, log, analyzer, read_header=True):
 
     with analyzer:
         fn_cache = {}
-        for rec in read_trace_records(event_mapping, event_id_to_name, log):
-            event_num = rec[0]
-            event = event_mapping[event_num]
-            if event_num not in fn_cache:
-                fn_cache[event_num] = build_fn(analyzer, event)
-            fn_cache[event_num](event, rec)
+        for event, event_id, timestamp_ns, record_pid, *rec_args in read_trace_records(events, log, read_header):
+            if event_id not in fn_cache:
+                fn_cache[event_id] = build_fn(analyzer, event)
+            fn_cache[event_id](event, (event_id, timestamp_ns, record_pid, *rec_args))
 
     if close_log:
         log.close()
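For context, a consumer of the refactored API might look like the sketch below. This is an assumption-laden illustration, not part of the patch: the class name CountingAnalyzer and the file paths are placeholders, `process()` is assumed to accept file paths for both the trace-events file and the trace log as it historically has, and begin()/catchall()/end() are the standard Analyzer hooks; catchall() receives the event and the (event_id, timestamp_ns, pid, ...) tuple exactly as process() assembles it in this patch.

# Hypothetical analyzer script; assumes scripts/simpletrace.py is importable
# and that a trace-events-all file plus a binary trace file are available.
import simpletrace

class CountingAnalyzer(simpletrace.Analyzer):
    """Count how many times each trace event appears in the log."""
    def begin(self):
        self.counts = {}

    def catchall(self, event, rec):
        # rec is (event_id, timestamp_ns, pid, arg1, ...) as assembled in process().
        self.counts[event.name] = self.counts.get(event.name, 0) + 1

    def end(self):
        for name, count in sorted(self.counts.items()):
            print(f'{name} {count}')

if __name__ == '__main__':
    # 'trace-events-all' and 'trace-12345' are placeholder paths.
    simpletrace.process('trace-events-all', 'trace-12345', CountingAnalyzer())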