1 | # -*- coding: utf-8 -*- |
---|
2 | """ |
---|
3 | ------------------------------------------------------------------------------ |
---|
4 | Mango 802.11 Reference Design Experiments Framework - Log Utilities |
---|
5 | ------------------------------------------------------------------------------ |
---|
6 | License: Copyright 2019 Mango Communications, Inc. All rights reserved. |
---|
7 | Use and distribution subject to terms in LICENSE.txt |
---|
8 | ------------------------------------------------------------------------------ |
---|
9 | |
---|
10 | This module provides utility functions for handling wlan_exp log data. |
---|
11 | |
---|
12 | Naming convention:: |
---|
13 | log_data -- The binary data from a wlan_exp node's log. |
---|
14 | |
---|
15 | raw_log_index -- This is an index that has not been interpreted / filtered |
---|
16 | and corresponds 1-to-1 with what is in given log_data. |
---|
17 | The defining characteristic of a raw_log_index is that |
---|
18 | the dictionary keys are all integers: |
---|
19 | { <int> : [<offsets>] } |
---|
20 | |
---|
21 | log_index -- A log_index is any index that is not a raw_log_index. In |
---|
22 | general, this will be a interpreted / filtered version of |
---|
23 | a raw_log_index. |
---|
24 | |
---|
25 | numpy -- A python package that allows easy and fast manipulation of |
---|
26 | large data sets. You can find more documentaiton on numpy at: |
---|
27 | http://www.numpy.org/ |
---|
28 | |
---|
29 | """ |
---|
30 | |
---|
31 | __all__ = ['gen_raw_log_index', |
---|
32 | 'filter_log_index', |
---|
33 | 'log_data_to_np_arrays'] |
---|
34 | |
---|
35 | |
---|
36 | # ----------------------------------------------------------------------------- |
---|
37 | # Top level check for memory configuration |
---|
38 | # ----------------------------------------------------------------------------- |
---|
39 | import sys |
---|
40 | |
---|
41 | if (sys.maxsize <= 2**32): |
---|
42 | print("\n" + ("-" * 75)) |
---|
43 | print("WARNING: Processing large log files requires 64-bit python.") |
---|
44 | print(("-" * 75) + "\n") |
---|
45 | |
---|
46 | |
---|
47 | # Fix to support Python 2.x and 3.x |
---|
48 | if sys.version[0]=="3": long=None |
---|
49 | |
---|
50 | |
---|
51 | # ----------------------------------------------------------------------------- |
---|
52 | # Log Container base class |
---|
53 | # ----------------------------------------------------------------------------- |
---|
54 | class LogContainer(object): |
---|
55 | """Base class to define a log container.""" |
---|
56 | file_handle = None |
---|
57 | |
---|
58 | def __init__(self, file_handle=None): |
---|
59 | self.file_handle = file_handle |
---|
60 | |
---|
61 | def set_file_handle(self, file_handle): |
---|
62 | self.file_handle = file_handle |
---|
63 | |
---|
64 | def is_valid(self): raise NotImplementedError |
---|
65 | |
---|
66 | def write_log_data(self, log_data, append=True): raise NotImplementedError |
---|
67 | def write_log_index(self, log_index=None): raise NotImplementedError |
---|
68 | def write_attr_dict(self, attr_dict): raise NotImplementedError |
---|
69 | |
---|
70 | def replace_log_data(self, log_data): raise NotImplementedError |
---|
71 | |
---|
72 | def get_log_data_size(self): raise NotImplementedError |
---|
73 | def get_log_data(self): raise NotImplementedError |
---|
74 | def get_log_index(self, gen_index=True): raise NotImplementedError |
---|
75 | def get_attr_dict(self): raise NotImplementedError |
---|
76 | |
---|
77 | def trim_log_data(self): raise NotImplementedError |
---|
78 | |
---|
79 | # End class() |
---|
80 | |
---|
81 | |
---|
82 | |
---|
83 | # ----------------------------------------------------------------------------- |
---|
84 | # Log Utilities |
---|
85 | # ----------------------------------------------------------------------------- |
---|
86 | def gen_raw_log_index(log_data): |
---|
87 | """Parses binary wlan_exp log data by recording the byte index of each entry. |
---|
88 | |
---|
89 | Args: |
---|
90 | log_data (bytes): Binary data from a WlanExpNode log |
---|
91 | |
---|
92 | Returns: |
---|
93 | raw_log_index (dict): |
---|
94 | Dictionary that corresponds 1-to-1 with what is in the given log_data of the |
---|
95 | form: ``{ <int> : [<offsets>] }`` |
---|
96 | |
---|
97 | The byte indexes are returned in a dictionary with the entry type IDs as keys. This |
---|
98 | method does not unpack or interpret each log entry and does not change any values |
---|
99 | in the log file itself (the log_data array argument can be read-only). |
---|
100 | |
---|
101 | Format of log entry header: |
---|
102 | :: |
---|
103 | |
---|
104 | typedef struct{ |
---|
105 | u32 delimiter; |
---|
106 | u16 entry_type; |
---|
107 | u16 entry_length; |
---|
108 | } entry_header; |
---|
109 | |
---|
110 | fmt_log_hdr = 'I H H' # Using struct.unpack |
---|
111 | |
---|
112 | """ |
---|
113 | |
---|
114 | offset = 0 |
---|
115 | hdr_size = 8 |
---|
116 | log_len = len(log_data) |
---|
117 | log_index = dict() |
---|
118 | use_byte_array = 0 |
---|
119 | |
---|
120 | # If the log_data is empty, return empty dictionary |
---|
121 | if (log_len == 0): |
---|
122 | return log_index |
---|
123 | |
---|
124 | # Is data type of log_bytes a byte array or string because the data has |
---|
125 | # to be handled differently |
---|
126 | try: |
---|
127 | byte_array_test = log_data[offset:offset+hdr_size] |
---|
128 | byte_array_test = ord(byte_array_test[0]) |
---|
129 | except TypeError: |
---|
130 | use_byte_array = 1 |
---|
131 | |
---|
132 | |
---|
133 | while True: |
---|
134 | # Stop here if the next log entry header is incomplete |
---|
135 | if( (offset + hdr_size) > log_len): |
---|
136 | break |
---|
137 | |
---|
138 | # Check if entry starts with valid header. struct.unpack is the |
---|
139 | # natural way to interpret the entry header, but it's slower |
---|
140 | # than accessing the bytes directly. |
---|
141 | |
---|
142 | # hdr = unpack(fmt_log_hdr, log_bytes[offset:offset+hdr_size]) |
---|
143 | # ltk = hdr[1] |
---|
144 | # if( (hdr[0] & entries.WLAN_EXP_LOG_DELIM) != entries.WLAN_EXP_LOG_DELIM): |
---|
145 | # raise Exception("ERROR: Log file didn't start with valid entry header!") |
---|
146 | |
---|
147 | # Use raw byte slicing for better performance |
---|
148 | # Values below are hard coded to match current wlan_exp log entry formats |
---|
149 | hdr_b = log_data[offset:offset+hdr_size] |
---|
150 | |
---|
151 | if (use_byte_array): |
---|
152 | if( (bytearray(hdr_b[2:4]) != b'\xed\xac') ): |
---|
153 | raise Exception("ERROR: Log file didn't start with valid entry header (offset %d)!" % (offset)) |
---|
154 | |
---|
155 | entry_type_id = (hdr_b[4] + (hdr_b[5] * 256)) |
---|
156 | entry_size = (hdr_b[6] + (hdr_b[7] * 256)) |
---|
157 | else: |
---|
158 | if( (hdr_b[2:4] != b'\xed\xac') ): |
---|
159 | raise Exception("ERROR: Log file didn't start with valid entry header (offset %d)!" % (offset)) |
---|
160 | |
---|
161 | entry_type_id = (ord(hdr_b[4]) + (ord(hdr_b[5]) * 256)) |
---|
162 | entry_size = (ord(hdr_b[6]) + (ord(hdr_b[7]) * 256)) |
---|
163 | |
---|
164 | offset += hdr_size |
---|
165 | |
---|
166 | # Stop here if the last log entry is incomplete |
---|
167 | if( (offset + entry_size) > log_len): |
---|
168 | break |
---|
169 | |
---|
170 | # Try/except slightly faster than "if(entry_type_id in log_index.keys()):" |
---|
171 | # ~3 seconds faster (13s -> 10s) for ~1GB log file |
---|
172 | try: |
---|
173 | log_index[entry_type_id].append(offset) |
---|
174 | except KeyError: |
---|
175 | log_index[entry_type_id] = [offset] |
---|
176 | |
---|
177 | # Increment the byte offset for the next iteration |
---|
178 | offset += entry_size |
---|
179 | |
---|
180 | # Remove all NULL entries from the log_index |
---|
181 | try: |
---|
182 | del log_index[0] |
---|
183 | except KeyError: |
---|
184 | pass |
---|
185 | |
---|
186 | return log_index |
---|
187 | |
---|
188 | # End gen_log_index_raw() |
---|
189 | |
---|
190 | |
---|
191 | |
---|
192 | def filter_log_index(log_index, include_only=None, exclude=None, merge=None, verbose=False): |
---|
193 | """Parses a log index to generate a filtered log index. |
---|
194 | |
---|
195 | Args: |
---|
196 | log_index (dict): Log index dictionary (can be either a 'raw_log_index' or a |
---|
197 | previously processed 'log_index') |
---|
198 | include_only (list of WlanExpLogEntryType, optional): All WlanExpLogEntryType to |
---|
199 | include in the output log index. This takes precedence over 'exclude'. |
---|
200 | exclude (list of WlanExpLogEntryType, optional): All WlanExpLogEntryType to |
---|
201 | exclude in the output log index. This will not be used if include != None. |
---|
202 | merge (dict, optional): Dictionary of the form: |
---|
203 | ``{'WlanExpLogEntryType name': [List of 'WlanExpLogEntryTypes name' to merge]}`` |
---|
204 | |
---|
205 | Returns: |
---|
206 | log_index (dict): Filtered log index dictionary based on the given parameters |
---|
207 | |
---|
208 | |
---|
209 | Consumers, in general, cannot operate on a raw log index since that has |
---|
210 | not been converted in to log entry types. The besides filtering a log |
---|
211 | index, this method will also convert any raw index entries (ie entries |
---|
212 | with keys of type int) in to the corresponding WlanExpLogEntryTypes. |
---|
213 | |
---|
214 | Using the 'merge' argument can combine the indexes of WlanExpLogEntryTypes to |
---|
215 | create super-sets of entries. For example, to create a log index that |
---|
216 | contains all the receive events use: ``{'RX_ALL': ['RX_OFDM', 'RX_DSSS']}`` |
---|
217 | as long as the names 'RX_ALL', 'RX_OFDM', and 'RX_DSSS' are valid WlanExpLogEntryTypes. |
---|
218 | |
---|
219 | The filter follows the following basic rules: |
---|
220 | #. Every requested output (either through 'include_only' or 'merge') has a key in the output dictionary |
---|
221 | #. All input and output keys must refer to the 'name' property of valid WlanExpLogEntryType instances |
---|
222 | |
---|
223 | **Examples:** |
---|
224 | |
---|
225 | Assume: |
---|
226 | - 'A', 'B', 'C', 'D', 'M' are valid WlanExpLogEntryType instance names |
---|
227 | - The log_index = {'A': [A0, A1, A2], 'B': [B0, B1], 'C': []} |
---|
228 | |
---|
229 | * **include_only**: |
---|
230 | All names specified in 'include_only' are included as part of the |
---|
231 | output dictionary. It is then up to the consumer to check if the |
---|
232 | number of entries for a given 'name' is zero (ie the list is empty). |
---|
233 | :: |
---|
234 | |
---|
235 | x = filter_log_index(log_index, include_only=['A']) |
---|
236 | x == {'A': [A0, A1, A2]} |
---|
237 | |
---|
238 | x = filter_log_index(log_index, include_only=['A',B']) |
---|
239 | x == {'A': [A0, A1, A2], 'B': [B0, B1]} |
---|
240 | |
---|
241 | x = filter_log_index(log_index, include_only=['C']) |
---|
242 | x == {'C': []]} |
---|
243 | |
---|
244 | x = filter_log_index(log_index, include_only=['D']) |
---|
245 | x == {'D': []]} |
---|
246 | |
---|
247 | * **exclude**: |
---|
248 | All names specified in 'exclude' are removed from the output dictionary. |
---|
249 | However, there is no guarentee what other WlanExpLogEntryTypes are in |
---|
250 | the output dictionary. That depends on the entries in the input log index. |
---|
251 | :: |
---|
252 | |
---|
253 | x = filter_log_index(log_index, exclude=['B']) |
---|
254 | x == {'A': [A0, A1, A2]}, 'C': []} |
---|
255 | |
---|
256 | x = filter_log_index(log_index, exclude=['D']) |
---|
257 | WARNING: D does not exist in log index. Ignoring for exclude. |
---|
258 | x == {'A': [A0, A1, A2]}, 'B': [B0, B1], 'C': []} |
---|
259 | |
---|
260 | * **merge**: |
---|
261 | All names specified in the 'merge' are included as part of the output |
---|
262 | dictionary. It is then up to the consumer to check if the number of |
---|
263 | entries for a given 'name' is zero (ie the list is empty). |
---|
264 | :: |
---|
265 | |
---|
266 | x = filter_log_index(log_index, merge={'D': ['A', 'B']} |
---|
267 | x == {'A': [A0, A1, A2], |
---|
268 | 'B': [B0, B1], |
---|
269 | 'C': [], |
---|
270 | 'D': [A0, A1, A2, B0, B1]} |
---|
271 | |
---|
272 | x = filter_log_index(log_index, merge={'M': ['C', 'D']} |
---|
273 | x == {'A': [A0,A1,A2]}, |
---|
274 | 'B': [B0,B1], |
---|
275 | 'C': [], |
---|
276 | 'M': []} |
---|
277 | |
---|
278 | * **Combined**: |
---|
279 | Combining the behavior of 'include_only', 'exclude', and 'merge' |
---|
280 | :: |
---|
281 | |
---|
282 | x = filter_log_index(log_index, include_only=['M'], merge={'M': ['A','C']} |
---|
283 | x == {'M': [A0, A1, A2]} |
---|
284 | |
---|
285 | x = filter_log_index(log_index, include_only=['M'], merge={'M': ['A','D']} |
---|
286 | WARNING: D does not exist in log index. Ignoring for merge. |
---|
287 | x == {'M': [A0, A1, A2]} |
---|
288 | |
---|
289 | x = filter_log_index(log_index, include_only=['M'], merge={'M': ['C','D']} |
---|
290 | WARNING: D does not exist in log index. Ignoring for merge. |
---|
291 | x == {'M': []} |
---|
292 | |
---|
293 | """ |
---|
294 | from .entry_types import log_entry_types |
---|
295 | import collections |
---|
296 | |
---|
297 | ret_log_index = {} |
---|
298 | summary = "-" * 50 + "\n" |
---|
299 | summary += "Log Index Filter Summary:\n" |
---|
300 | |
---|
301 | if (include_only is not None) and (not isinstance(include_only, collections.Iterable)): |
---|
302 | raise TypeError("Parameter 'include_only' has type {0} - must be iterable.\n".format(type(include_only))) |
---|
303 | |
---|
304 | if (exclude is not None) and (not isinstance(exclude, collections.Iterable)): |
---|
305 | raise TypeError("Parameter 'exclude' has type {0} - must be iterable.\n".format(type(exclude))) |
---|
306 | |
---|
307 | if (merge is not None) and (not isinstance(merge, dict)): |
---|
308 | raise TypeError("Parameter 'merge' has type {0} - must be a dictionary.\n".format(type(merge))) |
---|
309 | |
---|
310 | # Copy the log_index to initially populate the return log index |
---|
311 | ret_log_index = dict(log_index) |
---|
312 | |
---|
313 | # Filter the log_index |
---|
314 | |
---|
315 | # Create any new log indexes through the merge dictionary |
---|
316 | if merge is not None: |
---|
317 | summary += "\nMERGE:" |
---|
318 | |
---|
319 | # For each new merged index output |
---|
320 | for k in merge.keys(): |
---|
321 | new_index = [] |
---|
322 | merge_tmp = "" |
---|
323 | |
---|
324 | for v in merge[k]: |
---|
325 | # Try to merge indexes. ret_log_index could have keys of either |
---|
326 | # type <int> or type <WlanExpLogEntryType>. Also, the value of |
---|
327 | # v in the merge list could be either a <str> or <int>. Therefore, |
---|
328 | # try both cases before ignoring the item in the list since <str> |
---|
329 | # hashes to the appropriate <WlanExpLogEntryType> but <int> does |
---|
330 | # not. |
---|
331 | index = [] |
---|
332 | |
---|
333 | try: |
---|
334 | index = ret_log_index[v] |
---|
335 | merge_tmp += " {0} ({1} entries)\n".format(log_entry_types[v], len(index)) |
---|
336 | except KeyError: |
---|
337 | try: |
---|
338 | index = ret_log_index[log_entry_types[v].entry_type_id] |
---|
339 | merge_tmp += " {0} ({1} entries)\n".format(log_entry_types[v], len(index)) |
---|
340 | except KeyError: |
---|
341 | merge_tmp += " {0} had no entries in log index. Ignored for merge.\n".format(v) |
---|
342 | |
---|
343 | new_index += index |
---|
344 | |
---|
345 | |
---|
346 | # If this merge is going to replace one of the entry types in the |
---|
347 | # current index, then delete the previous entry. This is necessary |
---|
348 | # because at this point, there is a mixture of keys, some are entry |
---|
349 | # type ids and some are log entry types. |
---|
350 | try: |
---|
351 | del ret_log_index[log_entry_types[k].entry_type_id] |
---|
352 | except KeyError: |
---|
353 | pass |
---|
354 | |
---|
355 | # Add the new merged index lists to the output dictionary |
---|
356 | # Use the type instance corresponding to the user-supplied string as the key |
---|
357 | ret_log_index[log_entry_types[k]] = sorted(new_index) |
---|
358 | |
---|
359 | summary += "\n {0} ({1} entries) contains:\n".format(log_entry_types[k], len(new_index)) |
---|
360 | summary += merge_tmp |
---|
361 | |
---|
362 | # Filter the resulting log index by 'include' / 'exclude' lists |
---|
363 | if include_only is not None: |
---|
364 | summary += "\nINCLUDE ONLY:\n" |
---|
365 | |
---|
366 | new_log_index = {} |
---|
367 | |
---|
368 | for entry_name in include_only: |
---|
369 | try: |
---|
370 | new_log_index[log_entry_types[entry_name]] = [] |
---|
371 | |
---|
372 | for k in ret_log_index.keys(): |
---|
373 | # Need to handle the case when the keys are <int> vs <WlanExpLogEntryType> |
---|
374 | if (type(k) is int) or (type(k) is long): |
---|
375 | if k == log_entry_types[entry_name].entry_type_id: |
---|
376 | new_log_index[log_entry_types[entry_name]] = ret_log_index[k] |
---|
377 | else: |
---|
378 | if k == entry_name: |
---|
379 | new_log_index[k] = ret_log_index[k] |
---|
380 | |
---|
381 | summary += " {0} added to output.\n".format(log_entry_types[entry_name]) |
---|
382 | except KeyError: |
---|
383 | summary += " {0} ignored for include. Could not find entry type with that name.\n".format(entry_name) |
---|
384 | |
---|
385 | ret_log_index = new_log_index |
---|
386 | else: |
---|
387 | if exclude is not None: |
---|
388 | summary += "\nEXCLUDE:\n" |
---|
389 | |
---|
390 | for unwanted_key in exclude: |
---|
391 | try: |
---|
392 | del ret_log_index[unwanted_key] |
---|
393 | summary += " {0} removed from index.\n".format(unwanted_key) |
---|
394 | except KeyError: |
---|
395 | summary += " {0} does not exist in log index. Ignored for exclude.\n".format(unwanted_key) |
---|
396 | |
---|
397 | |
---|
398 | # Translate the keys in the return log index to WlanExpLogEntryType |
---|
399 | ret_log_index = _translate_log_index_keys(ret_log_index) |
---|
400 | |
---|
401 | if verbose: |
---|
402 | summary += "-" * 50 + "\n" |
---|
403 | print(summary) |
---|
404 | |
---|
405 | return ret_log_index |
---|
406 | |
---|
407 | # End def |
---|
408 | |
---|
409 | |
---|
410 | |
---|
411 | def log_data_to_np_arrays(log_data, log_index): |
---|
412 | """Generate numpy structured arrays using log_data and a log_index. |
---|
413 | |
---|
414 | Args: |
---|
415 | log_data (bytes): Binary data from a WlanExpNode log |
---|
416 | log_index (dict): Log index dictionary |
---|
417 | |
---|
418 | Return: |
---|
419 | np_array (Numpy Array): Numpy structured arrays corresponding to the log_data and log_index |
---|
420 | """ |
---|
421 | entries_nd = dict() |
---|
422 | |
---|
423 | for k in log_index.keys(): |
---|
424 | # Build a structured array with one element for each byte range enumerated above |
---|
425 | # Store each array in a dictionary indexed by the log entry type |
---|
426 | try: |
---|
427 | entries_nd[k] = k.generate_numpy_array(log_data, log_index[k]) |
---|
428 | except IndexError: |
---|
429 | # No entries of the given type |
---|
430 | entries_nd[k] = [] |
---|
431 | |
---|
432 | return entries_nd |
---|
433 | |
---|
434 | # End def |
---|
435 | |
---|
436 | |
---|
437 | |
---|
438 | def _translate_log_index_keys(log_index): |
---|
439 | # Translate the keys in the return log index to WlanExpLogEntryType |
---|
440 | from .entry_types import log_entry_types |
---|
441 | |
---|
442 | new_log_index = {} |
---|
443 | |
---|
444 | for k in log_index.keys(): |
---|
445 | try: |
---|
446 | new_log_index[log_entry_types[k]] = log_index[k] |
---|
447 | except KeyError as err: |
---|
448 | msg = "Issue generating log_index:\n" |
---|
449 | msg += " Could not find entry type with name: {0}".format(err) |
---|
450 | raise AttributeError(msg) |
---|
451 | |
---|
452 | return new_log_index |
---|
453 | |
---|
454 | # End def |
---|
455 | |
---|
456 | |
---|
457 | |
---|
458 | # ----------------------------------------------------------------------------- |
---|
459 | # Log Misc Utilities |
---|
460 | # ----------------------------------------------------------------------------- |
---|
461 | def get_entry_constants(entry_type): |
---|
462 | """Get a copy of constants for given log entry type |
---|
463 | |
---|
464 | Args: |
---|
465 | entry_type (str): String constant for log entry type |
---|
466 | |
---|
467 | Returns: |
---|
468 | constants (consts_dict): Constants data structure. Fields are |
---|
469 | accessed via "." notation or "[]" notation as immutable properties. |
---|
470 | """ |
---|
471 | from .entry_types import log_entry_types |
---|
472 | |
---|
473 | consts = None |
---|
474 | |
---|
475 | try: |
---|
476 | consts = log_entry_types[entry_type].consts.copy() |
---|
477 | except: |
---|
478 | print("WARNING: Entry type '{0}' does not exist in log_entry_types.".format(entry_type)) |
---|
479 | |
---|
480 | return consts |
---|
481 | |
---|
482 | # End def |
---|
483 | |
---|
484 | |
---|
485 | def merge_log_indexes(dest_index, src_index, offset): |
---|
486 | """Merge log indexes. |
---|
487 | |
---|
488 | Both the ``dest_index`` and ``src_index`` have log entry offsets that are relative |
---|
489 | to the beginning of the log data from which they were generated. If the |
---|
490 | log data used to generate the log indexes are being merged, then move the |
---|
491 | log entry offsets in the ``src_index`` to their absolute offset in the merged |
---|
492 | log index. For each of the log entry offsets in the ``src_index``, the |
---|
493 | following translation will occur: |
---|
494 | :: |
---|
495 | |
---|
496 | <Offset in merged log index> = <Offset in src_index> + offset |
---|
497 | |
---|
498 | Args: |
---|
499 | dest_index (dict): Destination log index to merge ``src_index`` into |
---|
500 | src_index (dict): Source log index to merge into destination log index |
---|
501 | offset (int): Offset of ``src_index`` into ``dest_index`` |
---|
502 | |
---|
503 | """ |
---|
504 | return_val = dest_index |
---|
505 | |
---|
506 | for key in src_index.keys(): |
---|
507 | new_offsets = [x + offset for x in src_index[key]] |
---|
508 | |
---|
509 | try: |
---|
510 | return_val[key].append(new_offsets) |
---|
511 | except KeyError: |
---|
512 | return_val[key] = new_offsets |
---|
513 | |
---|
514 | return return_val |
---|
515 | |
---|
516 | # End def |
---|
517 | |
---|
518 | |
---|
519 | |
---|
520 | def calc_next_entry_offset(log_data, raw_log_index): |
---|
521 | """Calculates the offset of the next log entry given the log data and |
---|
522 | the raw log index. |
---|
523 | |
---|
524 | The log data does not necessarily end on a log entry boundary. Therefore, |
---|
525 | it is necessary to be able to calculate the offset of the next log entry |
---|
526 | so that it is possible to continue index generation when reading the log |
---|
527 | in multiple pieces. |
---|
528 | |
---|
529 | Args: |
---|
530 | log_data (bytes): Binary data from a WlanExpNode log |
---|
531 | log_index (dict): Raw log index dictionary |
---|
532 | |
---|
533 | Returns: |
---|
534 | offset (int): Offset of next log entry |
---|
535 | """ |
---|
536 | # See documentation above on header format |
---|
537 | hdr_size = 8 |
---|
538 | |
---|
539 | max_entry_offset_key = max(raw_log_index, key=raw_log_index.get) |
---|
540 | max_entry_offset = raw_log_index[max_entry_offset_key][-1] |
---|
541 | |
---|
542 | hdr_b = log_data[max_entry_offset - hdr_size : max_entry_offset] |
---|
543 | |
---|
544 | if( (bytearray(hdr_b[2:4]) != b'\xed\xac') ): |
---|
545 | raise Exception("ERROR: Offset not a valid entry header (offset {0})!".format(max_entry_offset)) |
---|
546 | |
---|
547 | entry_size = (hdr_b[6] + (hdr_b[7] * 256)) |
---|
548 | |
---|
549 | next_entry_header_offset = max_entry_offset + entry_size |
---|
550 | next_entry_offset = next_entry_header_offset + hdr_size |
---|
551 | |
---|
552 | return next_entry_offset |
---|
553 | |
---|
554 | # End def |
---|
555 | |
---|
556 | |
---|
557 | |
---|
558 | def overwrite_entries_with_null_entry(log_data, byte_offsets): |
---|
559 | """Overwrite the entries in byte_offsets with NULL entries. |
---|
560 | |
---|
561 | This is an in-place modification of log_data. |
---|
562 | |
---|
563 | Args: |
---|
564 | log_data (bytes): Binary data from a WlanExpNode log |
---|
565 | byte_offsets (list of int): List of offsets corresponding to the entries to overwrite |
---|
566 | """ |
---|
567 | # See documentation above on header format |
---|
568 | hdr_size = 8 |
---|
569 | |
---|
570 | for offset in byte_offsets: |
---|
571 | hdr_b = log_data[offset - hdr_size : offset] |
---|
572 | |
---|
573 | if( (bytearray(hdr_b[2:4]) != b'\xed\xac') ): |
---|
574 | raise Exception("ERROR: Offset not a valid entry header (offset {0})!".format(offset)) |
---|
575 | |
---|
576 | hdr_b[4:6] = bytearray([0] * 2) |
---|
577 | entry_size = (hdr_b[6] + (hdr_b[7] * 256)) |
---|
578 | |
---|
579 | # Write over the log entry with zeros |
---|
580 | log_data[offset : offset + entry_size] = bytearray([0] * entry_size) |
---|
581 | |
---|
582 | # End def |
---|
583 | |
---|
584 | |
---|
585 | |
---|
586 | def overwrite_payloads(log_data, byte_offsets, payload_offsets=None): |
---|
587 | """Overwrite any payloads with zeros. |
---|
588 | |
---|
589 | Args: |
---|
590 | log_data (bytes): Binary data from a WlanExpNode log |
---|
591 | byte_offsets (list of int): List of offsets corresponding to the entries to be modified |
---|
592 | payload_offsets (dict): Dictionary of ``{ entry_type_id : <payload offset> }`` |
---|
593 | |
---|
594 | By default, if ``payload_offsets`` is not specified, the method will iterate |
---|
595 | through all the entry types and calculate the defined size of the entry |
---|
596 | (ie it will use calcsize on the struct format of the entry). Sometimes, |
---|
597 | this is not the desired behavior and calling code would want to specify a |
---|
598 | different amount of the payload to keep. For example, for data |
---|
599 | transmissions / receptions, it might be desired to also keep the SNAP |
---|
600 | headers and potentially the IP headers. In this case, the calling code |
---|
601 | would get the appropriate set of byte_offsets and then create a |
---|
602 | ``payload_offsets`` dictionary with the desired "size" of the entry for those |
---|
603 | ``byte_offsets``. This will result in the calling code potentially calling |
---|
604 | this function multiple times with different ``payload_offsets`` for a given |
---|
605 | ``entry_type_id``. |
---|
606 | |
---|
607 | This method relies on the fact that for variable length log entries, the |
---|
608 | variable length data, ie the payload, is always at the end of the entry. |
---|
609 | The code also knows, based on the entry type, the size of the entry without |
---|
610 | the payload. Therefore, from the entry header, the code can determine how |
---|
611 | many payload bytes are after the defined fields and zero them out. |
---|
612 | |
---|
613 | This is an in-place modification of ``log_data``. |
---|
614 | """ |
---|
615 | import struct |
---|
616 | from entry_types import log_entry_types |
---|
617 | |
---|
618 | # See documentation above on header format |
---|
619 | hdr_size = 8 |
---|
620 | |
---|
621 | |
---|
622 | if payload_offsets is None: |
---|
623 | payload_offsets = {} |
---|
624 | |
---|
625 | # Create temp data structure: { entry_type_id : <payload offset>} |
---|
626 | for entry_type_id, entry_type in log_entry_types.items(): |
---|
627 | payload_offsets[entry_type_id] = struct.calcsize(entry_type.fields_fmt_struct) |
---|
628 | |
---|
629 | |
---|
630 | for offset in byte_offsets: |
---|
631 | hdr_b = log_data[offset - hdr_size : offset] |
---|
632 | |
---|
633 | if( (bytearray(hdr_b[2:4]) != b'\xed\xac') ): |
---|
634 | raise Exception("ERROR: Offset not a valid entry header (offset {0})!".format(offset)) |
---|
635 | |
---|
636 | entry_type_id = (hdr_b[4] + (hdr_b[5] * 256)) |
---|
637 | entry_size = (hdr_b[6] + (hdr_b[7] * 256)) |
---|
638 | |
---|
639 | try: |
---|
640 | len_offset = payload_offsets[entry_type_id] |
---|
641 | |
---|
642 | # Write over the log entry payload with zeros |
---|
643 | if entry_size > len_offset: |
---|
644 | log_data[offset + len_offset : offset + entry_size] = bytearray([0] * (entry_size - len_offset)) |
---|
645 | |
---|
646 | except KeyError: |
---|
647 | print("WARNING: Unknown entry type id {0} at offset {1}".format(entry_type_id, offset)) |
---|
648 | |
---|
649 | # End def |
---|
650 | |
---|
651 | |
---|
652 | def calc_tx_time_log(tx_low_entries): |
---|
653 | """Wrapper for calc_tx_time() that accepts an array of TX_LOW log entries instead of discrete mcs/length/etc arguments |
---|
654 | |
---|
655 | Args: |
---|
656 | tx_low_enetries: List (typically a Numpy array) of TX_LOW log entries |
---|
657 | """ |
---|
658 | return calc_tx_time(mcs=tx_low_entries['mcs'], |
---|
659 | phy_mode=tx_low_entries['phy_mode'], |
---|
660 | payload_length=tx_low_entries['length'], |
---|
661 | phy_samp_rate=tx_low_entries['phy_samp_rate']) |
---|
662 | |
---|
663 | def calc_tx_time(mcs, phy_mode, payload_length, phy_samp_rate): |
---|
664 | """Calculates the duration of an 802.11 transmission given its rate and |
---|
665 | payload length. Returns duration of PHY transmission in microseconds. |
---|
666 | |
---|
667 | Args: |
---|
668 | mcs (int or list of ints): Modulation and coding scheme (MCS) index |
---|
669 | phy_mode (str, int or list of strs or ints): PHY mode (from util.phy_modes) |
---|
670 | payload_length (int or list of ints): Nnumber of bytes in the payload |
---|
671 | phy_sample_rate (int or list of ints): PHY sample rate; only (10, 20, 40) are valid |
---|
672 | |
---|
673 | This method accounts only for PHY overhead (preamble, SIGNAL field, etc.). |
---|
674 | It does *not* account for MAC overhead. The payload_length argument must |
---|
675 | include any MAC fields (typically a 24-byte MAC header plus 4 byte FCS). |
---|
676 | |
---|
677 | All 4 arguments are required. The dimensions of the 4 arguments must match. To calculate |
---|
678 | the duration of a single packet, call this method with scalaer integer arguments. To |
---|
679 | calculate the duration of many packets, call this method with iterables (typically |
---|
680 | Numpy arrays) of integer values. When calling this method with arrays the lengths |
---|
681 | of the 4 arrays must be equal. |
---|
682 | """ |
---|
683 | import numpy as np |
---|
684 | import wlan_exp.util as util |
---|
685 | |
---|
686 | # Check for valid phy_samp_rate values |
---|
687 | if(not np.all( (phy_samp_rate == 10) + (phy_samp_rate == 20) + (phy_samp_rate == 40) )): |
---|
688 | raise AttributeError('Invalid phy_samp_rate - all phy_samp_rate values must be 10, 20 or 40') |
---|
689 | |
---|
690 | # Convert samp rates to lut indexes |
---|
691 | # Integer division by 15 is shortcut to map (10, 20, 40) to (0, 1, 2) |
---|
692 | samp_rate_idx = (phy_samp_rate // 15) |
---|
693 | |
---|
694 | # Lookup tables of waveform section durations in microseconds, indexed by samp_rate |
---|
695 | lut_T_PREAMBLE = (8, 16, 32) |
---|
696 | lut_T_SIG = (2, 4, 8) |
---|
697 | lut_T_SYM = (2, 4, 8) |
---|
698 | lut_T_EXT = (6, 6, 6) |
---|
699 | |
---|
700 | T_PREAMBLE = np.choose(samp_rate_idx, lut_T_PREAMBLE) |
---|
701 | T_SIG = np.choose(samp_rate_idx, lut_T_SIG) |
---|
702 | T_SYM = np.choose(samp_rate_idx, lut_T_SYM) |
---|
703 | T_EXT = np.choose(samp_rate_idx, lut_T_EXT) |
---|
704 | |
---|
705 | # (mcs, phy_mode) encodes number of data bits per symbol |
---|
706 | try: |
---|
707 | # LUT implementation (~2 sec for 150K entries) |
---|
708 | # - Construct NDBPS lookup table to be used during processing |
---|
709 | # |
---|
710 | # TODO: This implementation is dependent on the MCS range that is |
---|
711 | # not defined in wlan_exp.util. This function will need to be |
---|
712 | # updated if more MCS values are defined. |
---|
713 | ndbps_lut = {} |
---|
714 | |
---|
715 | for m in range(0, 8): |
---|
716 | phy_mode_lut = {} |
---|
717 | |
---|
718 | for p in util.phy_modes.values(): |
---|
719 | try: |
---|
720 | phy_mode_lut[p] = util.get_rate_info(m, p, 20)['NDBPS'] |
---|
721 | except: |
---|
722 | # Do nothing for undefined values |
---|
723 | pass |
---|
724 | |
---|
725 | ndbps_lut[m] = phy_mode_lut |
---|
726 | |
---|
727 | ndbps = np.array([ndbps_lut[m][p] for i, (m, p) in enumerate(zip(mcs, phy_mode))]) |
---|
728 | |
---|
729 | |
---|
730 | # Naive implementation (~7 sec for 150K entries) |
---|
731 | # - Get rate info for each entry to extract NDBPS |
---|
732 | # ndbps = np.array([util.get_rate_info(m, p, phy_samp_rate)['NDBPS'] for i, (m, p) in enumerate(zip(mcs, phy_mode))]) |
---|
733 | except TypeError: |
---|
734 | ndbps = util.get_rate_info(mcs, phy_mode, phy_samp_rate)['NDBPS'] |
---|
735 | |
---|
736 | # Compute the number of symbols in DATA field |
---|
737 | # - 16 = LEN_SERVICE (2 bytes) |
---|
738 | # - 6 = LEN_TAIL (6 bits) |
---|
739 | # - np.ceil() infers any PAD bits |
---|
740 | |
---|
741 | num_data_syms = np.ceil((16.0 + 6.0 + 8*payload_length) / ndbps) |
---|
742 | |
---|
743 | # HTMF waveforms have 4 extra preamble symbols |
---|
744 | # HT-SIG1, HT-SIG2, HT-STF, HT-LTF |
---|
745 | num_ht_preamble_syms = 4 * (phy_mode == util.phy_modes['HTMF']) |
---|
746 | |
---|
747 | T_TOT = T_PREAMBLE + T_SIG + (T_SYM * num_ht_preamble_syms) + (T_SYM * num_data_syms) + T_EXT |
---|
748 | |
---|
749 | return T_TOT |
---|
750 | |
---|
751 | # End def |
---|
752 | |
---|
753 | |
---|
754 | def find_overlapping_tx_low(src_tx_low, int_tx_low): |
---|
755 | """Finds TX_LOW entries in the source that are overlapped by the TX_LOW entries in other flow. |
---|
756 | |
---|
757 | Args: |
---|
758 | src_tx_low (Numpy Array): Source TX_LOW numpy array of entries |
---|
759 | int_tx_low (Numpy Array): Other TX_LOW numpy array of entries |
---|
760 | phy_sample_rate (int): Sample rate of the PHY |
---|
761 | |
---|
762 | Returns: |
---|
763 | indexes (tuple): |
---|
764 | Tuple containing indexes into the provided arrays indicating which entries overlapped |
---|
765 | """ |
---|
766 | import numpy as np |
---|
767 | |
---|
768 | import wlan_exp.log.coll_util as collision_utility |
---|
769 | |
---|
770 | src_ts = src_tx_low['timestamp'] |
---|
771 | int_ts = int_tx_low['timestamp'] |
---|
772 | |
---|
773 | src_dur = np.uint64(calc_tx_time(src_tx_low['mcs'], src_tx_low['phy_mode'], src_tx_low['length'], src_tx_low['phy_samp_rate'])) |
---|
774 | int_dur = np.uint64(calc_tx_time(int_tx_low['mcs'], int_tx_low['phy_mode'], int_tx_low['length'], int_tx_low['phy_samp_rate'])) |
---|
775 | |
---|
776 | src_idx = [] |
---|
777 | int_idx = [] |
---|
778 | |
---|
779 | src_idx, int_idx = collision_utility._collision_idx_finder(src_ts, src_dur, int_ts, int_dur) |
---|
780 | |
---|
781 | src_idx = src_idx[src_idx>0] |
---|
782 | int_idx = int_idx[int_idx>0] |
---|
783 | |
---|
784 | return (src_idx, int_idx) |
---|
785 | |
---|
786 | # End def |
---|
787 | |
---|
788 | |
---|
789 | |
---|
790 | def convert_datetime_to_log_time_str(datetime_obj): |
---|
791 | """Convert a datetime object to a log time string. |
---|
792 | |
---|
793 | Args: |
---|
794 | datetime_obj (DateTime()): Python DateTime() object |
---|
795 | |
---|
796 | Returns: |
---|
797 | log_time_str (str): |
---|
798 | String format of the DateTime() object to be used in HDF5 files |
---|
799 | """ |
---|
800 | |
---|
801 | if datetime_obj.tzinfo is None: |
---|
802 | import datetime |
---|
803 | |
---|
804 | class UTC(datetime.tzinfo): |
---|
805 | def utcoffset(self, dt): return datetime.timedelta(0) |
---|
806 | def tzname(self, dt): return "UTC" |
---|
807 | def dst(self, dt): return datetime.timedelta(0) |
---|
808 | |
---|
809 | datetime_obj = datetime_obj.replace(tzinfo=UTC()) |
---|
810 | |
---|
811 | return datetime_obj.strftime("%Y-%m-%d %H:%M:%S%z") |
---|
812 | |
---|
813 | # End def |
---|
814 | |
---|
815 | |
---|
816 | def convert_log_time_str_to_datetime(log_time_str): |
---|
817 | """Convert a log time string to a datetime object. |
---|
818 | |
---|
819 | Args: |
---|
820 | log_time_str (str): String format of the DateTime() object to be used in HDF5 files |
---|
821 | |
---|
822 | Returns: |
---|
823 | datetime_obj (DateTime()): Python DateTime() object |
---|
824 | """ |
---|
825 | import datetime |
---|
826 | |
---|
827 | return datetime.datetime.strptime(log_time_str, "%Y-%m-%d %H:%M:%S%z") |
---|
828 | |
---|
829 | # End def |
---|
830 | |
---|
831 | |
---|
832 | |
---|
833 | def get_now_as_log_time_str(): |
---|
834 | """Get the current time as a log time string. |
---|
835 | |
---|
836 | This should be used instead of datetime.datetime.now() because it |
---|
837 | automatically handles timezones. |
---|
838 | |
---|
839 | Returns: |
---|
840 | log_time_str (str): String format of the datetime.datetime.now() to be used in HDF5 files |
---|
841 | """ |
---|
842 | import time |
---|
843 | import datetime |
---|
844 | |
---|
845 | ZERO = datetime.timedelta(0) |
---|
846 | STDOFFSET = datetime.timedelta(seconds = -time.timezone) |
---|
847 | |
---|
848 | if time.daylight: |
---|
849 | DSTOFFSET = datetime.timedelta(seconds = -time.altzone) |
---|
850 | else: |
---|
851 | DSTOFFSET = STDOFFSET |
---|
852 | |
---|
853 | DSTDIFF = DSTOFFSET - STDOFFSET |
---|
854 | |
---|
855 | class LocalTimezone(datetime.tzinfo): |
---|
856 | |
---|
857 | def utcoffset(self, dt): |
---|
858 | if self._isdst(dt): |
---|
859 | return DSTOFFSET |
---|
860 | else: |
---|
861 | return STDOFFSET |
---|
862 | |
---|
863 | def dst(self, dt): |
---|
864 | if self._isdst(dt): |
---|
865 | return DSTDIFF |
---|
866 | else: |
---|
867 | return ZERO |
---|
868 | |
---|
869 | def tzname(self, dt): |
---|
870 | return time.tzname[self._isdst(dt)] |
---|
871 | |
---|
872 | def _isdst(self, dt): |
---|
873 | tt = (dt.year, dt.month, dt.day, |
---|
874 | dt.hour, dt.minute, dt.second, |
---|
875 | dt.weekday(), 0, 0) |
---|
876 | stamp = time.mktime(tt) |
---|
877 | tt = time.localtime(stamp) |
---|
878 | return tt.tm_isdst > 0 |
---|
879 | |
---|
880 | return convert_datetime_to_log_time_str(datetime.datetime.now(tz=LocalTimezone())) |
---|
881 | |
---|
882 | # End def |
---|
883 | |
---|
884 | |
---|
885 | |
---|
886 | # ----------------------------------------------------------------------------- |
---|
887 | # Log Printing Utilities |
---|
888 | # ----------------------------------------------------------------------------- |
---|
889 | def print_log_index_summary(log_index, title=None): |
---|
890 | """Prints a summary of the log_index. |
---|
891 | |
---|
892 | Args: |
---|
893 | log_index (dict): Log index dictionary |
---|
894 | title (str, optional): Title to be used for the log_index |
---|
895 | (default is 'Log Index Summary:') |
---|
896 | """ |
---|
897 | total_len = 0 |
---|
898 | |
---|
899 | if title is None: |
---|
900 | print('Log Index Summary:\n') |
---|
901 | else: |
---|
902 | print(title) |
---|
903 | |
---|
904 | for k in sorted(log_index.keys()): |
---|
905 | print('{0:>10,} of Type {1}'.format(len(log_index[k]), k)) |
---|
906 | total_len += len(log_index[k]) |
---|
907 | |
---|
908 | print('--------------------------') |
---|
909 | print('{0:>10,} total entries\n'.format(total_len)) |
---|
910 | |
---|
911 | # End def |
---|
912 | |
---|
913 | |
---|
914 | def _print_log_entries(log_bytes, log_index, entries_slice=None): |
---|
915 | """Work in progress - built for debugging address issues, some variant of this will be useful |
---|
916 | for creating text version of raw log w/out requiring numpy""" |
---|
917 | |
---|
918 | from itertools import chain |
---|
919 | from entry_types import log_entry_types |
---|
920 | hdr_size = 8 |
---|
921 | |
---|
922 | if(entries_slice is not None) and (type(entries_slice) is slice): |
---|
923 | log_slice = entries_slice |
---|
924 | else: |
---|
925 | # Use entire log index by default |
---|
926 | tot_entries = sum(map(len(log_index.values()))) |
---|
927 | log_slice = slice(0, tot_entries) |
---|
928 | |
---|
929 | # Create flat list of all byte offsets in log_index, sorted by offset |
---|
930 | # See http://stackoverflow.com/questions/18642428/concatenate-an-arbitrary-number-of-lists-in-a-function-in-python |
---|
931 | log_index_flat = sorted(chain.from_iterable(log_index.values())) |
---|
932 | |
---|
933 | for entry_offset in log_index_flat[log_slice]: |
---|
934 | |
---|
935 | # Look backwards for the log entry header and extract the entry type ID and size |
---|
936 | hdr_b = log_bytes[entry_offset-hdr_size:entry_offset] |
---|
937 | entry_type_id = (ord(hdr_b[4]) + (ord(hdr_b[5]) * 256)) |
---|
938 | entry_size = (ord(hdr_b[6]) + (ord(hdr_b[7]) * 256)) |
---|
939 | |
---|
940 | # Lookup the corresponding entry object instance (KeyError here indicates corrupt log or index) |
---|
941 | entry_type = log_entry_types[entry_type_id] |
---|
942 | |
---|
943 | # Use the entry_type's class method to string-ify itself |
---|
944 | print(entry_type._entry_as_string(log_bytes[entry_offset : entry_offset+entry_size])) |
---|
945 | |
---|
946 | # End def |
---|
947 | |
---|
948 | |
---|
949 | def _get_safe_filename(filename, print_warnings=True): |
---|
950 | """Create a 'safe' file name based on the current file name. |
---|
951 | |
---|
952 | Given the filename <path>/<name>.<ext>, this method first checks if the |
---|
953 | file already exists. If so, a new name is calculated with the form: |
---|
954 | <path>/<name>_<date>_<id>.<ext>, where <date> is a formatted |
---|
955 | string from time and <id> is a unique ID starting at zero if more |
---|
956 | than one file is created in a given second. If the requested filename |
---|
957 | did not already exist, the name is returned unchanged. |
---|
958 | |
---|
959 | This method is only suitable in environments where it can |
---|
960 | safely assumed that no conflicting files will be created in between the |
---|
961 | os.path.isfile() calls below and the use of the returned safe filename. |
---|
962 | """ |
---|
963 | import os |
---|
964 | import time |
---|
965 | |
---|
966 | if os.path.isfile(filename): |
---|
967 | # Already know it's a file, so fn_file is not '' |
---|
968 | (fn_fldr, fn_file) = os.path.split(filename) |
---|
969 | |
---|
970 | # Find the last '.' in the file name and classify everything after that as the <ext> |
---|
971 | ext_i = fn_file.rfind('.') |
---|
972 | if (ext_i != -1): |
---|
973 | # Remember the original file extension |
---|
974 | fn_ext = fn_file[ext_i:] |
---|
975 | fn_base = fn_file[0:ext_i] |
---|
976 | else: |
---|
977 | fn_ext = '' |
---|
978 | fn_base = fn_file |
---|
979 | |
---|
980 | # Create a new filename |
---|
981 | i = 0 |
---|
982 | while True: |
---|
983 | ext = '_{0}_{1:02d}'.format(time.strftime("%Y%m%d_%H%M%S"), i) |
---|
984 | new_filename = fn_base + ext + fn_ext |
---|
985 | safe_filename = os.path.join(fn_fldr, new_filename) |
---|
986 | i += 1 |
---|
987 | |
---|
988 | # Found a unique file name. Break the loop. |
---|
989 | if not os.path.isfile(safe_filename): |
---|
990 | if print_warnings: |
---|
991 | msg = 'WARNING: File "{0}" already exists.\n'.format(filename) |
---|
992 | msg += ' Using replacement file name "{0}"'.format(safe_filename) |
---|
993 | print(msg) |
---|
994 | break |
---|
995 | else: |
---|
996 | # File didn't exist - use name as provided |
---|
997 | safe_filename = filename |
---|
998 | |
---|
999 | return safe_filename |
---|
1000 | |
---|
1001 | # End def |
---|
1002 | |
---|
1003 | |
---|