/* SPDX-License-Identifier: GPL-2.0 */

/* Stage 5 definitions for creating trace events */

/*
 * Stage 5 of the TRACE_EVENT() multi-include machinery.  The field macros
 * are re-#defined (hence the #undef/#define pairs) so that re-expanding an
 * event's field list remembers the offset of each array from the beginning
 * of the event: for every dynamic (variable-length) field the expansion
 * records its offset and byte length in __data_offsets and accumulates the
 * total dynamic payload in __data_size.  __data_offsets, __data_size and
 * __item_length are assumed to be in scope at the expansion site (declared
 * by the stage that includes this file — not visible here).
 */

#undef __entry
#define __entry entry

#ifndef __STAGE5_STRING_SRC_H
#define __STAGE5_STRING_SRC_H
/*
 * Substitute EVENT_NULL_STR (defined elsewhere) for a NULL string source,
 * so strlen() below is never applied to a NULL pointer.
 */
static inline const char *__string_src(const char *str)
{
	if (!str)
		return EVENT_NULL_STR;
	return str;
}
#endif /* __STAGE5_STRING_SRC_H */

/*
 * Fields should never declare an array: i.e. __field(int, arr[5])
 * If they do, it will cause issues in parsing and possibly corrupt the
 * events. To prevent that from happening, test the sizeof() a fictitious
 * type called "struct _test_no_array_##item" which will fail if "item"
 * contains array elements (like "arr[5]").
 *
 * If you hit this, use __array(int, arr, 5) instead.
 */
#undef __field
#define __field(type, item)					\
	{ (void)sizeof(struct _test_no_array_##item *); }

#undef __field_ext
#define __field_ext(type, item, filter_type)			\
	{ (void)sizeof(struct _test_no_array_##item *); }

#undef __field_struct
#define __field_struct(type, item)				\
	{ (void)sizeof(struct _test_no_array_##item *); }

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)		\
	{ (void)sizeof(struct _test_no_array_##item *); }

/* Fixed-size arrays occupy the static part of the event: nothing to record. */
#undef __array
#define __array(type, item, len)

/*
 * Record placement of a dynamic array of 'len' elements: the offset of the
 * item (counted from the start of the event record, i.e. __data_size into
 * the __data area) goes in the low 16 bits of __data_offsets->item and the
 * byte length in the high 16 bits; __data_size then grows by that length.
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len)			\
	__item_length = (len) * sizeof(type);			\
	__data_offsets->item = __data_size +			\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= __item_length << 16;		\
	__data_size += __item_length;

/*
 * A string is a dynamic char array sized from its source plus the
 * terminating NUL.  The source pointer is also stashed in
 * __data_offsets->item##_ptr_ for later use (presumably by the assign
 * stage — the consumer is outside this file).
 */
#undef __string
#define __string(item, src) __dynamic_array(char, item,		\
		    strlen(__string_src(src)) + 1)		\
	__data_offsets->item##_ptr_ = src;

/* As __string(), but with an explicit length instead of strlen(). */
#undef __string_len
#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1)\
	__data_offsets->item##_ptr_ = src;

/* printf()-style string: sized from the fmt/va_list pair. */
#undef __vstring
#define __vstring(item, fmt, ap) __dynamic_array(char, item,	\
		      __trace_event_vstr_len(fmt, ap))

/*
 * Like __dynamic_array(), but the recorded offset is relative: the
 * subtraction of offsetof(__rel_loc_##item) plus sizeof(u32) makes the
 * stored offset count from just past the u32 __rel_loc_##item field
 * rather than from the start of the event record.
 */
#undef __rel_dynamic_array
#define __rel_dynamic_array(type, item, len)			\
	__item_length = (len) * sizeof(type);			\
	__data_offsets->item = __data_size +			\
			       offsetof(typeof(*entry), __data) -	\
			       offsetof(typeof(*entry), __rel_loc_##item) -	\
			       sizeof(u32);			\
	__data_offsets->item |= __item_length << 16;		\
	__data_size += __item_length;

/* Relative-offset counterpart of __string(). */
#undef __rel_string
#define __rel_string(item, src) __rel_dynamic_array(char, item,	\
		    strlen(__string_src(src)) + 1)		\
	__data_offsets->item##_ptr_ = src;

/* Relative-offset counterpart of __string_len(). */
#undef __rel_string_len
#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, (len) + 1)\
	__data_offsets->item##_ptr_ = src;

/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * num_possible_cpus().
 */
#define __bitmask_size_in_bytes_raw(nr_bits)	\
	(((nr_bits) + 7) / 8)

/* Round the raw byte count up to a whole number of longs. */
#define __bitmask_size_in_longs(nr_bits)			\
	((__bitmask_size_in_bytes_raw(nr_bits) +		\
	  ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))

/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * num_possible_cpus() padded out to the nearest long. This is what
 * is saved in the buffer, just to be consistent.
 */
#define __bitmask_size_in_bytes(nr_bits)				\
	(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))

/* A bitmask is a dynamic array of unsigned long, padded as above. */
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,	\
					 __bitmask_size_in_longs(nr_bits))

/* A cpumask is a bitmask of nr_cpumask_bits bits. */
#undef __cpumask
#define __cpumask(item) __bitmask(item, nr_cpumask_bits)

/* Relative-offset counterpart of __bitmask(). */
#undef __rel_bitmask
#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item,	\
					 __bitmask_size_in_longs(nr_bits))

/* Relative-offset counterpart of __cpumask(). */
#undef __rel_cpumask
#define __rel_cpumask(item) __rel_bitmask(item, nr_cpumask_bits)

/* A socket address is stored as a dynamic array of raw bytes. */
#undef __sockaddr
#define __sockaddr(field, len) __dynamic_array(u8, field, len)

/* Relative-offset counterpart of __sockaddr(). */
#undef __rel_sockaddr
#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)