Skip to content

Commit

Permalink
stable time in kernel
Browse files Browse the repository at this point in the history
Signed-off-by: Anastasios Papagiannis <[email protected]>
  • Loading branch information
tpapagian committed Feb 1, 2024
1 parent a308012 commit 2a5d740
Showing 1 changed file with 81 additions and 2 deletions.
83 changes: 81 additions & 2 deletions bpf/process/bpf_execve_event.c
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,84 @@ read_exe(struct task_struct *task, struct heap_exe *exe)
}
#endif

/*
 * Time conversion constants, mirroring the kernel's definitions
 * (see include/vdso/time64.h) since kernel headers cannot be
 * included directly from BPF programs.
 */
#define MSEC_PER_SEC 1000L
#define USEC_PER_MSEC 1000L
#define NSEC_PER_USEC 1000L
#define NSEC_PER_MSEC 1000000L
#define USEC_PER_SEC 1000000L
#define NSEC_PER_SEC 1000000000L
#define PSEC_PER_SEC 1000000000000LL
#define FSEC_PER_SEC 1000000000000000LL

/* Picoseconds per nanosecond. */
#define PSEC_PER_NSEC 1000L

/* 64-bit time_t limits (kernel: used by timespec[64]_valid_strict). */
#define TIME64_MAX ((s64) ~((u64)1 << 63))
#define TIME64_MIN (-TIME64_MAX - 1)

/* ktime_t (signed 64-bit nanoseconds) limits, and the same in whole seconds. */
#define KTIME_MAX ((s64) ~((u64)1 << 63))
#define KTIME_MIN (-KTIME_MAX - 1)
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
#define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC)

#define USER_HZ 100 /* some user interfaces (e.g. /proc stat fields) are in "ticks" of USER_HZ */

static __attribute__((always_inline)) inline s64 timespec64_to_ns(struct timespec64 *ts)
{
__s64 tv_sec = BPF_CORE_READ(ts, tv_sec);
long tv_nsec = BPF_CORE_READ(ts, tv_nsec);

/* Prevent multiplication overflow / underflow */
if (tv_sec >= KTIME_SEC_MAX)
return KTIME_MAX;

if (tv_sec <= KTIME_SEC_MIN)
return KTIME_MIN;

return ((s64)tv_sec * NSEC_PER_SEC) + tv_nsec;
}

/*
 * Translate a boottime timestamp (in ns) into the current task's time
 * namespace by adding the namespace's boottime offset, mirroring the
 * kernel's timens_add_boottime_ns().
 *
 * Fix over the original: ns_offsets was left uninitialized and the
 * probe_read() return value was ignored, so a missing time namespace or
 * a failed read would add uninitialized stack bytes to nsec (and the
 * uninitialized read can be rejected by the BPF verifier).  We now
 * zero-initialize, check both conditions, and return the value
 * unadjusted on failure.
 */
static __attribute__((always_inline)) inline u64 timens_add_boottime_ns(u64 nsec)
{
	struct task_struct *task = (struct task_struct *)get_current_task();
	struct time_namespace *time_ns = BPF_CORE_READ(task, nsproxy, time_ns);
	struct timens_offsets ns_offsets = { 0 };

	/* No time namespace (or field unavailable): nothing to add. */
	if (!time_ns)
		return nsec;

	if (probe_read(&ns_offsets, sizeof(struct timens_offsets), _(&time_ns->offsets)) < 0)
		return nsec;

	return nsec + timespec64_to_ns(&ns_offsets.boottime);
}

/*
 * Unsigned 64-bit division by a 32-bit divisor; stores the remainder
 * through @remainder and returns the quotient (kernel div_u64_rem()
 * equivalent for BPF).
 */
static __attribute__((always_inline)) inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	u64 quot = dividend / divisor;

	/* Derive the remainder from the quotient: dividend = quot * divisor + rem. */
	*remainder = dividend - quot * divisor;
	return quot;
}

/*
 * Unsigned 64-bit division by a 32-bit divisor, discarding the
 * remainder (kernel div_u64() equivalent for BPF).
 */
static __attribute__((always_inline)) inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 rem;

	return div_u64_rem(dividend, divisor, &rem);
}

/*
 * Convert nanoseconds to clock_t (ticks of USER_HZ), mirroring the
 * kernel's nsec_to_clock_t() in kernel/time/time.c.  With USER_HZ == 100
 * (defined above) the first branch is taken and this reduces to
 * x / 10000000.
 */
static __attribute__((always_inline)) inline u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
	/* NSEC_PER_SEC is an exact multiple of USER_HZ: a single division is exact. */
	return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
	/* Avoid 64-bit overflow by pre-scaling both operands by 512. */
	return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
	/*
	 * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
	 * overflow after 64.99 years.
	 * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
	 */
	return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}

__attribute__((section("tracepoint/sys_execve"), used)) int
event_execve(struct sched_execve_args *ctx)
{
Expand All @@ -175,7 +253,7 @@ event_execve(struct sched_execve_args *ctx)
struct execve_map_value *parent;
struct msg_process *p;
__u32 zero = 0;
__u64 pid;
__u64 pid, start_boottime;

event = map_lookup_elem(&execve_msg_heap_map, &zero);
if (!event)
Expand All @@ -200,7 +278,8 @@ event_execve(struct sched_execve_args *ctx)
p->pid = pid >> 32;
p->tid = (__u32)pid;
p->nspid = get_task_pid_vnr();
p->ktime = ktime_get_ns();
start_boottime = BPF_CORE_READ(task, start_boottime);
p->ktime = nsec_to_clock_t(timens_add_boottime_ns(start_boottime)) * 10000000LLU; // similarly to what /proc/<PID>/stat provides (i.e. https://elixir.bootlin.com/linux/v6.1.75/source/fs/proc/array.c#L568)
p->size = offsetof(struct msg_process, args);
p->auid = get_auid();
p->uid = get_current_uid_gid();
Expand Down

0 comments on commit 2a5d740

Please sign in to comment.