Skip to content

Commit

Permalink
fix(libscap): use the correct memory barrier for ARM64
Browse files Browse the repository at this point in the history
Signed-off-by: Andrea Terzolo <[email protected]>
  • Loading branch information
Andreagit97 authored and poiana committed Sep 13, 2024
1 parent 398964a commit 5919fa9
Show file tree
Hide file tree
Showing 3 changed files with 63 additions and 28 deletions.
21 changes: 1 addition & 20 deletions userspace/libpman/src/ringbuffer_definitions.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ limitations under the License.

#include "state.h"
#include <linux/bpf.h>
#include <libscap/scap_barrier.h>

/* Taken from libbpf: /src/ringbuf.c */
struct ring {
Expand Down Expand Up @@ -50,23 +51,3 @@ static inline int roundup_len(uint32_t len) {
/* Round the record length up to the next multiple of 8 bytes, as
 * ring-buffer records are always 8-byte aligned. */
return (len + 7) / 8 * 8;
}

/* Taken from libbpf: `include/linux/compiler.h` */

/* Force a single, untorn access to `x` by reading/writing through a
 * volatile-qualified lvalue; prevents the compiler from caching, tearing,
 * or re-fetching the value. */
#define READ_ONCE(x) (*(volatile typeof(x) *)&x)
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)

/* Compiler-only barrier: stops the compiler from reordering memory
 * accesses across it. Emits no CPU fence instruction. */
#define barrier() asm volatile("" ::: "memory")

/* NOTE(review): these use a compiler-only barrier, so they provide
 * release/acquire ordering only on strongly ordered CPUs (x86 TSO).
 * On ARM64 a hardware barrier is required — this is exactly what this
 * commit fixes by moving the definitions to scap_barrier.h. */
#define smp_store_release(p, v) \
do { \
barrier(); \
WRITE_ONCE(*p, v); \
} while(0)

#define smp_load_acquire(p) \
({ \
typeof(*p) ___p = READ_ONCE(*p); \
barrier(); \
___p; \
})
9 changes: 3 additions & 6 deletions userspace/libscap/engine/bpf/scap_bpf.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ limitations under the License.

#include <libscap/compat/bpf.h>
#include <libscap/compat/perf_event.h>
#include <libscap/scap_barrier.h>

struct perf_event_sample {
struct perf_event_header header;
Expand Down Expand Up @@ -49,9 +50,7 @@ static inline void scap_bpf_get_buf_pointers(scap_device *dev,
*phead = header->data_head;
*ptail = header->data_tail;

// clang-format off
asm volatile("" ::: "memory");
// clang-format on
mem_barrier();

uint64_t cons = *ptail % header->data_size; // consumer position
uint64_t prod = *phead % header->data_size; // producer position
Expand Down Expand Up @@ -154,9 +153,7 @@ static inline void scap_bpf_advance_tail(struct scap_device *dev) {

header = (struct perf_event_mmap_page *)dev->m_buffer;

// clang-format off
asm volatile("" ::: "memory");
// clang-format on
mem_barrier();

ASSERT(dev->m_lastreadsize > 0);
/* `header->data_tail` is the consumer position. */
Expand Down
61 changes: 59 additions & 2 deletions userspace/libscap/linux/barrier.h
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: Apache-2.0
/*
Copyright (C) 2023 The Falco Authors.
Copyright (C) 2024 The Falco Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Expand All @@ -17,4 +17,61 @@ limitations under the License.
*/
#pragma once

#define mem_barrier() __sync_synchronize()
// This is taken from kernel headers `/include/linux/compiler.h`.
// Used by libpman and the scap_bpf engine to order accesses to the
// shared ring-buffer / perf-buffer control words.

/* Force a single, untorn access to `x` by going through a
 * volatile-qualified lvalue; prevents the compiler from caching,
 * tearing, or re-fetching the value. */
#define READ_ONCE(x) (*(volatile typeof(x) *)&x)
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)

/* Compiler-only barrier: forbids compile-time reordering across it,
 * emits no CPU fence instruction. */
#define barrier() asm volatile("" ::: "memory")

#if defined(__x86_64__)

/* Full memory fence. A locked RMW on a stack slot below the 128-byte
 * red zone (-132(%rsp)) serializes memory like MFENCE; "cc" is
 * clobbered because ADDL updates the flags. */
#define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")

/* On x86-64 (TSO) plain stores already have release semantics and plain
 * loads acquire semantics, so a compiler barrier alone is sufficient. */
#define smp_store_release(p, v) \
do { \
barrier(); \
WRITE_ONCE(*p, v); \
} while(0)

#define smp_load_acquire(p) \
({ \
typeof(*p) ___p = READ_ONCE(*p); \
barrier(); \
___p; \
})

#elif defined(__aarch64__)

/* Full memory barrier on ARM64: DMB over the inner-shareable domain. */
#define smp_mb() asm volatile("dmb ish" ::: "memory")

#endif

/* Fallback for architectures not handled above: GCC/Clang builtin
 * full barrier. */
#ifndef smp_mb
#define smp_mb() __sync_synchronize()
#endif

/* Generic release store: a full barrier before the store is stronger
 * than (and therefore implies) release ordering. Used on ARM64, which
 * only defines smp_mb() above. */
#ifndef smp_store_release
#define smp_store_release(p, v) \
do { \
smp_mb(); \
WRITE_ONCE(*p, v); \
} while(0)
#endif

/* Generic acquire load: a full barrier after the load implies acquire
 * ordering. */
#ifndef smp_load_acquire
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p = READ_ONCE(*p); \
smp_mb(); \
___p; \
})
#endif

// This is defined by us: the general-purpose barrier used by the
// engines. On x86-64 a compiler barrier suffices (TSO); elsewhere a
// real hardware fence is required.
#if defined(__x86_64__)
#define mem_barrier() barrier()
#else
#define mem_barrier() smp_mb()
#endif

0 comments on commit 5919fa9

Please sign in to comment.