fix(libscap): use the correct memory barrier for ARM64 #2067

Merged · 1 commit · Sep 13, 2024
21 changes: 1 addition & 20 deletions userspace/libpman/src/ringbuffer_definitions.h
@@ -20,6 +20,7 @@ limitations under the License.

#include "state.h"
#include <linux/bpf.h>
+#include <libscap/scap_barrier.h>

/* Taken from libbpf: /src/ringbuf.c */
struct ring {
@@ -50,23 +51,3 @@ static inline int roundup_len(uint32_t len) {
/* round up to 8 byte alignment */
return (len + 7) / 8 * 8;
}

-/* Taken from libbpf: `include/linux/compiler.h` */
-
-#define READ_ONCE(x) (*(volatile typeof(x) *)&x)
-#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)
-
-#define barrier() asm volatile("" ::: "memory")
-
-#define smp_store_release(p, v) \
-	do { \
-		barrier(); \
-		WRITE_ONCE(*p, v); \
-	} while(0)
-
-#define smp_load_acquire(p) \
-	({ \
-		typeof(*p) ___p = READ_ONCE(*p); \
-		barrier(); \
-		___p; \
-	})
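Aside: the surviving roundup_len() helper rounds a record length up to the next multiple of 8 with plain integer arithmetic, since (len + 7) / 8 truncates before the multiply. A standalone sanity check (a sketch; the test values are illustrative, not taken from the PR):

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as roundup_len() in the header above. */
static inline int roundup_len(uint32_t len) {
	/* round up to 8 byte alignment */
	return (len + 7) / 8 * 8;
}

int main(void) {
	assert(roundup_len(1) == 8);   /* (1 + 7) / 8 * 8 = 8 */
	assert(roundup_len(8) == 8);   /* already aligned */
	assert(roundup_len(13) == 16); /* (13 + 7) / 8 = 2, then 2 * 8 = 16 */
	return 0;
}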
9 changes: 3 additions & 6 deletions userspace/libscap/engine/bpf/scap_bpf.h
@@ -20,6 +20,7 @@ limitations under the License.

#include <libscap/compat/bpf.h>
#include <libscap/compat/perf_event.h>
+#include <libscap/scap_barrier.h>

struct perf_event_sample {
struct perf_event_header header;
@@ -49,9 +50,7 @@ static inline void scap_bpf_get_buf_pointers(scap_device *dev,
*phead = header->data_head;
*ptail = header->data_tail;

-	// clang-format off
-	asm volatile("" ::: "memory");
-	// clang-format on
+	mem_barrier();

uint64_t cons = *ptail % header->data_size; // consumer position
uint64_t prod = *phead % header->data_size; // producer position
@@ -154,9 +153,7 @@ static inline void scap_bpf_advance_tail(struct scap_device *dev) {

header = (struct perf_event_mmap_page *)dev->m_buffer;

-	// clang-format off
-	asm volatile("" ::: "memory");
-	// clang-format on
+	mem_barrier();

ASSERT(dev->m_lastreadsize > 0);
/* `header->data_tail` is the consumer position. */
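For context on the two hunks above: the perf ring buffer protocol needs an ordering point in two places — after loading header->data_head and before reading record bytes (scap_bpf_get_buf_pointers), and after consuming records and before publishing header->data_tail (scap_bpf_advance_tail). The removed asm volatile("" ::: "memory") only constrains the compiler; on a weakly ordered CPU such as ARM64 the hardware can still reorder the accesses, which is what the new mem_barrier() addresses. A condensed sketch of the consumer side, simplified from the two functions above (process_records() is a hypothetical placeholder):

#include <stdint.h>
#include <linux/perf_event.h>
#include <libscap/scap_barrier.h> /* provides mem_barrier() */

void process_records(void *data, uint64_t tail, uint64_t head); /* hypothetical */

static void consume_once(struct perf_event_mmap_page *header, void *data) {
	uint64_t head = header->data_head; /* producer position */
	mem_barrier(); /* order the head load before any record reads */

	process_records(data, header->data_tail, head);

	mem_barrier(); /* finish all record reads before releasing the space */
	header->data_tail = head; /* consumer position: producer may reuse it */
}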
61 changes: 59 additions & 2 deletions userspace/libscap/linux/barrier.h
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: Apache-2.0
/*
-Copyright (C) 2023 The Falco Authors.
+Copyright (C) 2024 The Falco Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,4 +17,61 @@ limitations under the License.
*/
#pragma once

-#define mem_barrier() __sync_synchronize()
+// This is taken from kernel headers `/include/linux/compiler.h`
+// Used by libpman and scap_bpf engine
+
+#define READ_ONCE(x) (*(volatile typeof(x) *)&x)
+#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)
+
+#define barrier() asm volatile("" ::: "memory")
+
+#if defined(__x86_64__)
+
+#define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
+
+#define smp_store_release(p, v) \
+	do { \
+		barrier(); \
+		WRITE_ONCE(*p, v); \
+	} while(0)
+
+#define smp_load_acquire(p) \
+	({ \
+		typeof(*p) ___p = READ_ONCE(*p); \
+		barrier(); \
+		___p; \
+	})
+
+#elif defined(__aarch64__)
+
+#define smp_mb() asm volatile("dmb ish" ::: "memory")
+
+#endif
+
+#ifndef smp_mb
+#define smp_mb() __sync_synchronize()
+#endif
+
+#ifndef smp_store_release
+#define smp_store_release(p, v) \
+	do { \
+		smp_mb(); \
+		WRITE_ONCE(*p, v); \
+	} while(0)
+#endif
+
+#ifndef smp_load_acquire
+#define smp_load_acquire(p) \
+	({ \
+		typeof(*p) ___p = READ_ONCE(*p); \
+		smp_mb(); \
+		___p; \
+	})
+#endif
+
+// This is defined by us
+#if defined(__x86_64__)
@Andreagit97 (Member, Author) commented on Sep 13, 2024:
This is used by the scap_kmod and scap_bpf engines, and it uses the same primitives as smp_store_release/smp_load_acquire.

+#define mem_barrier() barrier()
+#else
+#define mem_barrier() smp_mb()
+#endif
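Taken together: x86_64 keeps cheap compiler-only barriers because its TSO memory model already orders ordinary loads and stores, aarch64 gets a real dmb ish, and any other architecture falls back to __sync_synchronize(). A minimal sketch of how the acquire/release pair is meant to be used by a ring buffer consumer (a sketch, not the library's API — the struct and field names are illustrative, and the include path assumes the header installs as <libscap/scap_barrier.h>, matching the includes added above):

#include <libscap/scap_barrier.h> /* assumed install path, per the new includes */

struct ring_positions {
	unsigned long producer_pos; /* advanced by the producer */
	unsigned long consumer_pos; /* written only by this consumer */
};

/* Drain whatever the producer has published; returns the byte count consumed. */
static unsigned long drain(struct ring_positions *r) {
	/* Acquire: every record byte written before the producer advanced
	 * producer_pos is guaranteed visible after this load. */
	unsigned long prod = smp_load_acquire(&r->producer_pos);
	unsigned long cons = r->consumer_pos; /* only this thread writes it */

	/* ... walk and process the records in [cons, prod) ... */

	/* Release: all record reads above complete before the producer can
	 * observe the new consumer_pos and reuse that space. */
	smp_store_release(&r->consumer_pos, prod);
	return prod - cons;
}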