diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1cb45be21b..a260e184bf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,7 @@ Release report: TBD
### Added
+- New testing suite for checking analysisd EPS limitation ([#2947](https://github.com/wazuh/wazuh-qa/pull/3181)) \- (Framework + Tests)
- Add stress results comparator tool ([#3478](https://github.com/wazuh/wazuh-qa/pull/3478)) \- (Tools)
- Add E2E tests for demo cases ([#3293](https://github.com/wazuh/wazuh-qa/pull/3293)) \- (Framework + Tests)
- Add configuration files for Jenkins automation of system/E2E tests ([#3221](https://github.com/wazuh/wazuh-qa/pull/3221)) \- (Framework)
diff --git a/deps/wazuh_testing/setup.py b/deps/wazuh_testing/setup.py
index edabec758e..4f59d6c320 100644
--- a/deps/wazuh_testing/setup.py
+++ b/deps/wazuh_testing/setup.py
@@ -27,7 +27,8 @@
'qa_ctl/deployment/dockerfiles/qa_ctl/*',
'qa_ctl/deployment/vagrantfile_template.txt',
'qa_ctl/provisioning/wazuh_deployment/templates/preloaded_vars.conf.j2',
- 'data/qactl_conf_validator_schema.json'
+ 'data/qactl_conf_validator_schema.json',
+ 'data/all_disabled_ossec.conf'
]
scripts_list = [
diff --git a/deps/wazuh_testing/wazuh_testing/__init__.py b/deps/wazuh_testing/wazuh_testing/__init__.py
index 951447820e..20ea7441ee 100644
--- a/deps/wazuh_testing/wazuh_testing/__init__.py
+++ b/deps/wazuh_testing/wazuh_testing/__init__.py
@@ -18,6 +18,9 @@
else:
WAZUH_PATH = os.path.join("/var", "ossec")
+
+WAZUH_CONF_PATH = os.path.join(WAZUH_PATH, 'etc', 'ossec.conf')
+WAZUH_LOGS_PATH = os.path.join(WAZUH_PATH, 'logs')
CLIENT_KEYS_PATH = os.path.join(WAZUH_PATH, 'etc' if platform.system() == 'Linux' else '', 'client.keys')
DB_PATH = os.path.join(WAZUH_PATH, 'queue', 'db')
QUEUE_DB_PATH = os.path.join(WAZUH_PATH, 'queue', 'db')
@@ -26,12 +29,15 @@
CVE_DB_PATH = os.path.join(WAZUH_PATH, 'queue', 'vulnerabilities', 'cve.db')
LOG_FILE_PATH = os.path.join(WAZUH_PATH, 'logs', 'ossec.log')
ALERTS_JSON_PATH = os.path.join(WAZUH_PATH, 'logs', 'alerts', 'alerts.json')
+ARCHIVES_LOG_PATH = os.path.join(WAZUH_PATH, 'logs', 'archives', 'archives.log')
+ARCHIVES_JSON_PATH = os.path.join(WAZUH_PATH, 'logs', 'archives', 'archives.json')
CPE_HELPER_PATH = os.path.join(WAZUH_PATH, 'queue', 'vulnerabilities', 'dictionaries', 'cpe_helper.json')
WAZUH_API_CONF = os.path.join(WAZUH_PATH, 'api', 'configuration', 'api.yaml')
WAZUH_SECURITY_CONF = os.path.join(WAZUH_PATH, 'api', 'configuration', 'security', 'security.yaml')
API_LOG_FILE_PATH = os.path.join(WAZUH_PATH, 'logs', 'api.log')
API_JSON_LOG_FILE_PATH = os.path.join(WAZUH_PATH, 'logs', 'api.json')
API_LOG_FOLDER = os.path.join(WAZUH_PATH, 'logs', 'api')
+WAZUH_TESTING_PATH = os.path.dirname(os.path.abspath(__file__))
# Daemons
LOGCOLLECTOR_DAEMON = 'wazuh-logcollector'
@@ -52,11 +58,17 @@
API_DAEMONS_REQUIREMENTS = [API_DAEMON, DB_DAEMON, EXEC_DAEMON, ANALYSISD_DAEMON, REMOTE_DAEMON, MODULES_DAEMON]
+# Paths
+SYSLOG_SIMULATOR = os.path.join(WAZUH_TESTING_PATH, 'scripts', 'syslog_simulator.py')
+ANALYSISD_STATE = os.path.join(WAZUH_PATH, 'var', 'run', 'wazuh-analysisd.state')
+
# Timeouts
T_5 = 5
T_10 = 10
T_20 = 20
T_30 = 30
+T_60 = 60
+
# Protocols
UDP = 'UDP'
diff --git a/deps/wazuh_testing/wazuh_testing/data/all_disabled_ossec.conf b/deps/wazuh_testing/wazuh_testing/data/all_disabled_ossec.conf
new file mode 100644
index 0000000000..388eb62feb
--- /dev/null
+++ b/deps/wazuh_testing/wazuh_testing/data/all_disabled_ossec.conf
@@ -0,0 +1,87 @@
+
+
+ yes
+
+
+
+
+ plain
+
+
+
+ secure
+ 1514
+ tcp
+ 131072
+
+
+
+
+ yes
+
+
+
+ yes
+
+
+
+
+ yes
+
+
+
+
+ yes
+
+
+
+ no
+
+
+
+ no
+
+
+
+
+ yes
+
+
+
+
+ ruleset/decoders
+ ruleset/rules
+ 0215-policy_rules.xml
+ etc/lists/audit-keys
+ etc/lists/amazon/aws-eventnames
+ etc/lists/security-eventchannel
+
+
+ etc/decoders
+ etc/rules
+
+
+
+ yes
+ 1
+ 64
+ 15m
+
+
+
+
+ no
+ 1515
+ no
+ yes
+ no
+ HIGH:!ADH:!EXP:!MD5:!RC4:!3DES:!CAMELLIA:@STRENGTH
+
+ no
+ etc/sslmanager.cert
+ etc/sslmanager.key
+ no
+
+
+
+
diff --git a/deps/wazuh_testing/wazuh_testing/modules/analysisd/__init__.py b/deps/wazuh_testing/wazuh_testing/modules/analysisd/__init__.py
new file mode 100644
index 0000000000..d9f79417a0
--- /dev/null
+++ b/deps/wazuh_testing/wazuh_testing/modules/analysisd/__init__.py
@@ -0,0 +1,9 @@
+
+ANALYSISD_PREFIX = r'.*wazuh-analysisd.*'
+MAILD_PREFIX = r'.*wazuh-maild.*'
+QUEUE_EVENTS_SIZE = 16384
+ANALYSISD_ONE_THREAD_CONFIG = {'analysisd.event_threads': '1', 'analysisd.syscheck_threads': '1',
+ 'analysisd.syscollector_threads': '1', 'analysisd.rootcheck_threads': '1',
+ 'analysisd.sca_threads': '1', 'analysisd.hostinfo_threads': '1',
+ 'analysisd.winevt_threads': '1', 'analysisd.rule_matching_threads': '1',
+ 'analysisd.dbsync_threads': '1', 'remoted.worker_pool': '1'}
diff --git a/deps/wazuh_testing/wazuh_testing/modules/analysisd/event_monitor.py b/deps/wazuh_testing/wazuh_testing/modules/analysisd/event_monitor.py
new file mode 100644
index 0000000000..66b488df22
--- /dev/null
+++ b/deps/wazuh_testing/wazuh_testing/modules/analysisd/event_monitor.py
@@ -0,0 +1,83 @@
+import re
+
+from wazuh_testing import T_10, T_20, T_60
+from wazuh_testing.modules.analysisd import ANALYSISD_PREFIX, MAILD_PREFIX
+from wazuh_testing import LOG_FILE_PATH, ANALYSISD_STATE
+from wazuh_testing.tools.monitoring import FileMonitor, generate_monitoring_callback_groups
+
+
+def make_analysisd_callback(pattern, prefix=ANALYSISD_PREFIX):
+ """Create a callback function from a text pattern.
+
+ It already contains the analysisd prefix.
+
+ Args:
+ pattern (str): String to match on the log.
+ prefix (str): regular expression used as a prefix before the pattern.
+
+ Returns:
+ lambda: function that returns if there's a match in the file
+
+ Examples:
+ >>> callback_bionic_update_started = make_analysisd_callback("Starting Ubuntu Bionic database update")
+ """
+ pattern = r'\s+'.join(pattern.split())
+ regex = re.compile(r'{}{}'.format(prefix, pattern))
+
+ return lambda line: regex.match(line) is not None
+
+
+def check_analysisd_event(file_monitor=None, callback='', error_message=None, update_position=True,
+ timeout=T_60, prefix=ANALYSISD_PREFIX, accum_results=1, file_to_monitor=LOG_FILE_PATH):
+ """Check if an analysisd event occurs
+
+ Args:
+ file_monitor (FileMonitor): FileMonitor object to monitor the file content.
+ callback (str): log regex to check in Wazuh log
+ error_message (str): error message to show in case of expected event does not occur
+ update_position (boolean): filter configuration parameter to search in Wazuh log
+ timeout (int): timeout to check the event in Wazuh log
+ prefix (str): log pattern regex
+ accum_results (int): Accumulation of matches.
+ """
+ file_monitor = FileMonitor(file_to_monitor) if file_monitor is None else file_monitor
+ error_message = f"Could not find this event in {file_to_monitor}: {callback}" if error_message is None else \
+ error_message
+
+ file_monitor.start(timeout=timeout, update_position=update_position, accum_results=accum_results,
+ callback=make_analysisd_callback(callback, prefix), error_message=error_message)
+
+
+def check_eps_disabled():
+ """Check if the eps module is disabled"""
+ check_analysisd_event(callback=fr'.*INFO: EPS limit disabled.*', timeout=T_10)
+
+
+def check_eps_enabled(maximum, timeframe):
+ """Check if the eps module is enabled"""
+ check_analysisd_event(callback=fr".*INFO: EPS limit enabled, EPS: '{maximum}', timeframe: '{timeframe}'",
+ timeout=T_10)
+
+
+def check_configuration_error():
+ """Check the configuration error event in ossec.log"""
+ check_analysisd_event(timeout=T_10, callback=r".* \(\d+\): Configuration error at.*",
+ error_message="Could not find the event 'Configuration error at 'etc/ossec.conf' "
+ 'in ossec.log', prefix=MAILD_PREFIX)
+
+
+def get_analysisd_state():
+ """Get the states values of wazuh-analysisd.state file
+
+ Returns:
+ dict: Dictionary with all analysisd state
+ """
+ data = ""
+ with open(ANALYSISD_STATE, 'r') as file:
+ for line in file.readlines():
+ if not line.startswith("#") and not line.startswith('\n'):
+ data = data + line.replace('\'', '')
+ data = data[:-1]
+ analysisd_state = dict((a.strip(), b.strip()) for a, b in (element.split('=') for element in data.split('\n')))
+
+ return analysisd_state
diff --git a/deps/wazuh_testing/wazuh_testing/scripts/simulate_agents.py b/deps/wazuh_testing/wazuh_testing/scripts/simulate_agents.py
index 14e91d0a04..7990097557 100644
--- a/deps/wazuh_testing/wazuh_testing/scripts/simulate_agents.py
+++ b/deps/wazuh_testing/wazuh_testing/scripts/simulate_agents.py
@@ -14,10 +14,8 @@
def parse_custom_labels(labels):
"""Parse the wazuh labels from string list to dict.
-
Args:
labels (list): Labels in format ["key1:value1", "key2:value2"]
-
Returns:
dict: Labels dictionary. {key1:value1, key2:value2}
"""
@@ -36,28 +34,27 @@ def parse_custom_labels(labels):
def process_script_parameters(args):
"""Process script parameters and edit them if necessary.
-
Args:
args (argparse.Namespace): Script args.
"""
# Add keepalive and receive_message modules if they are not specified in script parameters
- if 'keepalive' not in args.modules:
- args.modules.append('keepalive')
- args.modules_eps.append('0')
+ if not args.disable_keepalive:
+ if 'keepalive' not in args.modules:
+ args.modules.append('keepalive')
+ args.modules_eps.append('0')
- if 'receive_messages' not in args.modules:
- args.modules.append('receive_messages')
- args.modules_eps.append('0')
+ if not args.disable_receive:
+ if 'receive_messages' not in args.modules:
+ args.modules.append('receive_messages')
+ args.modules_eps.append('0')
def set_agent_modules_and_eps(agent, active_modules, modules_eps):
"""Set active modules and EPS to an agent.
-
Args:
agent (Agent): agent object.
active_modules (list): List of active modules.
modules_eps (list): List of EPS for each active module.
-
Raises:
ValueError: If number of active_modules items is not the same than the modules_eps.
ValueError: If a module does not exist on the agent simulator.
@@ -88,10 +85,8 @@ def set_agent_modules_and_eps(agent, active_modules, modules_eps):
def create_agents(args):
"""Create a list of agents according to script parameters like the mode, EPS...
-
Args:
args (list): List of script parameters.
-
Returns:
list: List of agents to run.
"""
@@ -114,7 +109,9 @@ def create_agents(args):
for item in distribution_list: # item[0] = modules - item[1] = eps
agent = ag.Agent(manager_address=args.manager_address, os=args.os,
registration_address=args.manager_registration_address,
- version=args.version, fixed_message_size=args.fixed_message_size, labels=custom_labels)
+ version=args.version, fixed_message_size=args.fixed_message_size, labels=custom_labels,
+ logcollector_msg_number=args.enable_logcollector_message_number,
+ custom_logcollector_message=args.custom_logcollector_message)
set_agent_modules_and_eps(agent, item[0].split(' ') + ['keepalive', 'receive_messages'],
item[1].split(' ') + ['0', '0'])
agents.append(agent)
@@ -122,21 +119,22 @@ def create_agents(args):
for _ in range(args.agents_number):
agent = ag.Agent(manager_address=args.manager_address, os=args.os,
registration_address=args.manager_registration_address,
- version=args.version, fixed_message_size=args.fixed_message_size, labels=custom_labels)
+ version=args.version, fixed_message_size=args.fixed_message_size, labels=custom_labels,
+ logcollector_msg_number=args.enable_logcollector_message_number,
+ custom_logcollector_message=args.custom_logcollector_message)
set_agent_modules_and_eps(agent, args.modules, args.modules_eps)
agents.append(agent)
return agents
-def create_injectors(agents, manager_address, protocol):
+def create_injectors(agents, manager_address, protocol, limit_msg=None):
+ """Create injector objects from list of agents and connection parameters.
-
Args:
agents (list): List of agents to create the injectors (1 injector/agent).
manager_address (str): Manager IP address to connect the agents.
protocol (str): TCP or UDP protocol to connect the agents to the manager.
-
+ limit_msg (int): Maximum number of messages to be sent.
Returns:
list: List of injector objects.
"""
@@ -146,45 +144,47 @@ def create_injectors(agents, manager_address, protocol):
for agent in agents:
sender = ag.Sender(manager_address, protocol=protocol)
- injectors.append(ag.Injector(sender, agent))
+ injectors.append(ag.Injector(sender, agent, limit_msg))
return injectors
-def start(injector, time_alive):
+def start(injector, time_alive, limit_msg_enable=None):
"""Start the injector process for a specified time.
-
Args:
injector (Injector): Injector object.
time_alive (int): Period of time in seconds during the injector will be running.
+ limit_msg_enable (int): Number of messages to be sent.
"""
try:
injector.run()
- sleep(time_alive)
+ if limit_msg_enable is None:
+ sleep(time_alive)
+ else:
+ injector.wait()
finally:
stop(injector)
def stop(injector):
"""Stop the injector process.
-
Args:
injector (Injector): Injector object.
"""
injector.stop_receive()
-def run(injectors, time_alive):
+def run(injectors, time_alive, limit_msg_enable=None):
"""Run each injector in a separated process.
-
Args:
injectors (list): List of injector objects.
time_alive (int): Period of time in seconds during the injector will be running.
+ limit_msg_enable (int): Number of messages to be sent.
"""
processes = []
for injector in injectors:
- processes.append(Process(target=start, args=(injector, time_alive)))
+ processes.append(Process(target=start, args=(injector, time_alive, limit_msg_enable)))
for agent_process in processes:
agent_process.start()
@@ -195,16 +195,13 @@ def run(injectors, time_alive):
def calculate_eps_distribution(data, max_eps_per_agent):
"""Calculate the distribution of agents and EPS according to the input ratio.
-
Args:
data (list): List of dictionaries containing information about the module and the remaining EPS to be
distributed.
max_eps_per_agent (int): Maximum EPS load to be distributed to an agent.
-
Returns:
list: List of tuples, containing in the first position the modules to be launched by that agent, and in the
second position the EPS distribution for each module of that agent.
-
Example:
Input:
data =[
@@ -322,6 +319,28 @@ def main():
help='Waiting time in seconds between agent registration and the sending of events.',
required=False, default=0, dest='waiting_connection_time')
+ arg_parser.add_argument('-e', '--limit-msg', metavar='', type=int,
+ help='Limit the amount of message to send to the manager for each module.',
+ required=False, default=None, dest='limit_msg')
+
+ arg_parser.add_argument('-k', '--disable-keepalive', action='store_true',
+ help='Disable keepalive module',
+ required=False, default=False, dest='disable_keepalive')
+
+ arg_parser.add_argument('-d', '--disable-receive', action='store_true',
+ help='Disable receive message module',
+ required=False, default=False, dest='disable_receive')
+
+ arg_parser.add_argument('-c', '--enable-logcollector-message-number',
+ action='store_true',
+ help='Enable logcollector message number',
+ required=False, default=False, dest='enable_logcollector_message_number')
+
+ arg_parser.add_argument('-g', '--custom-logcollector-message',
+ metavar='', type=str,
+ help='Custom logcollector message',
+ required=False, default='', dest='custom_logcollector_message')
+
args = arg_parser.parse_args()
process_script_parameters(args)
@@ -333,9 +352,9 @@ def main():
# Waiting time to prevent CPU overload when registering many agents (registration + event generation).
sleep(args.waiting_connection_time)
- injectors = create_injectors(agents, args.manager_address, args.agent_protocol)
+ injectors = create_injectors(agents, args.manager_address, args.agent_protocol, args.limit_msg)
- run(injectors, args.simulation_time)
+ run(injectors, args.simulation_time, args.limit_msg)
if __name__ == "__main__":
diff --git a/deps/wazuh_testing/wazuh_testing/scripts/syslog_simulator.py b/deps/wazuh_testing/wazuh_testing/scripts/syslog_simulator.py
new file mode 100644
index 0000000000..6513770dc7
--- /dev/null
+++ b/deps/wazuh_testing/wazuh_testing/scripts/syslog_simulator.py
@@ -0,0 +1,120 @@
+import socket
+import argparse
+import sys
+import logging
+import time
+
+
+TCP = 'tcp'
+UDP = 'udp'
+DEFAULT_MESSAGE = 'Login failed: admin, test\n'
+DEFAULT_MESSAGE_SIZE = len(DEFAULT_MESSAGE.encode('utf-8'))
+LOGGER = logging.getLogger('syslog_simulator')
+TCP_LIMIT = 5000
+UDP_LIMIT = 200
+
+
+def set_logging(debug=False):
+ LOGGER.setLevel(logging.DEBUG if debug else logging.INFO)
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(logging.Formatter("%(asctime)s — %(levelname)s — %(message)s"))
+ LOGGER.addHandler(handler)
+
+
+def validate_parameters(parameters):
+ protocol_limit = TCP_LIMIT if parameters.protocol == TCP else UDP_LIMIT
+
+ if parameters.messages_number <= 0:
+ LOGGER.error(f"The number of messages parameter has to be greater than 0")
+ sys.exit(1)
+
+ if parameters.eps > 0 and parameters.eps > protocol_limit:
+ LOGGER.error(f"You can't select eps greater than {protocol_limit}")
+ sys.exit(1)
+
+
+def get_parameters():
+ arg_parser = argparse.ArgumentParser()
+
+ arg_parser.add_argument('-n', '--messages-number', metavar='', type=int,
+ help='Number of messages to send', required=True, default=0,
+ dest='messages_number')
+
+ arg_parser.add_argument('-m', '--message', metavar='', type=str,
+ help='Message to send', required=False, default=DEFAULT_MESSAGE,
+ dest='message')
+
+ arg_parser.add_argument('-a', '--address', metavar='', type=str,
+ help='Sender IP address', required=False, default='localhost',
+ dest='address')
+
+ arg_parser.add_argument('-p', '--port', metavar='', type=int,
+ help='Sender destination port', required=False, default=514,
+ dest='port')
+
+ arg_parser.add_argument('--protocol', metavar='', type=str,
+ help='Sender protocol', required=False, default='tcp', choices=['tcp', 'udp'],
+ dest='protocol')
+
+ arg_parser.add_argument('--numbered-messages', metavar='', required=False, type=int,
+ help='Add number of message at the end of its content starting with the indicated number '
+ 'and increasing by 1 for each of them', dest='numbered_messages', default=-1)
+
+ arg_parser.add_argument('-e', '--eps', metavar='', type=int,
+ help='Event per second', required=False, default=-1, dest='eps')
+
+ arg_parser.add_argument('-d', '--debug', action='store_true', required=False, help='Activate debug logging')
+
+ return arg_parser.parse_args()
+
+
+def send_messages(message, num_messages, eps, numbered_messages=-1, address='localhost', port=514, protocol=TCP):
+ sent_messages = 0
+ custom_message = f"{message}\n" if message[-1] != '\n' else message
+ protocol_limit = TCP_LIMIT if protocol == TCP else UDP_LIMIT
+ speed = eps if eps > 0 else protocol_limit
+
+ LOGGER.info(f"Sending {num_messages} messages to {address}:{port} via {protocol.upper()} ({speed}/s)")
+
+ # Create socket
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM if protocol == TCP else socket.SOCK_DGRAM)
+ if protocol == TCP:
+ sock.connect((address, port))
+
+ try:
+ # Get initial time
+ initial_batch_time = time.time()
+ start_batch_time = time.time()
+
+ # Send the specified number messages
+ while sent_messages < num_messages:
+ # Update the message according to the parameters
+ final_message = f"{custom_message[:-1]} - {sent_messages + numbered_messages}\n" \
+ if numbered_messages != -1 else custom_message
+
+ if protocol == TCP:
+ sock.send(final_message.encode())
+ else:
+ sock.sendto(final_message.encode(), (address, port))
+ sent_messages += 1
+
+ # Wait until next batch
+ if sent_messages % speed == 0:
+ time.sleep(max(0, 1 - (time.time() - start_batch_time)))
+ start_batch_time = time.time()
+
+ LOGGER.info(f"Sent {sent_messages} messages in {round(time.time() - initial_batch_time, 0)}s")
+ finally:
+ sock.close()
+
+
+def main():
+ parameters = get_parameters()
+ set_logging(parameters.debug)
+ validate_parameters(parameters)
+ send_messages(parameters.message, parameters.messages_number, parameters.eps, parameters.numbered_messages,
+ parameters.address, parameters.port, parameters.protocol)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/deps/wazuh_testing/wazuh_testing/tools/__init__.py b/deps/wazuh_testing/wazuh_testing/tools/__init__.py
index 4a018084e1..0a72f39517 100644
--- a/deps/wazuh_testing/wazuh_testing/tools/__init__.py
+++ b/deps/wazuh_testing/wazuh_testing/tools/__init__.py
@@ -1,7 +1,6 @@
# Copyright (C) 2015-2022, Wazuh Inc.
# Created by Wazuh, Inc. .
# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2
-
import os
import platform
import subprocess
diff --git a/deps/wazuh_testing/wazuh_testing/tools/agent_simulator.py b/deps/wazuh_testing/wazuh_testing/tools/agent_simulator.py
index 3b2089b780..feb1e2c267 100644
--- a/deps/wazuh_testing/wazuh_testing/tools/agent_simulator.py
+++ b/deps/wazuh_testing/wazuh_testing/tools/agent_simulator.py
@@ -46,12 +46,10 @@
class Agent:
"""Class that allows us to simulate an agent registered in a manager.
-
This simulated agent allows sending-receiving messages and commands. In order to simulate
syscollector, FIM, FIM Integrity, rootcheck, hostinfo, winevt and logcollector modules the following classes have
been created: GeneratorSyscollector, GeneratorFIM, GeneratorIntegrityFIM, Rootcheck,
GeneratorHostinfo, GeneratorWinevt, Logcollector.
-
Args:
manager_address (str): Manager IP address.
cypher (str, optional): Cypher method. It can be [aes, blowfish]. Default aes.
@@ -68,7 +66,8 @@ class Agent:
authd_password (str), optional: Password for registration if needed.
registration_address (str, optional): Manager registration IP address.
retry_enrollment (bool, optional): retry then enrollment in case of error.
-
+ logcollector_msg_number (bool, optional): include the message number in the logcollector message.
+ custom_logcollector_message (str): Custom logcollector message to be sent by the agent.
Attributes:
id (str): ID of the agent.
name (str): Agent name.
@@ -119,7 +118,8 @@ def __init__(self, manager_address, cypher="aes", os=None, rootcheck_sample=None
rootcheck_eps=100, logcollector_eps=100, authd_password=None, disable_all_modules=False,
rootcheck_frequency=60.0, rcv_msg_limit=0, keepalive_frequency=10.0, sca_frequency=60,
syscollector_frequency=60.0, syscollector_batch_size=10, hostinfo_eps=100, winevt_eps=100,
- fixed_message_size=None, registration_address=None, retry_enrollment=False):
+ fixed_message_size=None, registration_address=None, retry_enrollment=False,
+ logcollector_msg_number=None, custom_logcollector_message=''):
self.id = id
self.name = name
self.key = key
@@ -183,6 +183,8 @@ def __init__(self, manager_address, cypher="aes", os=None, rootcheck_sample=None
self.retry_enrollment = retry_enrollment
self.rcv_msg_queue = Queue(rcv_msg_limit)
self.fixed_message_size = fixed_message_size * 1024 if fixed_message_size is not None else None
+ self.logcollector_msg_number = logcollector_msg_number
+ self.custom_logcollector_message = custom_logcollector_message
self.setup(disable_all_modules=disable_all_modules)
def update_checksum(self, new_checksum):
@@ -213,7 +215,6 @@ def set_os(self):
def set_wpk_variables(self, sha=None, upgrade_exec_result=None, upgrade_notification=False, upgrade_script_result=0,
stage_disconnect=None):
"""Set variables related to wpk simulated responses.
-
Args:
sha (str): Shared key between manager and agent for remote upgrading.
upgrade_exec_result (int): Upgrade result status code.
@@ -262,7 +263,6 @@ def _register_helper(self):
def register(self):
"""Request to register the agent in the manager.
-
In addition, it sets the agent id and agent key with the response data.
"""
if self.retry_enrollment:
@@ -283,13 +283,10 @@ def register(self):
@staticmethod
def wazuh_padding(compressed_event):
"""Add the Wazuh custom padding to each event sent.
-
Args:
compressed_event (bytes): Compressed event with zlib.
-
Returns:
bytes: Padded event.
-
Examples:
>>> wazuh_padding(b'x\\x9c\\x15\\xc7\\xc9\\r\\x00 \\x08\\x04\\xc0\\x96\\\\\\x94\\xcbn0H\\x03\\xda\\x7f
\\x8c\\xf3\\x1b\\xd9e\\xec\\nJ[\\x04N\\xcf\\xa8\\xa6\\xa8\\x12\\x8d\\x08!\\xfe@}\\xb0
@@ -321,10 +318,8 @@ def create_encryption_key(self):
@staticmethod
def compose_event(message):
"""Compose event from raw message.
-
Returns:
bytes: Composed event.
-
Examples:
>>> compose_event('test')
b'6ef859712d8b215d9daf071ff67aaa62555551234567891:5555:test'
@@ -341,13 +336,10 @@ def compose_event(message):
def encrypt(self, padded_event):
"""Encrypt event using AES or Blowfish encryption.
-
Args:
padded_event (bytes): Padded event.
-
Returns:
bytes: Encrypted event.
-
Examples:
>>> agent.encrypt(b'!!!!!!!!x\\x9c\\x15\\xc7\\xc9\\r\\x00 \\x08\\x04\\xc0\\x96\\\\\\x94\\xcbn0H\\x03\\xda
\\x7f\\x8c\\xf3\\x1b\\xd9e\\xec\\nJ[\\x04N\\xcf\\xa8\\xa6\\xa8\\x12\\x8d\\x08!\\xfe@}
@@ -366,11 +358,9 @@ def encrypt(self, padded_event):
def headers(self, agent_id, encrypted_event):
"""
Add event headers for AES or Blowfish Cyphers.
-
Args:
agent_id (str): Agent id.
encrypted_event (str): Encrypted event.
-
Returns:
bytes: Encrypted event with headers.
"""
@@ -383,13 +373,10 @@ def headers(self, agent_id, encrypted_event):
def create_event(self, message):
"""Build an event from a raw string message.
-
Args:
message (str): Raw message.
-
Returns:
bytes: Built event (compressed, padded, enceypted and with headers).
-
Examples:
>>> create_event('test message')
b'!005!#AES:\\xab\\xfa\\xcc2;\\x87\\xab\\x7fUH\\x03>_J\\xda=I\\x96\\xb5\\xa4\\x89\\xbe\\xbf`\\xd0\\xad
@@ -411,7 +398,6 @@ def create_event(self, message):
def receive_message(self, sender):
"""Agent listener to receive messages and process the accepted commands.
-
Args:
sender (Sender): Object to establish connection with the manager socket and receive/send information.
"""
@@ -463,9 +449,7 @@ def stop_receiver(self):
def process_message(self, sender, message):
"""Process agent received messages.
-
If the message contains reserved words, then it will be proceed as command.
-
Args:
sender (Sender): Object to establish connection with the manager socket and receive/send information.
message (str): Decoder message in ISO-8859-1 format.
@@ -481,11 +465,9 @@ def process_message(self, sender, message):
def process_command(self, sender, message_list):
"""Process agent received commands through the socket.
-
Args:
sender (Sender): Object to establish connection with the manager socket and receive/send information.
message_list (list): Message split by white spaces.
-
Raises:
ValueError: if 'sha1' command and sha_key Agent value is not defined.
ValueError: if execution result is not configured in the Agent.
@@ -632,7 +614,6 @@ def create_keep_alive(self):
def initialize_modules(self, disable_all_modules):
"""Initialize and enable agent modules.
-
Args:
disable_all_modules (boolean): True to disable all modules, False to leave the default ones enabled.
"""
@@ -660,7 +641,8 @@ def initialize_modules(self, disable_all_modules):
def init_logcollector(self):
"""Initialize logcollector module."""
if self.logcollector is None:
- self.logcollector = Logcollector()
+ self.logcollector = Logcollector(enable_msg_number=self.logcollector_msg_number,
+ custom_logcollector_message=self.custom_logcollector_message)
def init_sca(self):
"""Initialize init_sca module."""
@@ -675,7 +657,8 @@ def init_syscollector(self):
def init_rootcheck(self):
"""Initialize rootcheck module."""
if self.rootcheck is None:
- self.rootcheck = Rootcheck(os = self.os, agent_name = self.name, agent_id = self.id, rootcheck_sample = self.rootcheck_sample)
+ self.rootcheck = Rootcheck(os=self.os, agent_name=self.name, agent_id=self.id,
+ rootcheck_sample=self.rootcheck_sample)
def init_fim(self):
"""Initialize fim module."""
@@ -711,7 +694,6 @@ def get_agent_version(self):
def get_connection_status(self):
"""Get agent connection status of global.db.
-
Returns:
str: Agent connection status (connected, disconnected, never_connected)
"""
@@ -720,7 +702,6 @@ def get_connection_status(self):
@retry(AttributeError, attempts=10, delay=5, delay_multiplier=1)
def wait_status_active(self):
"""Wait until agent status is active in global.db.
-
Raises:
AttributeError: If the agent is not active. Combined with the retry decorator makes a wait loop
until the agent is active.
@@ -733,7 +714,6 @@ def wait_status_active(self):
def set_module_status(self, module_name, status):
"""Set module status.
-
Args:
module_name (str): Module name.
status (str): Module status.
@@ -752,19 +732,15 @@ def set_module_attribute(self, module_name, attribute, value):
class GeneratorSyscollector:
"""This class allows the generation of syscollector events.
-
Create events of different syscollector event types Network, Process, Port, Packages, OS, Hardware and Hotfix.
In order to change messages events it randomized different fields of templates specified by .
In order to simulate syscollector module, it send a set of the same syscollector type messages,
which size is specified by `batch_size` attribute. Example of syscollector message:
-
d:syscollector:{"type":"network","ID":18,"timestamp":"2021/03/26 00:00:00","iface":{"name":"O977Q1F55O",
"type":"ethernet","state":"up","MAC":"08:00:27:be:ce:3a","tx_packets":2135,"rx_packets":9091,"tx_bytes":210748,
"rx_bytes":10134272,"tx_errors":0,"rx_errors":0,"tx_dropped":0,"rx_dropped":0,"MTU":1500,"IPv4":
{"address":["10.0.2.15"],"netmask":["255.255.255.0"],"broadcast":["10.0.2.255"],
"metric":100,"gateway":"10.0.2.2","DHCP":"enabled"}}}
-
-
Args:
agent_name (str): Name of the agent.
batch_size (int): Number of messages of the same type
@@ -782,10 +758,8 @@ def __init__(self, agent_name, batch_size):
def format_event(self, message_type):
"""Format syscollector message of the specified type.
-
Args:
message_type (str): Syscollector event type.
-
Returns:
str: the generated syscollector event message.
"""
@@ -827,10 +801,8 @@ def format_event(self, message_type):
def generate_event(self):
"""Generate syscollector event.
-
The event types are selected sequentially, creating a number of events of the same type specified
in `bath_size`.
-
Returns:
str: generated event with the desired format for syscollector
"""
@@ -850,9 +822,7 @@ def generate_event(self):
class SCA:
"""This class allows the generation of sca_label events.
-
Create sca events, both summary and check.
-
Args:
os (str): Agent operative system.
"""
@@ -866,7 +836,6 @@ def __init__(self, os):
def get_message(self):
"""Alternatively creates summary and check SCA messages.
-
Returns:
str: an sca_label message formatted with the required header codes.
"""
@@ -884,10 +853,8 @@ def get_message(self):
def create_sca_event(self, event_type):
"""Create sca_label event of the desired type.
-
Args:
event_type (str): Event type summary or check.
-
Returns:
dict: SCA event.
"""
@@ -957,9 +924,7 @@ def create_check_sca_event(event_data):
class Rootcheck:
"""This class allows the generation of rootcheck events.
-
Creates rootcheck events by sequentially repeating the events of a sample file file.
-
Args:
agent_name (str): Name of the agent.
agent_id (str): Id of the agent.
@@ -994,7 +959,6 @@ def setup(self):
def get_message(self):
"""Returns a rootcheck message, informing when rootcheck scan starts and ends.
-
Returns:
str: a Rootcheck generated message
"""
@@ -1011,26 +975,36 @@ def get_message(self):
class Logcollector:
"""This class allows the generation of logcollector events."""
- def __init__(self):
+ def __init__(self, enable_msg_number=None, custom_logcollector_message=''):
self.logcollector_tag = 'syslog'
self.logcollector_mq = 'x'
+ # These variables were added only to the logcollector module to perform EPS tests that need numbered messages.
+ self.message_counter = 0
+ self.enable_msg_number = enable_msg_number
+ self.custom_logcollector_message = custom_logcollector_message
def generate_event(self):
"""Generate logcollector event
-
Returns:
str: a Logcollector generated message
"""
- log = 'Mar 24 10:12:36 centos8 sshd[12249]: Invalid user random_user from 172.17.1.1 port 56550'
+ if not self.custom_logcollector_message:
+ log = 'Mar 24 10:12:36 centos8 sshd[12249]: Invalid user random_user from 172.17.1.1 port 56550'
+ else:
+ log = self.custom_logcollector_message
- message = f"{self.logcollector_mq}:{self.logcollector_tag}:{log}"
+ if self.enable_msg_number:
+ message_counter_info = f"Message number: {self.message_counter}"
+ message = f"{self.logcollector_mq}:{self.logcollector_tag}:{log}:{message_counter_info}"
+ self.message_counter = self.message_counter + 1
+ else:
+ message = f"{self.logcollector_mq}:{self.logcollector_tag}:{log}"
return message
class GeneratorIntegrityFIM:
"""This class allows the generation of fim_integrity events.
-
Args:
agent_id (str): The id of the agent.
agent_name (str): The name of the agent.
@@ -1046,7 +1020,6 @@ def __init__(self, agent_id, agent_name, agent_version):
def format_message(self, message):
"""Format FIM integrity message.
-
Args:
message (str): Integrity fim event.
"""
@@ -1054,7 +1027,6 @@ def format_message(self, message):
def generate_message(self):
"""Generate integrity FIM message according to `event_type` attribute.
-
Returns:
str: an IntegrityFIM formatted message
"""
@@ -1084,7 +1056,6 @@ def generate_message(self):
def get_message(self, event_type=None):
"""Generate a random kind of integrity FIM message according to `event_type` attribute.
-
Returns:
str: an IntegrityFIM formatted message
"""
@@ -1099,11 +1070,9 @@ def get_message(self, event_type=None):
class GeneratorHostinfo:
"""This class allows the generation of hostinfo events.
-
Creates hostinfo events, randomizing an open port detection template event on a host.
It randomizes the host, as well as the ports and their protocol. The number of open ports of the event is a
random number from 1 to 10. Example of hostinfo message:
-
3:/var/log/nmap.log:Host: 95.211.24.108 (), open ports: 43270 (udp) 37146 (tcp) 19885 (tcp)
"""
def __init__(self):
@@ -1114,7 +1083,6 @@ def __init__(self):
def generate_event(self):
""""Generates an arbitrary hostinfo message
-
Returns:
str: an hostinfo formatted message
"""
@@ -1133,12 +1101,9 @@ def generate_event(self):
class GeneratorWinevt:
"""This class allows the generation of winevt events.
-
Create events of the different winevt channels: System, Security, Application, Windows-Defender and Sysmon.
It uses template events (`data/winevt.py`) for which the `EventID` field is randomized. Message structure:
-
f:EventChannel:{"Message":"","Event":""}
-
Args:
agent_name (str): Name of the agent.
agent_id (str): ID of the agent.
@@ -1161,13 +1126,10 @@ def __init__(self, agent_name, agent_id):
def generate_event(self, winevt_type=None):
"""Generate Windows event.
-
Generate the desired type of Windows event (winevt). If no type of winvt message is provided,
all winvt message types will be generated sequentially.
-
Args:
winevt_type (str): Winevt type message `system, security, application, windows-defender, sysmon`.
-
Returns:
str: an windows event generated message.
"""
@@ -1183,7 +1145,6 @@ def generate_event(self, winevt_type=None):
class GeneratorFIM:
"""This class allows the generation of FIM events.
-
Args:
agent_id (str): The id of the agent.
agent_name (str): The name of the agent.
@@ -1221,7 +1182,6 @@ def __init__(self, agent_id, agent_name, agent_version):
def random_file(self):
"""Initialize file attribute.
-
Returns:
str: the new randomized file for the instance
"""
@@ -1230,7 +1190,6 @@ def random_file(self):
def random_size(self):
"""Initialize file size with random value
-
Returns:
str: the new randomized file size for the instance
"""
@@ -1239,7 +1198,6 @@ def random_size(self):
def random_mode(self):
"""Initialize module attribute with `S_IFREG` or `S_IFLNK`
-
Returns:
self._mode: the new randomized file mode for the instance
"""
@@ -1257,7 +1215,6 @@ def random_mode(self):
def random_uid(self):
"""Initialize uid attribute with random value.
-
Returns:
str: the new randomized file uid for the instance
"""
@@ -1267,7 +1224,6 @@ def random_uid(self):
def random_gid(self):
"""Initialize gid attribute with random value.
-
Returns:
str: the new randomized gid for the instance,
str: the new randomized gname for the instance.
@@ -1366,7 +1322,6 @@ def check_changed_attributes(self, attributes, old_attributes):
def get_attributes(self):
"""Return GeneratorFIM attributes.
-
Returns:
dict: instance attributes.
"""
@@ -1385,7 +1340,6 @@ def format_message(self, message):
"""Format FIM message.
Args:
message (str): FIM message.
-
Returns:
str: generated message with the required FIM header.
"""
@@ -1404,7 +1358,6 @@ def format_message(self, message):
def generate_message(self):
"""Generate FIM event based on `event_type` and `agent_version` attribute.
-
Returns:
str: generated message with the required FIM header.
"""
@@ -1451,7 +1404,6 @@ def get_message(self, event_mode=None, event_type=None):
Args:
event_mode (str): Event mode `real-time, whodata, scheduled`.
event_type (str): Event type `added, modified, deleted`.
-
Returns:
str: generated message.
"""
@@ -1472,13 +1424,11 @@ def get_message(self, event_mode=None, event_type=None):
class Sender:
"""This class sends events to the manager through a socket.
-
Attributes:
manager_address (str): IP of the manager.
manager_port (str, optional): port used by remoted in the manager.
protocol (str, optional): protocol used by remoted. TCP or UDP.
socket (socket): sock_stream used to connect with remoted.
-
Examples:
To create a Sender, you need to create an agent first, and then, create the sender. Finally, to send messages
you will need to use both agent and sender to create an injector.
@@ -1516,18 +1466,16 @@ def send_event(self, event):
class Injector:
"""This class simulates a daemon used to send and receive messages with the manager.
-
Each `Agent` needs an injector and a sender to be able to communicate with the manager. This class will create
a thread using `InjectorThread` which will behave similarly to an UNIX daemon. The `InjectorThread` will
send and receive the messages using the `Sender`
-
Attributes:
sender (Sender): sender used to connect to the sockets and send messages.
agent (agent): agent owner of the injector and the sender.
thread_number (int): total number of threads created. This may change depending on the modules used in the
agent.
threads (list): list containing all the threads created.
-
+ limit_msg (int): Maximum number of messages to be sent.
Examples:
To create an Injector, you need to create an agent, a sender and then, create the injector using both of them.
>>> import wazuh_testing.tools.agent_simulator as ag
@@ -1538,16 +1486,17 @@ class Injector:
>>> injector.run()
"""
- def __init__(self, sender, agent):
+ def __init__(self, sender, agent, limit=None):
self.sender = sender
self.agent = agent
+ self.limit_msg = limit
self.thread_number = 0
self.threads = []
for module, config in self.agent.modules.items():
if config["status"] == "enabled":
self.threads.append(
InjectorThread(self.thread_number, f"Thread-{self.agent.id}{module}", self.sender,
- self.agent, module))
+ self.agent, module, self.limit_msg))
self.thread_number += 1
def run(self):
@@ -1565,10 +1514,13 @@ def stop_receive(self):
self.sender.socket.shutdown(socket.SHUT_RDWR)
self.sender.socket.close()
+ def wait(self):
+ for thread in range(self.thread_number):
+ self.threads[thread].join()
+
class InjectorThread(threading.Thread):
"""This class creates a thread who will create and send the events to the manager for each module.
-
Attributes:
thread_id (int): ID of the thread.
name (str): name of the thread. It is composed as Thread-{agent.id}{module}.
@@ -1576,8 +1528,9 @@ class InjectorThread(threading.Thread):
agent (Agent): agent owner of the injector and the sender.
module (str): module used to send events (fim, syscollector, etc).
stop_thread (int): 0 if the thread is running, 1 if it is stopped.
+ limit_msg (int): Maximum number of messages to be sent.
"""
- def __init__(self, thread_id, name, sender, agent, module):
+ def __init__(self, thread_id, name, sender, agent, module, limit_msg=None):
super(InjectorThread, self).__init__()
self.thread_id = thread_id
self.name = name
@@ -1586,6 +1539,7 @@ def __init__(self, thread_id, name, sender, agent, module):
self.totalMessages = 0
self.module = module
self.stop_thread = 0
+ self.limit_msg = limit_msg
def keep_alive(self):
"""Send a keep alive message from the agent to the manager."""
@@ -1615,7 +1569,6 @@ def keep_alive(self):
def run_module(self, module):
"""Send a module message from the agent to the manager.
-
Args:
module (str): Module name
"""
@@ -1668,12 +1621,19 @@ def run_module(self, module):
char_size = getsizeof(event_msg[0]) - getsizeof('')
event_msg += 'A' * (dummy_message_size//char_size)
+ # Add message limitation
+ if self.limit_msg:
+ if self.totalMessages >= self.limit_msg:
+ self.stop_thread = 1
+ break
+
event = self.agent.create_event(event_msg)
self.sender.send_event(event)
self.totalMessages += 1
sent_messages += 1
if self.totalMessages % eps == 0:
sleep(1.0 - ((time() - start_time) % 1.0))
+
if frequency > 1:
sleep(frequency - ((time() - start_time) % frequency))
@@ -1701,9 +1661,7 @@ def stop_rec(self):
def create_agents(agents_number, manager_address, cypher='aes', fim_eps=100, authd_password=None, agents_os=None,
agents_version=None, disable_all_modules=False):
"""Create a list of generic agents
-
This will create a list with `agents_number` amount of agents. All of them will be registered in the same manager.
-
Args:
agents_number (int): total number of agents.
manager_address (str): IP address of the manager.
@@ -1713,7 +1671,6 @@ def create_agents(agents_number, manager_address, cypher='aes', fim_eps=100, aut
agents_os (list, optional): list containing different operative systems for the agents.
agents_version (list, optional): list containing different version of the agent.
disable_all_modules (boolean): Disable all simulated modules for this agent.
-
Returns:
list: list of the new virtual agents.
"""
@@ -1734,7 +1691,6 @@ def create_agents(agents_number, manager_address, cypher='aes', fim_eps=100, aut
def connect(agent, manager_address='localhost', protocol=TCP, manager_port='1514'):
"""Connects an agent to the manager
-
Args:
agent (Agent): agent to connect.
manager_address (str): address of the manager. It can be an IP or a DNS.
diff --git a/deps/wazuh_testing/wazuh_testing/tools/configuration.py b/deps/wazuh_testing/wazuh_testing/tools/configuration.py
index 79166ce962..942d8c6953 100644
--- a/deps/wazuh_testing/wazuh_testing/tools/configuration.py
+++ b/deps/wazuh_testing/wazuh_testing/tools/configuration.py
@@ -13,7 +13,8 @@
from typing import List, Any, Set
from wazuh_testing import global_parameters, logger
-from wazuh_testing.tools import WAZUH_PATH, GEN_OSSEC, WAZUH_CONF, PREFIX, WAZUH_LOCAL_INTERNAL_OPTIONS, AGENT_CONF
+from wazuh_testing.tools import WAZUH_PATH, GEN_OSSEC, WAZUH_CONF, PREFIX, WAZUH_LOCAL_INTERNAL_OPTIONS, AGENT_CONF, \
+ LOCAL_RULES_PATH
from wazuh_testing import global_parameters, logger
from wazuh_testing.tools import file
@@ -742,3 +743,56 @@ def update_configuration_template(configurations, old_values, new_values):
configurations_to_update = configurations_to_update.replace(old_value, new_value)
return json.loads(configurations_to_update)
+
+
+def get_configuration(data_file_path):
+ """Load configuration from file.
+
+ Args:
+ data_file_path (str): Configuration file path.
+
+ Returns:
+ dict: Configuration parameters of the test cases.
+ """
+ configuration_file = file.read_yaml(data_file_path)
+ configuration_parameters = {}
+
+ for test_case in configuration_file:
+ configuration_parameters.update(test_case['configuration_parameters'])
+
+ return configuration_parameters
+
+
+def get_wazuh_local_rules():
+ """
+ Get current `local_rules.xml` file content.
+
+ Returns:
+ List(str): A list containing all the lines of the `local_rules.xml` file.
+ """
+ with open(LOCAL_RULES_PATH) as file:
+ lines = file.readlines()
+
+ return lines
+
+
+def write_wazuh_local_rules(local_rules: List[str]):
+ """
+ Write new rules in 'local_rules.xml' file.
+
+ Args:
+ local_rules (list or str): Lines to be written in the local_rules.xml file.
+ """
+ with open(LOCAL_RULES_PATH, 'w') as f:
+ f.writelines(local_rules)
+
+
+def get_minimal_configuration():
+ """Get the wazuh minimal configuration data.
+
+ Returns:
+ str: Wazuh minimal configuration data.
+ """
+ configuration = file.read_file(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../', 'data',
+ 'all_disabled_ossec.conf'))
+ return configuration
diff --git a/deps/wazuh_testing/wazuh_testing/tools/file.py b/deps/wazuh_testing/wazuh_testing/tools/file.py
index db633cd232..7e943290b6 100644
--- a/deps/wazuh_testing/wazuh_testing/tools/file.py
+++ b/deps/wazuh_testing/wazuh_testing/tools/file.py
@@ -14,6 +14,7 @@
import string
import xml.etree.ElementTree as ET
import zipfile
+import re
import filetype
import requests
@@ -21,7 +22,6 @@
from wazuh_testing import logger
-
def read_json(file_path):
"""
Read a JSON file from a given path, return a dictionary with the json data.
@@ -129,6 +129,7 @@ def random_string(length, encode=None):
return st
+
def generate_string(stringLength=10, character='0'):
"""Generate a string with line breaks.
@@ -154,6 +155,7 @@ def generate_string(stringLength=10, character='0'):
return generated_string
+
def read_file(file_path):
with open(file_path) as f:
data = f.read()
@@ -516,3 +518,26 @@ def download_text_file(file_url, local_destination_path):
def get_file_lines(path):
with open(path, "r+") as file_to_read:
return file_to_read.readlines()
+
+
+def replace_regex_in_file(search_regex, replace_regex, file_path):
+ """Perform replacements in a file's content according to the specified regexes.
+
+ Args:
+ search_regex (list(str)): Search regex list.
+ replace_regex (list(str)): Replacements regex list.
+ file_path (str): File path to read and update.
+ """
+ if (len(search_regex) != len(replace_regex)):
+ raise ValueError('search_regex has to have the same number of items than replace_regex. '
+ f"{len(search_regex)} != {len(replace_regex)}")
+
+ # Read the file content
+ file_data = read_file(file_path)
+
+ # Perform the replacements
+ for search, replace in zip(search_regex, replace_regex):
+ file_data = re.sub(search, replace, file_data)
+
+ # Write the file data
+ write_file(file_path, file_data)
diff --git a/deps/wazuh_testing/wazuh_testing/tools/monitoring.py b/deps/wazuh_testing/wazuh_testing/tools/monitoring.py
index faf5628b78..86d0e2b6b3 100644
--- a/deps/wazuh_testing/wazuh_testing/tools/monitoring.py
+++ b/deps/wazuh_testing/wazuh_testing/tools/monitoring.py
@@ -1081,3 +1081,20 @@ def callback_authd_startup(line):
if 'Accepting connections on port 1515' in line:
return line
return None
+
+
+def generate_monitoring_callback_groups(regex):
+ """
+ Generates a new callback that looks for a specific pattern in a given line.
+ If it finds a match, it returns the matched groups.
+ Args:
+ regex (str): regex to use to look for a match.
+ """
+ def new_callback(line):
+ match = re.match(regex, line)
+ if match:
+ if match.groups() is not None:
+ return match.groups()
+ return True
+
+ return new_callback
diff --git a/deps/wazuh_testing/wazuh_testing/tools/run_simulator.py b/deps/wazuh_testing/wazuh_testing/tools/run_simulator.py
new file mode 100644
index 0000000000..9d0e251fd5
--- /dev/null
+++ b/deps/wazuh_testing/wazuh_testing/tools/run_simulator.py
@@ -0,0 +1,25 @@
+import subprocess
+import sys
+
+from wazuh_testing import SYSLOG_SIMULATOR
+
+
+def syslog_simulator(parameters):
+ """Run the syslog simulator tool.
+
+ Args:
+ parameters (dict): Script parameters.
+ """
+ python_executable = sys.executable
+ run_parameters = f"{python_executable} {SYSLOG_SIMULATOR} "
+ run_parameters += f"-a {parameters['address']} " if 'address' in parameters else ''
+ run_parameters += f"-e {parameters['eps']} " if 'eps' in parameters else ''
+ run_parameters += f"--protocol {parameters['protocol']} " if 'protocol' in parameters else ''
+ run_parameters += f"-n {parameters['messages_number']} " if 'messages_number' in parameters else ''
+ run_parameters += f"-m '{parameters['message']}' " if 'message' in parameters else ''
+ run_parameters += f"--numbered-messages {parameters['numbered_messages']} " if 'numbered_messages' in parameters \
+ else ''
+ run_parameters = run_parameters.strip()
+
+ # Run the syslog simulator tool with custom parameters
+ subprocess.call(run_parameters, shell=True)
diff --git a/requirements.txt b/requirements.txt
index f6e6159add..dff1905040 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,14 +21,16 @@ py~=1.10.0
pycryptodome>=3.9.8
pyOpenSSL==19.1.0
pytest-html==3.1.1
-pytest==6.2.5
+pytest==6.2.2 ; python_version <= "3.9"
+pytest==7.1.2 ; python_version >= "3.10"
pyyaml==5.4
requests>=2.23.0
scipy>=1.0; platform_system == "Linux" or platform_system == "Darwin" or platform_system=='Windows'
seaborn>=0.11.1; platform_system == "Linux" or platform_system == "Darwin" or platform_system=='Windows'
setuptools~=56.0.0
testinfra==5.0.0
-jq>=1.1.2; platform_system == "Linux" or platform_system == "Darwin"
+jq==1.1.2 ; (platform_system == "Linux" or platform_system == "Darwin") and python_version <= "3.9"
+jq==1.2.2 ; python_version >= "3.10"
cryptography==3.3.2; platform_system == "Linux" or platform_system == "Darwin" or platform_system=='Windows'
urllib3>=1.26.5
numpydoc>=1.1.0
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index fdb1a180b8..1a94061e3a 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -15,7 +15,7 @@
from py.xml import html
import wazuh_testing.tools.configuration as conf
-from wazuh_testing import global_parameters, logger, ALERTS_JSON_PATH
+from wazuh_testing import global_parameters, logger, ALERTS_JSON_PATH, ARCHIVES_LOG_PATH, ARCHIVES_JSON_PATH
from wazuh_testing.logcollector import create_file_structure, delete_file_structure
from wazuh_testing.tools import LOG_FILE_PATH, WAZUH_CONF, get_service, ALERT_FILE_PATH, WAZUH_LOCAL_INTERNAL_OPTIONS
from wazuh_testing.tools.configuration import get_wazuh_conf, set_section_wazuh_conf, write_wazuh_conf
@@ -26,6 +26,8 @@
from wazuh_testing import mocking
from wazuh_testing.db_interface.agent_db import update_os_info
from wazuh_testing.db_interface.global_db import get_system, modify_system
+from wazuh_testing.tools.run_simulator import syslog_simulator
+from wazuh_testing.tools.configuration import get_minimal_configuration
if sys.platform == 'win32':
@@ -138,6 +140,24 @@ def restart_wazuh_daemon_after_finishing(daemon=None):
control_service("restart", daemon=daemon)
+@pytest.fixture(scope='function')
+def restart_wazuh_daemon_after_finishing_function(daemon=None):
+ """
+ Restart a Wazuh daemon after the test finishes, truncating ossec.log first.
+ """
+ yield
+ truncate_file(LOG_FILE_PATH)
+ control_service("restart", daemon=daemon)
+
+
+@pytest.fixture(scope='function')
+def restart_analysisd_function():
+ """Restart wazuh-analysisd daemon before starting a test, and stop it after finishing"""
+ control_service('restart', daemon='wazuh-analysisd')
+ yield
+ control_service('stop', daemon='wazuh-analysisd')
+
+
@pytest.fixture(scope='module')
def reset_ossec_log(get_configuration, request):
# Reset ossec.log and start a new monitor
@@ -545,6 +565,67 @@ def connect_to_sockets_configuration(request, get_configuration):
close_sockets(receiver_sockets)
+@pytest.fixture(scope='module')
+def configure_local_internal_options_module(request):
+ """Fixture to configure the local internal options file.
+
+ It uses the test variable local_internal_options. This should be
+ a dictionary whose keys and values correspond to the internal option configuration. For example:
+ local_internal_options = {'monitord.rotate_log': '0', 'syscheck.debug': '0' }
+ """
+ try:
+ local_internal_options = request.param
+ except AttributeError:
+ try:
+ local_internal_options = getattr(request.module, 'local_internal_options')
+ except AttributeError:
+ logger.debug('local_internal_options is not set')
+ raise AttributeError('Error when using the fixture "configure_local_internal_options_module", no '
+ 'parameter has been passed explicitly, nor is the variable local_internal_options '
+ 'found in the module.') from AttributeError
+
+ backup_local_internal_options = conf.get_local_internal_options_dict()
+
+ logger.debug(f"Set local_internal_option to {str(local_internal_options)}")
+ conf.set_local_internal_options_dict(local_internal_options)
+
+ yield local_internal_options
+
+ logger.debug(f"Restore local_internal_option to {str(backup_local_internal_options)}")
+ conf.set_local_internal_options_dict(backup_local_internal_options)
+
+
+@pytest.fixture(scope='function')
+def configure_local_internal_options_function(request):
+ """Fixture to configure the local internal options file.
+
+ It uses the test variable local_internal_options. This should be
+ a dictionary whose keys and values correspond to the internal option configuration. For example:
+ local_internal_options = {'monitord.rotate_log': '0', 'syscheck.debug': '0' }
+ """
+ try:
+ local_internal_options = request.param
+ except AttributeError:
+ try:
+ local_internal_options = getattr(request.module, 'local_internal_options')
+ except AttributeError:
+ logger.debug('local_internal_options is not set')
+ raise AttributeError('Error when using the fixture "configure_local_internal_options_module", no '
+ 'parameter has been passed explicitly, nor is the variable local_internal_options '
+ 'found in the module.') from AttributeError
+
+ backup_local_internal_options = conf.get_local_internal_options_dict()
+
+ logger.debug(f"Set local_internal_option to {str(local_internal_options)}")
+ conf.set_local_internal_options_dict(local_internal_options)
+
+ yield
+
+ logger.debug(f"Restore local_internal_option to {str(backup_local_internal_options)}")
+ conf.set_local_internal_options_dict(backup_local_internal_options)
+
+
+# DEPRECATED
@pytest.fixture(scope='module')
def configure_local_internal_options(get_local_internal_options):
"""Configure Wazuh local internal options.
@@ -911,31 +992,6 @@ def file_monitoring(request):
logger.debug(f"Trucanted {file_to_monitor}")
-@pytest.fixture(scope='module')
-def configure_local_internal_options_module(request):
- """Fixture to configure the local internal options file.
-
- It uses the test variable local_internal_options. This should be
- a dictionary wich keys and values corresponds to the internal option configuration, For example:
- local_internal_options = {'monitord.rotate_log': '0', 'syscheck.debug': '0' }
- """
- try:
- local_internal_options = getattr(request.module, 'local_internal_options')
- except AttributeError as local_internal_configuration_not_set:
- logger.debug('local_internal_options is not set')
- raise local_internal_configuration_not_set
-
- backup_local_internal_options = conf.get_local_internal_options_dict()
-
- logger.debug(f"Set local_internal_option to {str(local_internal_options)}")
- conf.set_local_internal_options_dict(local_internal_options)
-
- yield
-
- logger.debug(f"Restore local_internal_option to {str(backup_local_internal_options)}")
- conf.set_local_internal_options_dict(backup_local_internal_options)
-
-
@pytest.fixture(scope='function')
def set_wazuh_configuration(configuration):
"""Set wazuh configuration
@@ -961,31 +1017,6 @@ def set_wazuh_configuration(configuration):
conf.write_wazuh_conf(backup_config)
-@pytest.fixture(scope='function')
-def configure_local_internal_options_function(request):
- """Fixture to configure the local internal options file.
-
- It uses the test variable local_internal_options. This should be
- a dictionary wich keys and values corresponds to the internal option configuration, For example:
- local_internal_options = {'monitord.rotate_log': '0', 'syscheck.debug': '0' }
- """
- try:
- local_internal_options = getattr(request.module, 'local_internal_options')
- except AttributeError as local_internal_configuration_not_set:
- logger.debug('local_internal_options is not set')
- raise local_internal_configuration_not_set
-
- backup_local_internal_options = conf.get_local_internal_options_dict()
-
- logger.debug(f"Set local_internal_option to {str(local_internal_options)}")
- conf.set_local_internal_options_dict(local_internal_options)
-
- yield
-
- logger.debug(f"Restore local_internal_option to {str(backup_local_internal_options)}")
- conf.set_local_internal_options_dict(backup_local_internal_options)
-
-
@pytest.fixture(scope='function')
def truncate_monitored_files():
"""Truncate all the log files and json alerts files before and after the test execution"""
@@ -1169,3 +1200,35 @@ def create_file(new_file_path):
yield
remove_file(new_file_path)
+
+
+@pytest.fixture(scope='session')
+def load_wazuh_basic_configuration():
+ """Load a new basic configuration to the manager"""
+ # Load ossec.conf with all disabled settings
+ minimal_configuration = get_minimal_configuration()
+
+ # Make a backup from current configuration
+ backup_ossec_configuration = get_wazuh_conf()
+
+ # Write new configuration
+ write_wazuh_conf(minimal_configuration)
+
+ yield
+
+ # Restore the ossec.conf backup
+ write_wazuh_conf(backup_ossec_configuration)
+
+
+@pytest.fixture(scope='function')
+def truncate_event_logs():
+ """Truncate all the event log files"""
+ log_files = [ARCHIVES_LOG_PATH, ARCHIVES_JSON_PATH]
+
+ for log_file in log_files:
+ truncate_file(log_file)
+
+ yield
+
+ for log_file in log_files:
+ truncate_file(log_file)
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/basic_test_module/configuration_disabled.yaml b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/basic_test_module/configuration_disabled.yaml
new file mode 100644
index 0000000000..3f3f7143e2
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/basic_test_module/configuration_disabled.yaml
@@ -0,0 +1,5 @@
+- sections:
+ - section: vulnerability-detector
+ elements:
+ - enabled:
+ value: 'no'
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/basic_test_module/configuration_enabled.yaml b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/basic_test_module/configuration_enabled.yaml
new file mode 100644
index 0000000000..abca5be254
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/basic_test_module/configuration_enabled.yaml
@@ -0,0 +1,11 @@
+- sections:
+ - section: global
+ elements:
+ - limits:
+ elements:
+ - eps:
+ elements:
+ - maximum:
+ value: MAXIMUM
+ - timeframe:
+ value: TIMEFRAME
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/configuration_test_module/configuration_accepted_values.yaml b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/configuration_test_module/configuration_accepted_values.yaml
new file mode 100644
index 0000000000..abca5be254
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/configuration_test_module/configuration_accepted_values.yaml
@@ -0,0 +1,11 @@
+- sections:
+ - section: global
+ elements:
+ - limits:
+ elements:
+ - eps:
+ elements:
+ - maximum:
+ value: MAXIMUM
+ - timeframe:
+ value: TIMEFRAME
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/configuration_test_module/configuration_invalid_values.yaml b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/configuration_test_module/configuration_invalid_values.yaml
new file mode 100644
index 0000000000..abca5be254
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/configuration_test_module/configuration_invalid_values.yaml
@@ -0,0 +1,11 @@
+- sections:
+ - section: global
+ elements:
+ - limits:
+ elements:
+ - eps:
+ elements:
+ - maximum:
+ value: MAXIMUM
+ - timeframe:
+ value: TIMEFRAME
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/configuration_test_module/configuration_missing_configuration.yaml b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/configuration_test_module/configuration_missing_configuration.yaml
new file mode 100644
index 0000000000..abca5be254
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/configuration_test_module/configuration_missing_configuration.yaml
@@ -0,0 +1,11 @@
+- sections:
+ - section: global
+ elements:
+ - limits:
+ elements:
+ - eps:
+ elements:
+ - maximum:
+ value: MAXIMUM
+ - timeframe:
+ value: TIMEFRAME
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_drop_events_when_queue_is_full.yaml b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_drop_events_when_queue_is_full.yaml
new file mode 100644
index 0000000000..fa168de5a0
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_drop_events_when_queue_is_full.yaml
@@ -0,0 +1,21 @@
+- sections:
+ - section: remote
+ elements:
+ - connection:
+ value: syslog
+ - port:
+ value: PORT
+ - protocol:
+ value: PROTOCOL
+ - allowed-ips:
+ value: 0.0.0.0/0
+ - section: global
+ elements:
+ - limits:
+ elements:
+ - eps:
+ elements:
+ - maximum:
+ value: MAXIMUM
+ - timeframe:
+ value: TIMEFRAME
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_limitation.yaml b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_limitation.yaml
new file mode 100644
index 0000000000..fa168de5a0
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_limitation.yaml
@@ -0,0 +1,21 @@
+- sections:
+ - section: remote
+ elements:
+ - connection:
+ value: syslog
+ - port:
+ value: PORT
+ - protocol:
+ value: PROTOCOL
+ - allowed-ips:
+ value: 0.0.0.0/0
+ - section: global
+ elements:
+ - limits:
+ elements:
+ - eps:
+ elements:
+ - maximum:
+ value: MAXIMUM
+ - timeframe:
+ value: TIMEFRAME
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_processing_events_in_order_multi_thread.yaml b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_processing_events_in_order_multi_thread.yaml
new file mode 100644
index 0000000000..35820bf959
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_processing_events_in_order_multi_thread.yaml
@@ -0,0 +1,23 @@
+- sections:
+ - section: remote
+ elements:
+ - connection:
+ value: syslog
+ - port:
+ value: PORT
+ - protocol:
+ value: PROTOCOL
+ - allowed-ips:
+ value: 0.0.0.0/0
+ - section: global
+ elements:
+ - limits:
+ elements:
+ - eps:
+ elements:
+ - maximum:
+ value: MAXIMUM
+ - timeframe:
+ value: TIMEFRAME
+ - logall:
+ value: 'yes'
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_processing_events_in_order_single_thread.yaml b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_processing_events_in_order_single_thread.yaml
new file mode 100644
index 0000000000..35820bf959
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_processing_events_in_order_single_thread.yaml
@@ -0,0 +1,23 @@
+- sections:
+ - section: remote
+ elements:
+ - connection:
+ value: syslog
+ - port:
+ value: PORT
+ - protocol:
+ value: PROTOCOL
+ - allowed-ips:
+ value: 0.0.0.0/0
+ - section: global
+ elements:
+ - limits:
+ elements:
+ - eps:
+ elements:
+ - maximum:
+ value: MAXIMUM
+ - timeframe:
+ value: TIMEFRAME
+ - logall:
+ value: 'yes'
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_queueing_events_after_limitation.yaml b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_queueing_events_after_limitation.yaml
new file mode 100644
index 0000000000..fa168de5a0
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/configuration_template/event_processing_test_module/configuration_queueing_events_after_limitation.yaml
@@ -0,0 +1,21 @@
+- sections:
+ - section: remote
+ elements:
+ - connection:
+ value: syslog
+ - port:
+ value: PORT
+ - protocol:
+ value: PROTOCOL
+ - allowed-ips:
+ value: 0.0.0.0/0
+ - section: global
+ elements:
+ - limits:
+ elements:
+ - eps:
+ elements:
+ - maximum:
+ value: MAXIMUM
+ - timeframe:
+ value: TIMEFRAME
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/test_cases/basic_test_module/cases_disabled.yaml b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/basic_test_module/cases_disabled.yaml
new file mode 100644
index 0000000000..e62768a272
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/basic_test_module/cases_disabled.yaml
@@ -0,0 +1,8 @@
+- name: no configuration block
+ description: EPS Limits disabled
+ configuration_parameters:
+ MAXIMUM: '0'
+ TIMEFRAME: '0'
+ metadata:
+ maximum: 0
+ timeframe: 0
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/test_cases/basic_test_module/cases_enabled.yaml b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/basic_test_module/cases_enabled.yaml
new file mode 100644
index 0000000000..945a2343ea
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/basic_test_module/cases_enabled.yaml
@@ -0,0 +1,8 @@
+- name: enabled
+ description: EPS Limits enabled
+ configuration_parameters:
+ MAXIMUM: '20'
+ TIMEFRAME: '5'
+ metadata:
+ maximum: 20
+ timeframe: 5
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/test_cases/configuration_test_module/cases_accepted_values.yaml b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/configuration_test_module/cases_accepted_values.yaml
new file mode 100644
index 0000000000..daf9da4ad5
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/configuration_test_module/cases_accepted_values.yaml
@@ -0,0 +1,8 @@
+- name: maximum 5000 - timeframe 10
+ description: accepted value
+ configuration_parameters:
+ MAXIMUM: '5000'
+ TIMEFRAME: '10'
+ metadata:
+ maximum: 5000
+ timeframe: 10
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/test_cases/configuration_test_module/cases_invalid_values.yaml b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/configuration_test_module/cases_invalid_values.yaml
new file mode 100644
index 0000000000..45a6ffd55e
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/configuration_test_module/cases_invalid_values.yaml
@@ -0,0 +1,35 @@
+- name: maximum value above the allowed value
+ description: EPS Limits with maximum values over the maximum allowed
+ configuration_parameters:
+ MAXIMUM: '100001'
+ TIMEFRAME: '5'
+ metadata:
+ maximum: 100001
+ timeframe: 5
+
+- name: timeframe value above the allowed value
+ description: EPS Limits with timeframe values over the timeframe allowed
+ configuration_parameters:
+ MAXIMUM: '10'
+ TIMEFRAME: '3601'
+ metadata:
+ maximum: 10
+ timeframe: 3601
+
+- name: timeframe = 0
+ description: EPS Limits with timeframe value set to 0
+ configuration_parameters:
+ MAXIMUM: '10'
+ TIMEFRAME: '0'
+ metadata:
+ maximum: 10
+ timeframe: 0
+
+- name: maximum, timeframe = 0
+ description: EPS Limits with maximum and timeframe value set to 0
+ configuration_parameters:
+ MAXIMUM: '0'
+ TIMEFRAME: '0'
+ metadata:
+ maximum: 0
+ timeframe: 0
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/test_cases/configuration_test_module/cases_missing_configuration.yaml b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/configuration_test_module/cases_missing_configuration.yaml
new file mode 100644
index 0000000000..cf0f1430bd
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/configuration_test_module/cases_missing_configuration.yaml
@@ -0,0 +1,36 @@
+- name: missing timeframe
+ description: Missing tag configuration
+ configuration_parameters:
+ MAXIMUM: '1000'
+ TIMEFRAME: '5'
+ metadata:
+ maximum: 1000
+ timeframe: 5
+ behavior: works
+ remove_tags:
+ - 5
+
+- name: missing maximum
+ description: Missing tag configuration
+ configuration_parameters:
+ MAXIMUM: '1000'
+ TIMEFRAME: '5'
+ metadata:
+ maximum: 1000
+ timeframe: 5
+ behavior: disabled
+ remove_tags:
+ - 1000
+
+- name: missing maximum and timeframe
+ description: Missing and tag configuration
+ configuration_parameters:
+ MAXIMUM: '1000'
+ TIMEFRAME: '5'
+ metadata:
+ maximum: 1000
+ timeframe: 5
+ behavior: error
+ remove_tags:
+ - 1000
+ - 5
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_drop_events_when_queue_is_full.yaml b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_drop_events_when_queue_is_full.yaml
new file mode 100644
index 0000000000..8cdeadadc4
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_drop_events_when_queue_is_full.yaml
@@ -0,0 +1,16 @@
+- name: drop events
+ description: Drop events when events queue is full
+ configuration_parameters:
+ PORT: 514
+ PROTOCOL: tcp
+ MAXIMUM: '1'
+ TIMEFRAME: '100'
+ metadata:
+ maximum: 1
+ timeframe: 100
+ # syslog simulator parameters
+ address: localhost
+ port: 514
+ protocol: tcp
+ messages_number: 50000
+ eps: 5000
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_limitation.yaml b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_limitation.yaml
new file mode 100644
index 0000000000..5896343f28
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_limitation.yaml
@@ -0,0 +1,16 @@
+- name: not process events after exceeding the limit
+ description: EPS limitation
+ configuration_parameters:
+ PORT: 514
+ PROTOCOL: tcp
+ MAXIMUM: '500'
+ TIMEFRAME: '10'
+ metadata:
+ maximum: 500
+ timeframe: 10
+ # syslog simulator parameters
+ address: localhost
+ port: 514
+ protocol: tcp
+ messages_number: 10000
+ eps: 1000
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_processing_events_in_order_multi_thread.yaml b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_processing_events_in_order_multi_thread.yaml
new file mode 100644
index 0000000000..b33c718e1f
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_processing_events_in_order_multi_thread.yaml
@@ -0,0 +1,23 @@
+- name: batch events order - multi-thread
+ description: Process events in batch order
+ configuration_parameters:
+ PORT: 514
+ PROTOCOL: tcp
+ MAXIMUM: '20'
+ TIMEFRAME: '5'
+ metadata:
+ maximum: 20
+ timeframe: 5
+ # syslog simulator parameters
+ address: localhost
+ num_batches: 5
+ batch_sending_time: 3
+ message_1: 'Login failed: admin, test - Group 1'
+ message_2: 'Login failed: admin, test - Group 2'
+ message_3: 'Login failed: admin, test - Group 3'
+ message_4: 'Login failed: admin, test - Group 4'
+ message_5: 'Login failed: admin, test - Group 5'
+ port: 514
+ protocol: tcp
+ eps: 100
+ messages_number: 100
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_processing_events_in_order_single_thread.yaml b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_processing_events_in_order_single_thread.yaml
new file mode 100644
index 0000000000..b8dc921075
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_processing_events_in_order_single_thread.yaml
@@ -0,0 +1,19 @@
+- name: messages events order - single-thread
+ description: Process events in messages order
+ configuration_parameters:
+ PORT: 514
+ PROTOCOL: tcp
+ MAXIMUM: '20'
+ TIMEFRAME: '5'
+ metadata:
+ maximum: 20
+ timeframe: 5
+ # syslog simulator parameters
+ address: localhost
+ message: 'Login failed: admin, test'
+ port: 514
+ protocol: tcp
+ numbered_messages: 1
+ messages_number_1: 300
+ eps: 300
+ messages_number_2: 200
diff --git a/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_queueing_events_after_limitation.yaml b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_queueing_events_after_limitation.yaml
new file mode 100644
index 0000000000..025f210b31
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/data/test_cases/event_processing_test_module/cases_queueing_events_after_limitation.yaml
@@ -0,0 +1,16 @@
+- name: queue non-processed events
+ description: Queueing events after processing limitation
+ configuration_parameters:
+ PORT: 514
+ PROTOCOL: tcp
+ MAXIMUM: '100'
+ TIMEFRAME: '10'
+ metadata:
+ maximum: 100
+ timeframe: 10
+ # syslog simulator parameters
+ address: localhost
+ port: 514
+ protocol: tcp
+ messages_number: 10000
+ eps: 1000
diff --git a/tests/integration/test_analysisd/test_limit_eps/test_basic.py b/tests/integration/test_analysisd/test_limit_eps/test_basic.py
new file mode 100644
index 0000000000..497cd5ea77
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/test_basic.py
@@ -0,0 +1,155 @@
+import os
+import pytest
+
+from wazuh_testing.tools.configuration import load_configuration_template, get_test_cases_data
+from wazuh_testing.modules.analysisd import event_monitor as evm
+from wazuh_testing.processes import check_if_daemons_are_running
+
+pytestmark = [pytest.mark.server]
+
+
+# Generic vars
+TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
+CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, 'configuration_template', 'basic_test_module')
+TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, 'test_cases', 'basic_test_module')
+local_internal_options = {'wazuh_modules.debug': '2', 'monitord.rotate_log': '0'}
+
+# ---------------------------------------------------- TEST_ENABLED ----------------------------------------------------
+# Configuration and cases data
+configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_enabled.yaml')
+t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_enabled.yaml')
+
+# Enabled test configurations (t1)
+t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path)
+t1_configurations = load_configuration_template(configurations_path, t1_configuration_parameters,
+ t1_configuration_metadata)
+
+# ---------------------------------------------------- TEST_DISABLED ---------------------------------------------------
+# Configuration and cases data
+t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_disabled.yaml')
+t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_disabled.yaml')
+
+# Disabled test configurations (t2)
+t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path)
+t2_configurations = load_configuration_template(t2_configurations_path, t2_configuration_parameters,
+ t2_configuration_metadata)
+
+
+@pytest.mark.tier(level=0)
+@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids)
+def test_enabled(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration,
+ configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_daemon_function):
+ """
+ description: Check whether the event analysis limitation is activated after its activation in the configuration.
+
+ test_phases:
+ - setup:
+ - Load Wazuh light configuration.
+ - Apply ossec.conf configuration changes according to the configuration template and use case.
+ - Apply custom settings in local_internal_options.conf.
+ - Truncate wazuh logs.
+ - Restart wazuh-manager service to apply configuration changes.
+ - test:
+ - Check in the ossec.log that a line has appeared indicating that EPS limiting has been enabled.
+ - Check that wazuh-analysisd is running (it has not been crashed).
+        - teardown:
+ - Truncate wazuh logs.
+ - Restore initial configuration, both ossec.conf and local_internal_options.conf.
+
+ wazuh_min_version: 4.4.0
+
+ parameters:
+ - configuration:
+ type: dict
+ brief: Get configurations from the module.
+ - metadata:
+ type: dict
+ brief: Get metadata from the module.
+ - load_wazuh_basic_configuration:
+ type: fixture
+ brief: Load basic wazuh configuration.
+ - set_wazuh_configuration:
+ type: fixture
+ brief: Apply changes to the ossec.conf configuration.
+ - configure_local_internal_options_function:
+ type: fixture
+ brief: Apply changes to the local_internal_options.conf configuration.
+ - truncate_monitored_files:
+ type: fixture
+ brief: Truncate wazuh logs.
+ - restart_wazuh_daemon_function:
+ type: fixture
+ brief: Restart the wazuh service.
+
+ assertions:
+ - Check in the log that the EPS limitation has been activated.
+ - Check that wazuh-analysisd daemon does not crash.
+
+ input_description:
+ - The `configuration_enabled` file provides the module configuration for this test.
+ - The `cases_enabled` file provides the test cases.
+ """
+ evm.check_eps_enabled(metadata['maximum'], metadata['timeframe'])
+
+ # Check that wazuh-analysisd is running
+ assert check_if_daemons_are_running(['wazuh-analysisd'])[0], 'wazuh-analysisd is not running. Maybe it has crashed'
+
+
+@pytest.mark.tier(level=0)
+@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids)
+def test_disabled(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration,
+ configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_daemon_function):
+ """
+ description: Check if when the EPS limitation setting is not applied, the feature is not activated.
+
+ test_phases:
+ - setup:
+ - Load Wazuh light configuration.
+ - Apply ossec.conf configuration changes according to the configuration template and use case.
+ - Apply custom settings in local_internal_options.conf.
+ - Truncate wazuh logs.
+ - Restart wazuh-manager service to apply configuration changes.
+ - test:
+ - Look in the ossec.log to see if the EPS limitation activation does not appear.
+ - Check that wazuh-analysisd is running (it has not been crashed).
+        - teardown:
+ - Truncate wazuh logs.
+ - Restore initial configuration, both ossec.conf and local_internal_options.conf.
+
+ wazuh_min_version: 4.4.0
+
+ parameters:
+ - configuration:
+ type: dict
+ brief: Get configurations from the module.
+ - metadata:
+ type: dict
+ brief: Get metadata from the module.
+ - load_wazuh_basic_configuration:
+ type: fixture
+ brief: Load basic wazuh configuration.
+ - set_wazuh_configuration:
+ type: fixture
+ brief: Apply changes to the ossec.conf configuration.
+ - configure_local_internal_options_function:
+ type: fixture
+ brief: Apply changes to the local_internal_options.conf configuration.
+ - truncate_monitored_files:
+ type: fixture
+ brief: Truncate wazuh logs.
+ - restart_wazuh_daemon_function:
+ type: fixture
+ brief: Restart the wazuh service.
+
+ assertions:
+ - Check in the ossec.log to see if the EPS limitation activation does not appear.
+ - Check that wazuh-analysisd daemon does not crash.
+
+ input_description:
+ - The `configuration_disabled` file provides the module configuration for this test.
+ - The `cases_disabled` file provides the test cases.
+ """
+ evm.check_eps_disabled()
+
+ # Check that wazuh-analysisd is running
+ assert check_if_daemons_are_running(['wazuh-analysisd'])[0], 'wazuh-analysisd is not running. Maybe it has crashed'
diff --git a/tests/integration/test_analysisd/test_limit_eps/test_configuration.py b/tests/integration/test_analysisd/test_limit_eps/test_configuration.py
new file mode 100644
index 0000000000..10dbeb5bb3
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/test_configuration.py
@@ -0,0 +1,265 @@
+import os
+import pytest
+
+from wazuh_testing.tools.configuration import load_configuration_template, get_test_cases_data
+from wazuh_testing.modules.analysisd import event_monitor as evm
+from wazuh_testing.tools.services import control_service
+from wazuh_testing.processes import check_if_daemons_are_running
+from wazuh_testing.tools import file
+from wazuh_testing import WAZUH_CONF_PATH
+
+# Reference paths
+TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
+CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, 'configuration_template', 'configuration_test_module')
+TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, 'test_cases', 'configuration_test_module')
+local_internal_options = {'wazuh_modules.debug': '2', 'monitord.rotate_log': '0'}
+
+# ------------------------------------------------ TEST_ACCEPTED_VALUES ------------------------------------------------
+# Configuration and cases data
+t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_accepted_values.yaml')
+t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_accepted_values.yaml')
+
+# Accepted values test configurations (t1)
+t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path)
+t1_configurations = load_configuration_template(t1_configurations_path, t1_configuration_parameters,
+ t1_configuration_metadata)
+
+# ------------------------------------------------- TEST_INVALID_VALUES ------------------------------------------------
+# Configuration and cases data
+t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_invalid_values.yaml')
+t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_invalid_values.yaml')
+
+# Invalid values test configurations (t2)
+t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path)
+t2_configurations = load_configuration_template(t2_configurations_path, t2_configuration_parameters,
+ t2_configuration_metadata)
+
+# --------------------------------------------- TEST_MISSING_CONFIGURATION ---------------------------------------------
+# Configuration and cases data
+t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_missing_configuration.yaml')
+t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_missing_configuration.yaml')
+
+# Missing configuration test configurations (t3)
+t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path)
+t3_configurations = load_configuration_template(t3_configurations_path, t3_configuration_parameters,
+ t3_configuration_metadata)
+
+
+@pytest.mark.tier(level=0)
+@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids)
+def test_accepted_values(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration,
+ configure_local_internal_options_module, truncate_monitored_files,
+ restart_wazuh_daemon_function):
+ """
+ description: Check that the EPS limitation is activated under accepted parameters.
+
+ test_phases:
+ - setup:
+ - Load Wazuh light configuration.
+ - Apply ossec.conf configuration changes according to the configuration template and use case.
+ - Apply custom settings in local_internal_options.conf.
+ - Truncate wazuh logs.
+ - Restart wazuh-manager service to apply configuration changes.
+ - test:
+ - Check in the log that the EPS limitation has been activated with the specified parameters.
+ - Check that wazuh-analysisd is running (it has not been crashed).
+        - teardown:
+ - Truncate wazuh logs.
+ - Restore initial configuration, both ossec.conf and local_internal_options.conf.
+
+ wazuh_min_version: 4.4.0
+
+ parameters:
+ - configuration:
+ type: dict
+ brief: Get configurations from the module.
+ - metadata:
+ type: dict
+ brief: Get metadata from the module.
+ - load_wazuh_basic_configuration:
+ type: fixture
+ brief: Load basic wazuh configuration.
+ - set_wazuh_configuration:
+ type: fixture
+ brief: Apply changes to the ossec.conf configuration.
+        - configure_local_internal_options_module:
+ type: fixture
+ brief: Apply changes to the local_internal_options.conf configuration.
+ - truncate_monitored_files:
+ type: fixture
+ brief: Truncate wazuh logs.
+ - restart_wazuh_daemon_function:
+ type: fixture
+ brief: Restart the wazuh service.
+
+ assertions:
+ - Check in the log that the EPS limitation has been activated with the specified parameters.
+ - Check that wazuh-analysisd daemon does not crash.
+
+ input_description:
+ - The `configuration_accepted_values` file provides the module configuration for this test.
+ - The `cases_accepted_values` file provides the test cases.
+ """
+ evm.check_eps_enabled(metadata['maximum'], metadata['timeframe'])
+
+ # Check that wazuh-analysisd is running
+ assert check_if_daemons_are_running(['wazuh-analysisd'])[0], 'wazuh-analysisd is not running. Maybe it has crashed'
+
+
+@pytest.mark.tier(level=0)
+@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids)
+def test_invalid_values(configuration, metadata, restart_wazuh_daemon_after_finishing_function,
+ load_wazuh_basic_configuration, set_wazuh_configuration,
+ configure_local_internal_options_module, truncate_monitored_files):
+ """
+ description: Check for configuration error and wazuh-analysisd if the EPS limiting configuration has unaccepted
+ values. Done for the following cases:
+ - Maximum value above the allowed value.
+ - Timeframe value above the allowed value.
+ - Timeframe = 0
+ - Maximum, timeframe = 0
+
+ test_phases:
+ - setup:
+ - Load Wazuh light configuration.
+ - Apply ossec.conf configuration changes according to the configuration template and use case.
+ - Apply custom settings in local_internal_options.conf.
+ - Truncate wazuh logs.
+ - test:
+ - Restart wazuh-manager service to apply configuration changes.
+ - Check that a configuration error is raised when trying to start wazuh-manager.
+ - Check that wazuh-analysisd is not running (due to configuration error).
+        - teardown:
+ - Truncate wazuh logs.
+ - Restore initial configuration, both ossec.conf and local_internal_options.conf.
+ - Restart the wazuh-manager service to apply initial configuration and start wazuh-analysisd daemon.
+
+ wazuh_min_version: 4.4.0
+
+ parameters:
+ - configuration:
+ type: dict
+ brief: Get configurations from the module.
+ - metadata:
+ type: dict
+ brief: Get metadata from the module.
+ - restart_wazuh_daemon_after_finishing_function:
+ type: fixture
+ brief: Restart the wazuh service in tierdown stage.
+ - load_wazuh_basic_configuration:
+ type: fixture
+ brief: Load basic wazuh configuration.
+ - set_wazuh_configuration:
+ type: fixture
+ brief: Apply changes to the ossec.conf configuration.
+        - configure_local_internal_options_module:
+ type: fixture
+ brief: Apply changes to the local_internal_options.conf configuration.
+ - truncate_monitored_files:
+ type: fixture
+ brief: Truncate wazuh logs.
+
+ assertions:
+ - Check that a configuration error is raised when trying to start wazuh-manager.
+ - Check that wazuh-analysisd is not running (due to configuration error).
+
+ input_description:
+ - The `configuration_invalid_values` file provides the module configuration for this test.
+ - The `cases_invalid_values` file provides the test cases.
+ """
+ try:
+ control_service('restart')
+ except ValueError:
+ pass
+ finally:
+ evm.check_configuration_error()
+ # Check that wazuh-analysisd is not running
+ assert not check_if_daemons_are_running(['wazuh-analysisd'])[0], 'wazuh-analysisd is running and was not ' \
+ 'expected to'
+
+
+@pytest.mark.tier(level=0)
+@pytest.mark.parametrize('configuration, metadata', zip(t3_configurations, t3_configuration_metadata), ids=t3_case_ids)
+def test_missing_configuration(configuration, metadata, restart_wazuh_daemon_after_finishing_function,
+ load_wazuh_basic_configuration, set_wazuh_configuration, truncate_monitored_files):
+ """
+ description: Checks what happens if tags are missing in the event analysis limitation settings. Done for the
+ following cases:
+        - Missing <timeframe>.
+        - Missing <maximum>.
+        - Missing <maximum> and <timeframe>.
+
+ test_phases:
+ - setup:
+ - Load Wazuh light configuration.
+ - Apply ossec.conf configuration changes according to the configuration template and use case.
+ - Apply custom settings in local_internal_options.conf.
+ - Truncate wazuh logs.
+ - test:
+ - Remove the specified tag in ossec.conf
+ - Restart wazuh-manager service to apply configuration changes.
+ - Check whether the EPS limitation is activated, deactivated or generates a configuration error due to a
+ missing label.
+ - Check if wazuh-analysisd is running or not (according to the expected behavior).
+        - teardown:
+ - Truncate wazuh logs.
+ - Restore initial configuration, both ossec.conf and local_internal_options.conf.
+ - Restart the wazuh-manager service to apply initial configuration and start wazuh-analysisd daemon.
+
+ wazuh_min_version: 4.4.0
+
+ parameters:
+ - configuration:
+ type: dict
+ brief: Get configurations from the module.
+ - metadata:
+ type: dict
+ brief: Get metadata from the module.
+ - restart_wazuh_daemon_after_finishing_function:
+ type: fixture
+ brief: Restart the wazuh service in tierdown stage.
+ - load_wazuh_basic_configuration:
+ type: fixture
+ brief: Load basic wazuh configuration.
+ - set_wazuh_configuration:
+ type: fixture
+ brief: Apply changes to the ossec.conf configuration.
+ - configure_local_internal_options_function:
+ type: fixture
+ brief: Apply changes to the local_internal_options.conf configuration.
+ - truncate_monitored_files:
+ type: fixture
+ brief: Truncate wazuh logs.
+
+ assertions:
+ - Check whether the EPS limitation is activated, deactivated or generates a configuration error due to a
+ missing label.
+ - Check if wazuh-analysisd is running or not (according to the expected behavior).
+
+ input_description:
+        - The `configuration_missing_configuration` file provides the module configuration for this test.
+        - The `cases_missing_configuration` file provides the test cases.
+ """
+ # Remove test case tags from ossec.conf
+ file.replace_regex_in_file(metadata['remove_tags'], [''] * len(metadata['remove_tags']), WAZUH_CONF_PATH)
+
+ if metadata['behavior'] == 'works':
+ control_service('restart')
+ evm.check_eps_enabled(metadata['maximum'], 10) # 10 is the default timeframe
+ assert check_if_daemons_are_running(['wazuh-analysisd'])[0], 'wazuh-analysisd is not running. Maybe it has ' \
+ 'crashed'
+ elif metadata['behavior'] == 'disabled':
+ control_service('restart')
+ evm.check_eps_disabled()
+ assert check_if_daemons_are_running(['wazuh-analysisd'])[0], 'wazuh-analysisd is not running. Maybe it has ' \
+ 'crashed'
+ else:
+ try:
+ control_service('restart')
+ except ValueError:
+ pass
+ finally:
+ evm.check_configuration_error()
+ # Check that wazuh-analysisd is not running
+ assert not check_if_daemons_are_running(['wazuh-analysisd'])[0], 'wazuh-analysisd is running and was not ' \
+ 'expected to'
diff --git a/tests/integration/test_analysisd/test_limit_eps/test_event_processing.py b/tests/integration/test_analysisd/test_limit_eps/test_event_processing.py
new file mode 100644
index 0000000000..b6ecf758c8
--- /dev/null
+++ b/tests/integration/test_analysisd/test_limit_eps/test_event_processing.py
@@ -0,0 +1,605 @@
+import os
+import pytest
+import time
+import re
+from math import ceil
+from copy import deepcopy
+
+from wazuh_testing.tools.configuration import load_configuration_template, get_test_cases_data
+from wazuh_testing import ARCHIVES_LOG_PATH
+from wazuh_testing.modules.analysisd import event_monitor as evm
+from wazuh_testing.tools import file
+from wazuh_testing.modules.analysisd import QUEUE_EVENTS_SIZE, ANALYSISD_ONE_THREAD_CONFIG
+from wazuh_testing.scripts.syslog_simulator import DEFAULT_MESSAGE_SIZE
+from wazuh_testing.tools.run_simulator import syslog_simulator
+from wazuh_testing.tools.thread_executor import ThreadExecutor
+
+# Reference paths
+TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
+CONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, 'configuration_template', 'event_processing_test_module')
+TEST_CASES_PATH = os.path.join(TEST_DATA_PATH, 'test_cases', 'event_processing_test_module')
+SYSLOG_SIMULATOR_START_TIME = 2
+local_internal_options = {'wazuh_modules.debug': '2', 'monitord.rotate_log': '0', 'analysisd.state_interval': '1'}
+
+# --------------------------------------------------- TEST_LIMITATION --------------------------------------------------
+# Configuration and cases data
+t1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_limitation.yaml')
+t1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_limitation.yaml')
+
+# Limitation test configurations (t1)
+t1_configuration_parameters, t1_configuration_metadata, t1_case_ids = get_test_cases_data(t1_cases_path)
+t1_configurations = load_configuration_template(t1_configurations_path, t1_configuration_parameters,
+ t1_configuration_metadata)
+
+# ---------------------------------------- TEST_QUEUEING_EVENTS_AFTER_LIMITATION ---------------------------------------
+# Configuration and cases data
+t2_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_queueing_events_after_limitation.yaml')
+t2_cases_path = os.path.join(TEST_CASES_PATH, 'cases_queueing_events_after_limitation.yaml')
+
+# Queueing event test configurations (t2)
+t2_configuration_parameters, t2_configuration_metadata, t2_case_ids = get_test_cases_data(t2_cases_path)
+t2_configurations = load_configuration_template(t2_configurations_path, t2_configuration_parameters,
+ t2_configuration_metadata)
+
+# --------------------------------------- TEST_DROPPING_EVENTS_WHEN_QUEUE_IS_FULL --------------------------------------
+# Configuration and cases data
+t3_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_drop_events_when_queue_is_full.yaml')
+t3_cases_path = os.path.join(TEST_CASES_PATH, 'cases_drop_events_when_queue_is_full.yaml')
+
+# Dropping events when queue is full test configurations (t3)
+t3_configuration_parameters, t3_configuration_metadata, t3_case_ids = get_test_cases_data(t3_cases_path)
+t3_configurations = load_configuration_template(t3_configurations_path, t3_configuration_parameters,
+ t3_configuration_metadata)
+
+# ------------------------------------ TEST_PROCESSING_EVENTS_IN_ORDER_SINGLE_THREAD -----------------------------------
+# Configuration and cases data
+t4_configurations_path = os.path.join(CONFIGURATIONS_PATH,
+ 'configuration_processing_events_in_order_single_thread.yaml')
+t4_cases_path = os.path.join(TEST_CASES_PATH, 'cases_processing_events_in_order_single_thread.yaml')
+
+# Processing events in order single thread test configurations (t4)
+t4_configuration_parameters, t4_configuration_metadata, t4_case_ids = get_test_cases_data(t4_cases_path)
+t4_configurations = load_configuration_template(t4_configurations_path, t4_configuration_parameters,
+ t4_configuration_metadata)
+t4_local_internal_options = {'wazuh_modules.debug': '2', 'monitord.rotate_log': '0', 'analysisd.state_interval': '1'}
+t4_local_internal_options.update(ANALYSISD_ONE_THREAD_CONFIG)
+
+# ------------------------------------ TEST_PROCESSING_EVENTS_IN_ORDER_MULTI_THREAD ------------------------------------
+# Configuration and cases data
+t5_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_processing_events_in_order_multi_thread.yaml')
+t5_cases_path = os.path.join(TEST_CASES_PATH, 'cases_processing_events_in_order_multi_thread.yaml')
+
+# Processing events in order multi thread test configurations (t5)
+t5_configuration_parameters, t5_configuration_metadata, t5_case_ids = get_test_cases_data(t5_cases_path)
+t5_configurations = load_configuration_template(t5_configurations_path, t5_configuration_parameters,
+ t5_configuration_metadata)
+
+
+@pytest.mark.tier(level=0)
+@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configuration_metadata), ids=t1_case_ids)
+def test_limitation(configuration, metadata, load_wazuh_basic_configuration, set_wazuh_configuration,
+ configure_local_internal_options_function, truncate_monitored_files, restart_wazuh_daemon_function):
+ """
+ description: Check if after passing the event processing limit, the processing is stopped until the next timeframe.
+
+ test_phases:
+ - setup:
+ - Load Wazuh light configuration.
+ - Apply ossec.conf configuration changes according to the configuration template and use case.
+ - Apply custom settings in local_internal_options.conf.
+ - Truncate wazuh logs.
+ - Restart wazuh-manager service to apply configuration changes.
+ - test:
+ - Start the event simulator and check that the events are being received and analyzed.
+ - Wait until the event limit is reached and check that the events are still being received but not
+ processed.
+ - Wait until the next analysis period (next timeframe) and check that events are still being
+ processed, in this case the queued ones.
+        - teardown:
+ - Truncate wazuh logs.
+ - Restore initial configuration, both ossec.conf and local_internal_options.conf.
+
+ wazuh_min_version: 4.4.0
+
+ parameters:
+ - configuration:
+ type: dict
+ brief: Get configurations from the module.
+ - metadata:
+ type: dict
+ brief: Get metadata from the module.
+ - load_wazuh_basic_configuration:
+ type: fixture
+ brief: Load basic wazuh configuration.
+ - set_wazuh_configuration:
+ type: fixture
+ brief: Apply changes to the ossec.conf configuration.
+ - configure_local_internal_options_function:
+ type: fixture
+ brief: Apply changes to the local_internal_options.conf configuration.
+ - truncate_monitored_files:
+ type: fixture
+ brief: Truncate wazuh logs.
+ - restart_wazuh_daemon_function:
+ type: fixture
+ brief: Restart the wazuh service.
+
+ assertions:
+ - Check that events are received when expected.
+ - Check that events are processed when expected.
+ - Check that events are still received when expected.
+ - Check that no events are processed due to blocking.
+ - Check that events are still processed after blocking.
+
+ input_description:
+ - The `configuration_limitation` file provides the module configuration for this test.
+ - The `cases_limitation` file provides the test cases.
+ """
+ # Set syslog simulator parameters according to the use case data
+ syslog_simulator_parameters = {'address': metadata['address'], 'port': metadata['port'],
+ 'protocol': metadata['protocol'], 'eps': metadata['eps'],
+ 'messages_number': metadata['messages_number']}
+
+ # Run syslog simulator thread
+ syslog_simulator_thread = ThreadExecutor(syslog_simulator, {'parameters': syslog_simulator_parameters})
+ syslog_simulator_thread.start()
+ waited_simulator_time = 0
+
+ # Wait until syslog simulator is started
+ time.sleep(SYSLOG_SIMULATOR_START_TIME)
+
+ # Get analysisd stats
+ analysisd_state = evm.get_analysisd_state()
+ events_received = int(analysisd_state['events_received'])
+ events_processed = int(analysisd_state['events_processed'])
+
+ # Check that wazuh-manager is processing syslog events
+ assert events_received > 0, '(0): No events are being received when it is expected'
+ assert events_processed > 0, 'No events are being processed when it is expected'
+
+ # Wait for the event non-processing phase to arrive (limit reached)
+ waiting_limit_time = ceil((metadata['maximum'] * metadata['timeframe']) / metadata['eps']) + 1 # Offset 1s
+ time.sleep(waiting_limit_time)
+ waited_simulator_time += waiting_limit_time
+
+ # Get analysisd stats in limitation stage
+ analysisd_state = evm.get_analysisd_state()
+ events_received = int(analysisd_state['events_received'])
+ events_processed = int(analysisd_state['events_processed'])
+
+ # Check that the wazuh-manager is receiving events but it is not processing them due to the limitation
+ assert events_received > 0, '(1): No events are being received when it is expected'
+ assert events_processed == 0, f"Events are being processed when the limit has been reached. {events_processed} != 0"
+
+ # Wait until the limited timeframe has elapsed
+ time.sleep(metadata['timeframe'] + 1 - waited_simulator_time) # Offset 1s
+
+ # Get analysisd stats in limitation stage
+ analysisd_state = evm.get_analysisd_state()
+ events_processed = int(analysisd_state['events_processed'])
+
+    # Check whether events continue to be processed after blocking
+    assert events_processed > 0, "Event processing has not been continued after blocking"
+
+ # Wait until syslog simulator ends
+ syslog_simulator_thread.join()
+
+
+@pytest.mark.tier(level=0)
+@pytest.mark.parametrize('configuration, metadata', zip(t2_configurations, t2_configuration_metadata), ids=t2_case_ids)
+def test_queueing_events_after_limitation(configuration, metadata, load_wazuh_basic_configuration,
+ set_wazuh_configuration, configure_local_internal_options_function,
+ truncate_monitored_files, restart_wazuh_daemon_function):
+ """
+ description: Check if after stopping processing events (due to limit reached), the received events are stored in
+ the events queue if it is not full.
+
+ test_phases:
+ - setup:
+ - Load Wazuh light configuration.
+ - Apply ossec.conf configuration changes according to the configuration template and use case.
+ - Apply custom settings in local_internal_options.conf.
+ - Truncate wazuh logs.
+ - Restart wazuh-manager service to apply configuration changes.
+ - test:
+ - Check that the initial events queue usage rate is 0%.
+            - Calculate when the limit of processed events is reached, wait a few seconds for events to be stored in
+              the events queue and take a sample of the usage to check that it is higher than 0%.
+            - Wait a few seconds and take a second sample again, to check that the events queue usage is higher than
+              the first sample.
+        - teardown:
+ - Truncate wazuh logs.
+ - Restore initial configuration, both ossec.conf and local_internal_options.conf.
+
+ wazuh_min_version: 4.4.0
+
+ parameters:
+ - configuration:
+ type: dict
+ brief: Get configurations from the module.
+ - metadata:
+ type: dict
+ brief: Get metadata from the module.
+ - load_wazuh_basic_configuration:
+ type: fixture
+ brief: Load basic wazuh configuration.
+ - set_wazuh_configuration:
+ type: fixture
+ brief: Apply changes to the ossec.conf configuration.
+ - configure_local_internal_options_function:
+ type: fixture
+ brief: Apply changes to the local_internal_options.conf configuration.
+ - truncate_monitored_files:
+ type: fixture
+ brief: Truncate wazuh logs.
+ - restart_wazuh_daemon_function:
+ type: fixture
+ brief: Restart the wazuh service.
+
+ assertions:
+ - Check that the queue usage at startup is 0%.
+ - Check that the queue usage grows after stopping processing events.
+ - Check that the queue usage continues to grow after stopping processing events.
+
+ input_description:
+ - The `configuration_queueing_events_after_limitation` file provides the module configuration for this test.
+ - The `cases_queueing_events_after_limitation` file provides the test cases.
+ """
+ # Get initial queue usage
+ analysisd_state = evm.get_analysisd_state()
+ event_queue_usage = float(analysisd_state['event_queue_usage'])
+
+ # Check that there are no events in the queue
+ assert event_queue_usage == 0.0, 'The initial events queue is not at 0%'
+
+ # Set syslog simulator parameters according to the use case data
+ syslog_simulator_parameters = {'address': metadata['address'], 'port': metadata['port'],
+ 'protocol': metadata['protocol'], 'eps': metadata['eps'],
+ 'messages_number': metadata['messages_number']}
+
+ # Run syslog simulator thread
+ syslog_simulator_thread = ThreadExecutor(syslog_simulator, {'parameters': syslog_simulator_parameters})
+ syslog_simulator_thread.start()
+
+ # Wait for the event non-processing stage (limit reached)
+ waiting_limit_time = ceil((metadata['maximum'] * metadata['timeframe']) / metadata['eps']) + \
+ SYSLOG_SIMULATOR_START_TIME
+ time.sleep(waiting_limit_time)
+
+ # Get queue usage in limitation stage
+ analysisd_state = evm.get_analysisd_state()
+ event_queue_usage_sample_1 = float(analysisd_state['event_queue_usage'])
+
+ # Check that received and unprocessed events are being queued
+ assert event_queue_usage_sample_1 > 0.0, 'Events received after processing limitation are not being queued'
+
+ # Wait a few more seconds before passing the timeframe
+ waiting_time_sample_2 = ceil((metadata['timeframe'] - waiting_limit_time) / 2)
+ time.sleep(waiting_time_sample_2)
+
+ # Get queue usage in limitation stage
+ analysisd_state = evm.get_analysisd_state()
+ event_queue_usage_sample_2 = float(analysisd_state['event_queue_usage'])
+
+ # Check that events received and not processed are still being queued
+ assert event_queue_usage_sample_2 > event_queue_usage_sample_1, 'Events queue has not grown as expected during ' \
+ 'event limitation'
+ # Wait until syslog simulator ends
+ syslog_simulator_thread.join()
+
+
+@pytest.mark.tier(level=0)
+@pytest.mark.parametrize('configuration, metadata', zip(t3_configurations, t3_configuration_metadata), ids=t3_case_ids)
+def test_dropping_events_when_queue_is_full(configuration, metadata, load_wazuh_basic_configuration,
+ set_wazuh_configuration, configure_local_internal_options_function,
+ truncate_monitored_files, restart_wazuh_daemon_function):
+ """
+ description: Check that after the event analysis block, if the events queue is full, the events are dropped.
+
+ test_phases:
+ - setup:
+ - Load Wazuh light configuration.
+ - Apply ossec.conf configuration changes according to the configuration template and use case.
+ - Apply custom settings in local_internal_options.conf.
+ - Truncate wazuh logs.
+ - Restart wazuh-manager service to apply configuration changes.
+ - test:
+ - Check that the initial queue usage rate is 0%.
+ - Calculate when the event analysis blocking phase is expected and the queue is full, then it measures the
+ use of the event queue to check that it is 100%, and that the received events are being dropped.
+        - teardown:
+ - Truncate wazuh logs.
+ - Restore initial configuration, both ossec.conf and local_internal_options.conf.
+
+ wazuh_min_version: 4.4.0
+
+ parameters:
+ - configuration:
+ type: dict
+ brief: Get configurations from the module.
+ - metadata:
+ type: dict
+ brief: Get metadata from the module.
+ - load_wazuh_basic_configuration:
+ type: fixture
+ brief: Load basic wazuh configuration.
+ - set_wazuh_configuration:
+ type: fixture
+ brief: Apply changes to the ossec.conf configuration.
+ - configure_local_internal_options_function:
+ type: fixture
+ brief: Apply changes to the local_internal_options.conf configuration.
+ - truncate_monitored_files:
+ type: fixture
+ brief: Truncate wazuh logs.
+ - restart_wazuh_daemon_function:
+ type: fixture
+ brief: Restart the wazuh service.
+
+ assertions:
+ - Check that the initial queue is at 0%.
+ - Check that after the event analysis block and the queue is full, events are still being received.
+ - Check that no events are processed when it is expected.
+ - Check that the event queue usage is at 100% when it is expected.
+ - Check that all events received are being dropped because the queue is full.
+
+ input_description:
+    - The `configuration_drop_events_when_queue_is_full` file provides the module configuration for this test.
+    - The `cases_drop_events_when_queue_is_full` file provides the test cases.
+ """
+ # Get initial queue usage
+ analysisd_state = evm.get_analysisd_state()
+ event_queue_usage = float(analysisd_state['event_queue_usage'])
+
+ # Check that there are no events in the queue
+ assert event_queue_usage == 0.0, 'The initial events queue is not at 0%'
+
+ # Set syslog simulator parameters according to the use case data
+ syslog_simulator_parameters = {'address': metadata['address'], 'port': metadata['port'],
+ 'protocol': metadata['protocol'], 'eps': metadata['eps'],
+ 'messages_number': metadata['messages_number']}
+
+ # Run syslog simulator thread
+ syslog_simulator_thread = ThreadExecutor(syslog_simulator, {'parameters': syslog_simulator_parameters})
+ syslog_simulator_thread.start()
+
+ # Calculate the non-processing stage (limit reached)
+ waiting_limit_time = ceil((metadata['maximum'] * metadata['timeframe']) / metadata['eps']) + \
+ SYSLOG_SIMULATOR_START_TIME
+
+ # Calculate the stage when the events queue is full (offset 4 sec to check all received-dropped events)
+ waiting_time_queue_is_full = waiting_limit_time + ((QUEUE_EVENTS_SIZE / DEFAULT_MESSAGE_SIZE) / metadata['eps']) + 4
+ time.sleep(waiting_time_queue_is_full)
+
+ # Get analysisd stats
+ analysisd_state = evm.get_analysisd_state()
+ event_queue_usage = float(analysisd_state['event_queue_usage'])
+ events_dropped = float(analysisd_state['events_dropped'])
+ events_received = int(analysisd_state['events_received'])
+ events_processed = int(analysisd_state['events_processed'])
+
+ # Check that events are received, not processed and that they are dropped when the queue is full
+ assert events_received > 0, ' No events are being received when it is expected'
+ assert events_processed == 0, 'Events are being processed when they are not expected (due to the limit)'
+ assert event_queue_usage == 1.0, 'The events queue is not full as expected'
+ assert events_dropped == events_received, 'No events are being dropped even though the queue is full'
+
+ # Wait until syslog simulator ends
+ syslog_simulator_thread.join()
+
+
+@pytest.mark.tier(level=0)
+@pytest.mark.parametrize('configuration, metadata', zip(t4_configurations, t4_configuration_metadata), ids=t4_case_ids)
+@pytest.mark.parametrize('configure_local_internal_options_function', [t4_local_internal_options], indirect=True)
+def test_event_processing_in_order_single_thread(configuration, metadata, load_wazuh_basic_configuration,
+ set_wazuh_configuration, configure_local_internal_options_function,
+ truncate_event_logs, restart_wazuh_daemon_function):
+ """
+ description: Check that events are processed in order according to the position within the queue, and
+ that events that are being received during the blocking phase are being added to the end of the queue when
+ using single-thread processing.
+
+ test_phases:
+ - setup:
+ - Load Wazuh light configuration.
+ - Apply ossec.conf configuration changes according to the configuration template and use case.
+ - Apply custom settings in local_internal_options.conf.
+ - Truncate wazuh event logs.
+ - Restart wazuh-manager service to apply configuration changes.
+ - test:
+ - Send a batch of identified events.
+ - Wait a few seconds, then send another batch of identified events.
+ - Wait until all events are processed.
+ - Read the event log (archives.log) and check that the events have been processed in the expected order.
+        - teardown:
+ - Truncate wazuh event logs.
+ - Restore initial configuration, both ossec.conf and local_internal_options.conf.
+
+ wazuh_min_version: 4.4.0
+
+ parameters:
+ - configuration:
+ type: dict
+ brief: Get configurations from the module.
+ - metadata:
+ type: dict
+ brief: Get metadata from the module.
+ - load_wazuh_basic_configuration:
+ type: fixture
+ brief: Load basic wazuh configuration.
+ - set_wazuh_configuration:
+ type: fixture
+ brief: Apply changes to the ossec.conf configuration.
+ - configure_local_internal_options_function:
+ type: fixture
+ brief: Apply changes to the local_internal_options.conf configuration.
+ - truncate_event_logs:
+ type: fixture
+ brief: Truncate wazuh event logs.
+ - restart_wazuh_daemon_function:
+ type: fixture
+ brief: Restart the wazuh service.
+
+ assertions:
+ - Check that all expected events have been stored in the archives.log.
+ - Check that all events have been generated in the archives.log according to the expected order.
+
+ input_description:
+    - The `configuration_processing_events_in_order_single_thread` file provides the module configuration for
+        this test.
+    - The `cases_processing_events_in_order_single_thread` file provides the test cases.
+ """
+ # Set syslog simulator parameters according to the use case data
+ syslog_simulator_parameters_1 = {'address': metadata['address'], 'port': metadata['port'],
+ 'protocol': metadata['protocol'], 'eps': metadata['eps'],
+ 'messages_number': metadata['messages_number_1'], 'message': metadata['message'],
+ 'numbered_messages': metadata['numbered_messages']}
+
+ # Run syslog simulator thread
+ syslog_simulator_thread_1 = ThreadExecutor(syslog_simulator, {'parameters': syslog_simulator_parameters_1})
+ syslog_simulator_thread_1.start()
+
+ # Wait until the first processing interval has passed.
+ waiting_time = metadata['timeframe']
+ time.sleep(waiting_time)
+
+ # Run syslog simulator to send new events when events sent previously still have to be processed
+ # (they are in the queue)
+ syslog_simulator_parameters_2 = {'address': metadata['address'], 'port': metadata['port'],
+ 'protocol': metadata['protocol'], 'eps': metadata['eps'],
+ 'messages_number': metadata['messages_number_2'], 'message': metadata['message'],
+ 'numbered_messages': metadata['messages_number_1'] + 1}
+ syslog_simulator_thread_2 = ThreadExecutor(syslog_simulator, {'parameters': syslog_simulator_parameters_2})
+ syslog_simulator_thread_2.start()
+
+ # Wait until all events have been processed
+ waiting_time = ((metadata['messages_number_1'] + metadata['messages_number_2']) /
+ (metadata['maximum'] * metadata['timeframe'])) * metadata['timeframe'] + SYSLOG_SIMULATOR_START_TIME
+ time.sleep(waiting_time)
+
+ # Read the events log data
+ events_data = file.read_file(ARCHIVES_LOG_PATH).split('\n')
+ expected_num_events = metadata['messages_number_1'] + metadata['messages_number_2']
+
+ # Check that all events have been recorded in the log file
+ assert len(events_data) >= expected_num_events, \
+ f"Not all expected events were found in the archives.log. Found={len(events_data)}, " \
+ f"expected>={expected_num_events}"
+
+ # Get the IDs of event messages
+ event_ids = [int(re.search(fr"{metadata['message']} - (\d+)", event).group(1)) for event in events_data
+ if bool(re.match(fr".*{metadata['message']} - (\d+)", event))]
+
+ # Check that the event message IDs are in order
+ assert all(event_ids[i] <= event_ids[i+1] for i in range(len(event_ids) - 1)), 'Events have not been processed ' \
+ 'in the expected order'
+
+ # Wait until syslog simulator ends
+ syslog_simulator_thread_1.join()
+ syslog_simulator_thread_2.join()
+
+
+@pytest.mark.tier(level=0)
+@pytest.mark.parametrize('configuration, metadata', zip(t5_configurations, t5_configuration_metadata), ids=t5_case_ids)
+def test_event_processing_in_order_multi_thread(configuration, metadata, load_wazuh_basic_configuration,
+ set_wazuh_configuration, configure_local_internal_options_function,
+ truncate_event_logs, restart_wazuh_daemon_function):
+ """
+ description: Check that events are processed in order according to the position within the queue, and
+ that events that are being received during the blocking phase are being added to the end of the queue when
+ using multi-thread processing.
+
+ test_phases:
+ - setup:
+ - Load Wazuh light configuration.
+ - Apply ossec.conf configuration changes according to the configuration template and use case.
+ - Apply custom settings in local_internal_options.conf.
+ - Truncate wazuh event logs.
+ - Restart wazuh-manager service to apply configuration changes.
+ - test:
+ - Send a batch of identified events.
+ - Wait a few seconds, then send another batch of identified events. This is repeated n times.
+ - Wait until all events are processed.
+ - Read the event log (archives.log) and check that the events have been processed in the expected order.
+        - teardown:
+ - Truncate wazuh event logs.
+ - Restore initial configuration, both ossec.conf and local_internal_options.conf.
+
+ wazuh_min_version: 4.4.0
+
+ parameters:
+ - configuration:
+ type: dict
+ brief: Get configurations from the module.
+ - metadata:
+ type: dict
+ brief: Get metadata from the module.
+ - load_wazuh_basic_configuration:
+ type: fixture
+ brief: Load basic wazuh configuration.
+ - set_wazuh_configuration:
+ type: fixture
+ brief: Apply changes to the ossec.conf configuration.
+ - configure_local_internal_options_function:
+ type: fixture
+ brief: Apply changes to the local_internal_options.conf configuration.
+ - truncate_event_logs:
+ type: fixture
+ brief: Truncate wazuh event logs.
+ - restart_wazuh_daemon_function:
+ type: fixture
+ brief: Restart the wazuh service.
+
+ assertions:
+ - Check that all expected events have been stored in the archives.log.
+ - Check that all events have been generated in the archives.log according to the expected order.
+
+ input_description:
+    - The `configuration_processing_events_in_order_multi_thread` file provides the module configuration for
+        this test.
+    - The `cases_processing_events_in_order_multi_thread` file provides the test cases.
+ """
+ # Set syslog simulator parameters according to the use case data
+ parameters = []
+ syslog_simulator_threads = []
+ syslog_simulator_parameters = {'address': metadata['address'], 'port': metadata['port'],
+ 'protocol': metadata['protocol'], 'eps': metadata['eps'],
+ 'messages_number': metadata['messages_number'], 'message': metadata['message_1']}
+ # Create syslog simulator threads
+ for index in range(metadata['num_batches']):
+ parameters.append(deepcopy(syslog_simulator_parameters))
+ parameters[index].update({'message': metadata[f"message_{index + 1}"]})
+ syslog_simulator_threads.append(ThreadExecutor(syslog_simulator, {'parameters': parameters[index]}))
+
+ # Start syslog simulator threads
+ for thread in syslog_simulator_threads:
+ thread.start()
+ time.sleep(metadata['batch_sending_time'])
+
+ # Wait until all events have been processed
+ waiting_time_to_process_all_events = \
+ ((metadata['messages_number'] * metadata['num_batches']) /
+ (metadata['maximum'] * metadata['timeframe'])) * metadata['timeframe'] + SYSLOG_SIMULATOR_START_TIME
+
+ waited_time_to_create_threads = metadata['batch_sending_time'] * metadata['num_batches']
+ time.sleep(waiting_time_to_process_all_events - waited_time_to_create_threads)
+
+    # Read the events log data and compute the expected total (messages per batch * number of batches)
+    events_data = file.read_file(ARCHIVES_LOG_PATH).split('\n')
+    expected_num_events = metadata['messages_number'] * metadata['num_batches']
+
+    # Check that all events have been recorded in the log file
+    assert len(events_data) >= expected_num_events, \
+        f"Not all expected events were found in the archives.log. Found={len(events_data)}, " \
+        f"expected>={expected_num_events}"
+
+ # Get the IDs of event messages
+ event_ids = [int(re.search(fr"{metadata['message_1']} - Group (\d+)", event).group(1)) for event in events_data
+ if bool(re.match(fr".*{metadata['message_1']} - Group (\d+)", event))]
+
+ # Check that the event message IDs are in order
+ assert all(event_ids[i] <= event_ids[i+1] for i in range(len(event_ids) - 1)), 'Events have not been processed ' \
+ 'in the expected order'
+ # Wait until all syslog simulator threads finish
+ for thread in syslog_simulator_threads:
+ thread.join()
diff --git a/tests/reliability/agent_connection/test_keep_alive.py b/tests/reliability/agent_connection/test_keep_alive.py
index 6dbc428cf0..bc67f668b1 100644
--- a/tests/reliability/agent_connection/test_keep_alive.py
+++ b/tests/reliability/agent_connection/test_keep_alive.py
@@ -73,7 +73,7 @@
def test_keep_alives(get_report):
'''
description: Check if the communication between managers and agents works as expected.
- This test ensures that ACK and keep alive does not overcome the specified maximum. The condition is checked using
+ This test ensures that ACK and keep alive does not overcome the specified maximum. The condition is checked using
the agentd statistics data and the keep-alives received by the manager in the logs file.
wazuh_min_version: 4.4.0
@@ -86,7 +86,7 @@ def test_keep_alives(get_report):
assertions:
- Verify agents maximum difference between ack and keepalive is less than specified maximum.
- Verify that the max_difference between keeps alives of all the agents in the managers side is less that
- the specified maximun.
+ the specified maximum.
- Verify the number of keepalives of each agent is the expected.
input_description: JSON environment reports