diff --git a/docker/mailoney/dist/schizo_open_relay.py b/docker/mailoney/dist/schizo_open_relay.py
deleted file mode 100644
index d8a19a9a..00000000
--- a/docker/mailoney/dist/schizo_open_relay.py
+++ /dev/null
@@ -1,382 +0,0 @@
-__author__ = '@botnet_hunter'
-
-from datetime import datetime
-import socket
-try:
-    import libemu
-except ImportError:
-    libemu = None
-import sys
-import errno
-import time
-import threading
-from time import gmtime, strftime
-import asyncore
-import asynchat
-import re
-import json
-
-sys.path.append("../")
-import mailoney
-
-output_lock = threading.RLock()
-hpc,hpfeeds_prefix = mailoney.connect_hpfeeds()
-
-def string_escape(s, encoding='utf-8'):
-    return (s.encode('latin1')         # To bytes, required by 'unicode-escape'
-             .decode('unicode-escape') # Perform the actual octal-escaping decode
-             .encode('latin1')         # 1:1 mapping back to bytes
-             .decode(encoding))        # Decode original encoding
-
-# def log_to_file(file_path, ip, port, data):
-    # with output_lock:
-        # with open(file_path, "a") as f:
-            # message = "[{0}][{1}:{2}] {3}".format(time.time(), ip, port, string_escape(data))
-            # print(file_path + " " + message)
-            # f.write(message + "\n")
-
-def log_to_file(file_path, ip, port, data):
-    with output_lock:
-        try:
-            with open(file_path, "a") as f:
-                # Find all email addresses in the data
-                emails = re.findall(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,6}\b', data)
-                if len(data) > 4096:
-                    data = "BIGSIZE"
-                dictmap = {
-                    'timestamp': strftime("20%y-%m-%dT%H:%M:%S.000000Z", gmtime()), 
-                    'src_ip': ip, 
-                    'src_port': port,  
-                    'data': data, 
-                    'smtp_input': emails
-                }
-                # Serialize the dictionary to a JSON-formatted string
-                json_data = json.dumps(dictmap)
-                f.write(json_data + '\n')
-                # Format the message for logging
-                message = "[{0}][{1}:{2}] {3}".format(time(), ip, port, repr(data))
-                # Log the message to console
-                print(file_path + " " + message)
-        except Exception as e:
-            # Log the error (or pass a specific message)
-            print("An error occurred while logging to file: ", str(e))
-
-def log_to_hpfeeds(channel, data):
-        if hpc:
-            message = data
-            hpfchannel=hpfeeds_prefix+"."+channel
-            hpc.publish(hpfchannel, message)
-
-def process_packet_for_shellcode(packet, ip, port):
-    if libemu is None:
-        return
-    emulator = libemu.Emulator()
-    r = emulator.test(packet)
-    if r is not None:
-        # we have shellcode
-        log_to_file(mailoney.logpath+"/shellcode.log", ip, port, "We have some shellcode")
-        #log_to_file(mailoney.logpath+"/shellcode.log", ip, port, emulator.emu_profile_output)
-        #log_to_hpfeeds("/shellcode", ip, port, emulator.emu_profile_output)
-        log_to_file(mailoney.logpath+"/shellcode.log", ip, port, packet)
-        log_to_hpfeeds("shellcode",  json.dumps({ "Timestamp":format(time.time()), "ServerName": self.__fqdn, "SrcIP": self.__addr[0], "SrcPort": self.__addr[1],"Shellcode" :packet}))
-
-def generate_version_date():
-    now = datetime.now()
-    week_days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
-    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
-    return "{0}, {1} {2} {3} {4}:{5}:{6}".format(week_days[now.weekday()], now.day, months[now.month - 1], now.year, str(now.hour).zfill(2), str(now.minute).zfill(2), str(now.second).zfill(2))
-
-__version__ = 'ESMTP Exim 4.69 #1 {0} -0700'.format(generate_version_date())
-EMPTYSTRING = b''
-NEWLINE = b'\n'
-
-class SMTPChannel(asynchat.async_chat):
-    COMMAND = 0
-    DATA = 1
-
-    def __init__(self, server, conn, addr):
-        asynchat.async_chat.__init__(self, conn)
-        self.__rolling_buffer = b""
-        self.__server = server
-        self.__conn = conn
-        self.__addr = addr
-        self.__line = []
-        self.__state = self.COMMAND
-        self.__greeting = 0
-        self.__mailfrom = None
-        self.__rcpttos = []
-        self.__data = ''
-        from mailoney import srvname
-        self.__fqdn = srvname
-        try:
-            self.__peer = conn.getpeername()
-        except socket.error as err:
-            # a race condition  may occur if the other end is closing
-            # before we can get the peername
-            self.close()
-            # Instead of directly subscripting the err, use err.errno to get the error code.
-            if err.errno != errno.ENOTCONN:
-                raise
-            return
-        #print(>> DEBUGSTREAM, 'Peer:', repr(self.__peer))
-        #self.set_terminator(b'\r\n')
-        self.set_terminator(b'\n')
-        self.push('220 %s %s' % (self.__fqdn, __version__))
-
-    # Overrides base class for convenience
-    def push(self, msg):
-        if isinstance(msg, str):
-            encoded_msg = msg.encode()
-        else:
-            encoded_msg = msg
-
-        asynchat.async_chat.push(self, encoded_msg + self.terminator)
-
-    # Implementation of base class abstract method
-    def collect_incoming_data(self, data):
-        self.__line.append(data)
-        self.__rolling_buffer += data
-        if len(self.__rolling_buffer) > 1024 * 1024:
-            self.__rolling_buffer = self.__rolling_buffer[len(self.__rolling_buffer) - 1024 * 1024:]
-        process_packet_for_shellcode(self.__rolling_buffer, self.__addr[0], self.__addr[1])
-        del data
-
-    # Implementation of base class abstract method
-    def found_terminator(self):
-
-        line = EMPTYSTRING.join(self.__line).decode()
-        log_to_file(mailoney.logpath+"/commands.log", self.__addr[0], self.__addr[1], string_escape(line))
-        log_to_hpfeeds("commands",  json.dumps({ "Timestamp":format(time.time()), "ServerName": self.__fqdn, "SrcIP": self.__addr[0], "SrcPort": self.__addr[1],"Commmand" : string_escape(line)}))
-
-        #print(>> DEBUGSTREAM, 'Data:', repr(line))
-        self.__line = []
-        if self.__state == self.COMMAND:
-            if not line:
-                self.push('500 Error: bad syntax')
-                return
-            method = None
-            i = line.find(' ')
-            if i < 0:
-                command = line.upper()
-                arg = None
-            else:
-                command = line[:i].upper()
-                arg = line[i+1:].strip()
-            method = getattr(self, 'smtp_' + command, None)
-            if not method:
-                self.push('502 Error: command "%s" not implemented' % command)
-                return
-            method(arg)
-            return
-        else:
-            if self.__state != self.DATA:
-                self.push('451 Internal confusion')
-                return
-            # Remove extraneous carriage returns and de-transparency according
-            # to RFC 821, Section 4.5.2.
-            data = []
-            for text in line.split('\r\n'):
-                if text and text[0] == '.':
-                    data.append(text[1:])
-                else:
-                    data.append(text)
-            self.__data = '\n'.join(data)
-            status = self.__server.process_message(self.__peer, self.__mailfrom, self.__rcpttos, self.__data)
-            self.__rcpttos = []
-            self.__mailfrom = None
-            self.__state = self.COMMAND
-            self.set_terminator(b'\r\n')
-            if not status:
-                self.push('250 Ok')
-            else:
-                self.push(status)
-
-    # SMTP and ESMTP commands
-    def smtp_HELO(self, arg):
-        if not arg:
-            self.push('501 Syntax: HELO hostname')
-            return
-        if self.__greeting:
-            self.push('503 Duplicate HELO/EHLO')
-        else:
-            self.__greeting = arg
-            self.push('250 %s' % self.__fqdn)
-
-    def smtp_EHLO(self, arg):
-        if not arg:
-            self.push('501 Syntax: EHLO hostname')
-            return
-        if self.__greeting:
-            self.push('503 Duplicate HELO/EHLO')
-        else:
-            self.__greeting = arg
-            self.push('250-{0} Hello {1} [{2}]'.format(self.__fqdn, arg, self.__addr[0]))
-            self.push('250-SIZE 52428800')
-            self.push('250 AUTH LOGIN PLAIN')
-
-    def smtp_NOOP(self, arg):
-        if arg:
-            self.push('501 Syntax: NOOP')
-        else:
-            self.push('250 Ok')
-
-    def smtp_QUIT(self, arg):
-        # args is ignored
-        self.push('221 Bye')
-        self.close_when_done()
-
-    def smtp_AUTH(self, arg):
-        # Accept any auth attempt
-        self.push('235 Authentication succeeded')
-
-    # factored
-    def __getaddr(self, keyword, arg):
-        address = None
-        keylen = len(keyword)
-        if arg[:keylen].upper() == keyword:
-            address = arg[keylen:].strip()
-            if not address:
-                pass
-            elif address[0] == '<' and address[-1] == '>' and address != '<>':
-                # Addresses can be in the form <person@dom.com> but watch out
-                # for null address, e.g. <>
-                address = address[1:-1]
-        return address
-
-    def smtp_MAIL(self, arg):
-        #print(>> DEBUGSTREAM, '===> MAIL', arg)
-        address = self.__getaddr('FROM:', arg) if arg else None
-        if not address:
-            self.push('501 Syntax: MAIL FROM:<address>')
-            return
-        if self.__mailfrom:
-            self.push('503 Error: nested MAIL command')
-            return
-        self.__mailfrom = address
-        #print(>> DEBUGSTREAM, 'sender:', self.__mailfrom)
-        self.push('250 Ok')
-
-    def smtp_RCPT(self, arg):
-        #print(>> DEBUGSTREAM, '===> RCPT', arg)
-        if not self.__mailfrom:
-            self.push('503 Error: need MAIL command')
-            return
-        address = self.__getaddr('TO:', arg) if arg else None
-        if not address:
-            self.push('501 Syntax: RCPT TO: <address>')
-            return
-        self.__rcpttos.append(address)
-        #print(>> DEBUGSTREAM, 'recips:', self.__rcpttos)
-        self.push('250 Ok')
-
-    def smtp_RSET(self, arg):
-        if arg:
-            self.push('501 Syntax: RSET')
-            return
-        # Resets the sender, recipients, and data, but not the greeting
-        self.__mailfrom = None
-        self.__rcpttos = []
-        self.__data = ''
-        self.__state = self.COMMAND
-        self.push('250 Ok')
-
-    def smtp_DATA(self, arg):
-        if not self.__rcpttos:
-            self.push('503 Error: need RCPT command')
-            return
-        if arg:
-            self.push('501 Syntax: DATA')
-            return
-        self.__state = self.DATA
-        self.set_terminator(b'\r\n.\r\n')
-        self.push('354 End data with <CR><LF>.<CR><LF>')
-
-
-class SMTPServer(asyncore.dispatcher):
-    def __init__(self, localaddr, remoteaddr):
-        self._localaddr = localaddr
-        self._remoteaddr = remoteaddr
-        asyncore.dispatcher.__init__(self)
-        try:
-            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
-            # try to re-use a server port if possible
-            self.set_reuse_addr()
-            self.bind(localaddr)
-            self.listen(5)
-        except:
-            # cleanup asyncore.socket_map before raising
-            self.close()
-            raise
-        else:
-            pass
-            #print(>> DEBUGSTREAM, '%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (self.__class__.__name__, time.ctime(time.time()), localaddr, remoteaddr))
-
-    def handle_accept(self):
-        pair = self.accept()
-        if pair is not None:
-            conn, addr = pair
-            channel = SMTPChannel(self, conn, addr)
-
-    def handle_close(self):
-        self.close()
-
-    # API for "doing something useful with the message"
-    def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,rcpt_options=None):
-        """Override this abstract method to handle messages from the client.
-
-        peer is a tuple containing (ipaddr, port) of the client that made the
-        socket connection to our smtp port.
-
-        mailfrom is the raw address the client claims the message is coming
-        from.
-
-        rcpttos is a list of raw addresses the client wishes to deliver the
-        message to.
-
-        data is a string containing the entire full text of the message,
-        headers (if supplied) and all.  It has been `de-transparencied'
-        according to RFC 821, Section 4.5.2.  In other words, a line
-        containing a `.' followed by other text has had the leading dot
-        removed.
-
-        This function should return None, for a normal `250 Ok' response;
-        otherwise it returns the desired response string in RFC 821 format.
-
-        """
-        raise NotImplementedError
-
-
-
-def module():
-
-    class SchizoOpenRelay(SMTPServer):
-
-        def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,rcpt_options=None):
-            #setup the Log File
-            log_to_file(mailoney.logpath+"/mail.log", peer[0], peer[1], '')
-            log_to_file(mailoney.logpath+"/mail.log", peer[0], peer[1], '*' * 50)
-            log_to_file(mailoney.logpath+"/mail.log", peer[0], peer[1], 'Mail from: {0}'.format(mailfrom))
-            log_to_file(mailoney.logpath+"/mail.log", peer[0], peer[1], 'Mail to: {0}'.format(", ".join(rcpttos)))
-            log_to_file(mailoney.logpath+"/mail.log", peer[0], peer[1], 'Data:')
-            log_to_file(mailoney.logpath+"/mail.log", peer[0], peer[1], data)
-
-            loghpfeeds = {}
-            loghpfeeds['ServerName'] = mailoney.srvname
-            loghpfeeds['Timestamp'] = format(time.time())
-            loghpfeeds['SrcIP'] = peer[0]
-            loghpfeeds['SrcPort'] = peer[1]
-            loghpfeeds['MailFrom'] = mailfrom
-            loghpfeeds['MailTo'] = format(", ".join(rcpttos))
-            loghpfeeds['Data'] = data
-            log_to_hpfeeds("mail", json.dumps(loghpfeeds))
-
-
-    def run():
-        honeypot = SchizoOpenRelay((mailoney.bind_ip, mailoney.bind_port), None)
-        print('[*] Mail Relay listening on {}:{}'.format(mailoney.bind_ip, mailoney.bind_port))
-        try:
-            asyncore.loop()
-            print("exiting for some unknown reason")
-        except KeyboardInterrupt:
-            print('Detected interruption, terminating...')
-    run()
diff --git a/docker/nginx/builder/esvue/Dockerfile b/docker/nginx/builder/esvue/Dockerfile
index b1138e81..589a7d1e 100644
--- a/docker/nginx/builder/esvue/Dockerfile
+++ b/docker/nginx/builder/esvue/Dockerfile
@@ -4,7 +4,8 @@ FROM node:20-alpine AS builder
 #
 # Prep and build Elasticvue 
 RUN apk -U --no-cache add git && \
-    git clone https://github.com/cars10/elasticvue -b v1.0.4 /opt/src && \
+    # git clone https://github.com/cars10/elasticvue -b v1.0.4 /opt/src && \
+    git clone https://github.com/t3chn0m4g3/elasticvue /opt/src && \
 # We need to adjust consts.ts so the user has connection suggestion for reverse proxied ES
     sed -i "s#export const DEFAULT_CLUSTER_URI = 'http://localhost:9200'#export const DEFAULT_CLUSTER_URI = window.location.origin + '/es'#g" /opt/src/src/consts.ts && \
     sed -i 's#href="/images/logo/favicon.ico"#href="images/logo/favicon.ico"#g' /opt/src/index.html && \
@@ -14,7 +15,8 @@ RUN apk -U --no-cache add git && \
     cp /opt/src/yarn.lock . && \
     yarn install && \
     cp -R /opt/src/* . && \
-    VITE_APP_BUILD_MODE=docker VUE_APP_PUBLIC_PATH=/elasticvue/ yarn build && \
+    export VITE_APP_BUILD_MODE=docker && \
+    export VITE_APP_PUBLIC_PATH="/elasticvue/" && \
     yarn build && \
     cd dist && \
     tar cvfz esvue.tgz *
diff --git a/docker/nginx/builder/esvue/Dockerfile.old b/docker/nginx/builder/esvue/Dockerfile.old
deleted file mode 100644
index c8f869a8..00000000
--- a/docker/nginx/builder/esvue/Dockerfile.old
+++ /dev/null
@@ -1,21 +0,0 @@
-FROM node:14.18-alpine AS builder
-#
-# Prep and build Elasticvue 
-RUN apk -U --no-cache add git && \
-    git clone https://github.com/cars10/elasticvue -b v0.44.0 /opt/src && \
-# We need to adjust consts.js so the user has connection suggestion for reverse proxied ES
-    sed -i "s#export const DEFAULT_HOST = 'http://localhost:9200'#export const DEFAULT_HOST = window.location.origin + '/es'#g" /opt/src/src/consts.js && \
-    sed -i 's#href="/images/logo/favicon.ico"#href="images/logo/favicon.ico"#g' /opt/src/public/index.html && \
-    mkdir /opt/app && \
-    cd /opt/app && \
-    cp /opt/src/package.json . && \
-    cp /opt/src/yarn.lock . && \
-    yarn install --ignore-optional && \
-    cp -R /opt/src/* . && \
-# We need to set this ENV so we can run Elasticvue in its own location rather than /
-    VUE_APP_PUBLIC_PATH=/elasticvue/ yarn build && \
-    cd dist && \
-    tar cvfz esvue.tgz *
-#    
-FROM scratch AS exporter
-COPY --from=builder /opt/app/dist/esvue.tgz /
diff --git a/docker/nginx/builder/esvue/build.sh b/docker/nginx/builder/esvue/build.sh
index e55dfe4b..c79afe0e 100755
--- a/docker/nginx/builder/esvue/build.sh
+++ b/docker/nginx/builder/esvue/build.sh
@@ -1,5 +1,3 @@
 #!/bin/bash
 # Needs buildx to build. Run tpotce/bin/setup-builder.sh first
-echo "do not build!"
-exit 0
 docker buildx build --no-cache --progress plain --output ../../dist/html/esvue/ .
diff --git a/docker/nginx/dist/html/esvue/esvue.tgz b/docker/nginx/dist/html/esvue/esvue.tgz
index dfd419c5..fd831e2a 100644
Binary files a/docker/nginx/dist/html/esvue/esvue.tgz and b/docker/nginx/dist/html/esvue/esvue.tgz differ
diff --git a/docker/suricata/dist/suricata_new.yaml b/docker/suricata/dist/suricata_new.yaml
deleted file mode 100644
index beda0779..00000000
--- a/docker/suricata/dist/suricata_new.yaml
+++ /dev/null
@@ -1,2167 +0,0 @@
-%YAML 1.1
----
-
-# Suricata configuration file. In addition to the comments describing all
-# options in this file, full documentation can be found at:
-# https://docs.suricata.io/en/latest/configuration/suricata-yaml.html
-
-# This configuration file generated by Suricata 7.0.2.
-suricata-version: "7.0"
-
-##
-## Step 1: Inform Suricata about your network
-##
-
-vars:
-  # more specific is better for alert accuracy and performance
-  address-groups:
-    HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]"
-    #HOME_NET: "[192.168.0.0/16]"
-    #HOME_NET: "[10.0.0.0/8]"
-    #HOME_NET: "[172.16.0.0/12]"
-    #HOME_NET: "any"
-
-    EXTERNAL_NET: "!$HOME_NET"
-    #EXTERNAL_NET: "any"
-
-    HTTP_SERVERS: "$HOME_NET"
-    SMTP_SERVERS: "$HOME_NET"
-    SQL_SERVERS: "$HOME_NET"
-    DNS_SERVERS: "$HOME_NET"
-    TELNET_SERVERS: "$HOME_NET"
-    AIM_SERVERS: "$EXTERNAL_NET"
-    DC_SERVERS: "$HOME_NET"
-    DNP3_SERVER: "$HOME_NET"
-    DNP3_CLIENT: "$HOME_NET"
-    MODBUS_CLIENT: "$HOME_NET"
-    MODBUS_SERVER: "$HOME_NET"
-    ENIP_CLIENT: "$HOME_NET"
-    ENIP_SERVER: "$HOME_NET"
-
-  port-groups:
-    HTTP_PORTS: "80,8080,8081"
-    SHELLCODE_PORTS: "!80,!8080,!8081"
-    ORACLE_PORTS: "1433,1521,3306"
-    SSH_PORTS: "22,64295"
-    DNP3_PORTS: 20000
-    MODBUS_PORTS: 502
-    FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
-    FTP_PORTS: 21
-    GENEVE_PORTS: 6081
-    VXLAN_PORTS: 4789
-    TEREDO_PORTS: 3544
-
-##
-## Step 2: Select outputs to enable
-##
-
-# The default logging directory.  Any log or output file will be
-# placed here if it's not specified with a full path name. This can be
-# overridden with the -l command line parameter.
-default-log-dir: /var/log/suricata/
-
-# Global stats configuration
-stats:
-  enabled: no
-  # The interval field (in seconds) controls the interval at
-  # which stats are updated in the log.
-  interval: 8
-  # Add decode events to stats.
-  #decoder-events: true
-  # Decoder event prefix in stats. Has been 'decoder' before, but that leads
-  # to missing events in the eve.stats records. See issue #2225.
-  #decoder-events-prefix: "decoder.event"
-  # Add stream events as stats.
-  #stream-events: false
-
-# Plugins -- Experimental -- specify the filename for each plugin shared object
-plugins:
-#   - /path/to/plugin.so
-
-# Configure the type of alert (and other) logging you would like.
-outputs:
-  # a line based alerts log similar to Snort's fast.log
-  - fast:
-      enabled: no
-      filename: fast.log
-      append: yes
-      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
-
-  # Extensible Event Format (nicknamed EVE) event log in JSON format
-  - eve-log:
-      enabled: yes
-      filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
-      filename: eve.json
-      # Enable for multi-threaded eve.json output; output files are amended with
-      # an identifier, e.g., eve.9.json
-      #threaded: false
-      #prefix: "@cee: " # prefix to prepend to each log entry
-      # the following are valid when type: syslog above
-      #identity: "suricata"
-      #facility: local5
-      #level: Info ## possible levels: Emergency, Alert, Critical,
-                   ## Error, Warning, Notice, Info, Debug
-      #ethernet: no  # log ethernet header in events when available
-      #redis:
-      #  server: 127.0.0.1
-      #  port: 6379
-      #  async: true ## if redis replies are read asynchronously
-      #  mode: list ## possible values: list|lpush (default), rpush, channel|publish
-      #             ## lpush and rpush are using a Redis list. "list" is an alias for lpush
-      #             ## publish is using a Redis channel. "channel" is an alias for publish
-      #  key: suricata ## key or channel to use (default to suricata)
-      # Redis pipelining setup. This enables issuing a query only every
-      # 'batch-size' events. This should lower the latency induced by network
-      # connection at the cost of some memory. There is no flushing implemented
-      # so this setting should be reserved to high traffic Suricata deployments.
-      #  pipelining:
-      #    enabled: yes ## set enable to yes to enable query pipelining
-      #    batch-size: 10 ## number of entries to keep in buffer
-
-      # Include top level metadata. Default yes.
-      #metadata: no
-
-      # include the name of the input pcap file in pcap file processing mode
-      pcap-file: false
-
-      # Community Flow ID
-      # Adds a 'community_id' field to EVE records. These are meant to give
-      # records a predictable flow ID that can be used to match records to
-      # output of other tools such as Zeek (Bro).
-      #
-      # Takes a 'seed' that needs to be the same across sensors and tools
-      # to make the id less predictable.
-
-      # enable/disable the community id feature.
-      community-id: false
-      # Seed value for the ID output. Valid values are 0-65535.
-      community-id-seed: 0
-
-      # HTTP X-Forwarded-For support by adding an extra field or overwriting
-      # the source or destination IP address (depending on flow direction)
-      # with the one reported in the X-Forwarded-For HTTP header. This is
-      # helpful when reviewing alerts for traffic that is being reverse
-      # or forward proxied.
-      xff:
-        enabled: yes
-        # Two operation modes are available: "extra-data" and "overwrite".
-        mode: extra-data
-        # Two proxy deployments are supported: "reverse" and "forward". In
-        # a "reverse" deployment the IP address used is the last one, in a
-        # "forward" deployment the first IP address is used.
-        deployment: reverse
-        # Header name where the actual IP address will be reported. If more
-        # than one IP address is present, the last IP address will be the
-        # one taken into consideration.
-        header: X-Forwarded-For
-
-      types:
-        - alert:
-            payload: yes             # enable dumping payload in Base64
-            payload-buffer-size: 4kb # max size of payload buffer to output in eve-log
-            payload-printable: yes   # enable dumping payload in printable (lossy) format
-            # packet: yes              # enable dumping of packet (without stream segments)
-            # metadata: no             # enable inclusion of app layer metadata with alert. Default yes
-            http-body: yes           # Requires metadata; enable dumping of HTTP body in Base64
-            http-body-printable: yes # Requires metadata; enable dumping of HTTP body in printable format
-
-            # Enable the logging of tagged packets for rules using the
-            # "tag" keyword.
-            tagged-packets: yes
-            # Enable logging the final action taken on a packet by the engine
-            # (e.g: the alert may have action 'allowed' but the verdict be
-            # 'drop' due to another alert. That's the engine's verdict)
-            # verdict: yes
-        # app layer frames
-        - frame:
-            # disabled by default as this is very verbose.
-            enabled: no
-        - anomaly:
-            # Anomaly log records describe unexpected conditions such
-            # as truncated packets, packets with invalid IP/UDP/TCP
-            # length values, and other events that render the packet
-            # invalid for further processing or describe unexpected
-            # behavior on an established stream. Networks which
-            # experience high occurrences of anomalies may experience
-            # packet processing degradation.
-            #
-            # Anomalies are reported for the following:
-            # 1. Decode: Values and conditions that are detected while
-            # decoding individual packets. This includes invalid or
-            # unexpected values for low-level protocol lengths as well
-            # as stream related events (TCP 3-way handshake issues,
-            # unexpected sequence number, etc).
-            # 2. Stream: This includes stream related events (TCP
-            # 3-way handshake issues, unexpected sequence number,
-            # etc).
-            # 3. Application layer: These denote application layer
-            # specific conditions that are unexpected, invalid or are
-            # unexpected given the application monitoring state.
-            #
-            # By default, anomaly logging is enabled. When anomaly
-            # logging is enabled, applayer anomaly reporting is
-            # also enabled.
-            enabled: yes
-            #
-            # Choose one or more types of anomaly logging and whether to enable
-            # logging of the packet header for packet anomalies.
-            types:
-              # decode: no
-              # stream: no
-              # applayer: yes
-            #packethdr: no
-        - http:
-            extended: yes     # enable this for extended logging information
-            # custom allows additional HTTP fields to be included in eve-log.
-            # the example below adds three additional fields when uncommented
-            custom: [Accept-Encoding, Accept-Language, Authorization, Forwarded, From, Referer, Via]
-            # set this value to one and only one from {both, request, response}
-            # to dump all HTTP headers for every HTTP request and/or response
-            # dump-all-headers: none
-        - dns:
-            # This configuration uses the new DNS logging format,
-            # the old configuration is still available:
-            # https://docs.suricata.io/en/latest/output/eve/eve-json-output.html#dns-v1-format
-
-            # As of Suricata 5.0, version 2 of the eve dns output
-            # format is the default.
-            #version: 2
-
-            # Enable/disable this logger. Default: enabled.
-            #enabled: yes
-
-            # Control logging of requests and responses:
-            # - requests: enable logging of DNS queries
-            # - responses: enable logging of DNS answers
-            # By default both requests and responses are logged.
-            #requests: no
-            #responses: no
-
-            # Format of answer logging:
-            # - detailed: array item per answer
-            # - grouped: answers aggregated by type
-            # Default: all
-            #formats: [detailed, grouped]
-
-            # DNS record types to log, based on the query type.
-            # Default: all.
-            #types: [a, aaaa, cname, mx, ns, ptr, txt]
-        - tls:
-            extended: yes     # enable this for extended logging information
-            # output TLS transaction where the session is resumed using a
-            # session id
-            #session-resumption: no
-            # custom controls which TLS fields that are included in eve-log
-            custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, ja3, ja3s]
-        - files:
-            force-magic: yes   # force logging magic on all logged files
-            # force logging of checksums, available hash functions are md5,
-            # sha1 and sha256
-            force-hash: [md5]
-        #- drop:
-        #    alerts: yes      # log alerts that caused drops
-        #    flows: all       # start or all: 'start' logs only a single drop
-        #                     # per flow direction. All logs each dropped pkt.
-            # Enable logging the final action taken on a packet by the engine
-            # (will show more information in case of a drop caused by 'reject')
-            # verdict: yes
-        - smtp:
-            extended: yes # enable this for extended logging information
-            # this includes: bcc, message-id, subject, x_mailer, user-agent
-            # custom fields logging from the list:
-            #  reply-to, bcc, message-id, subject, x-mailer, user-agent, received,
-            #  x-originating-ip, in-reply-to, references, importance, priority,
-            #  sensitivity, organization, content-md5, date
-            custom: [bcc, message-id, subject, x_mailer, user-agent, reply-to, received, x-originating-ip, in-reply-to, references, importance, priority, sensitivity, organization, content-md5, date, relays]
-            # output md5 of fields: body, subject
-            # for the body you need to set app-layer.protocols.smtp.mime.body-md5
-            # to yes
-            md5: [body, subject]
-
-        - dnp3
-        - ftp
-        - rdp
-        - nfs
-        - smb
-        - tftp
-        - ike
-        - dcerpc
-        - krb5
-        - bittorrent-dht
-        - snmp
-        - rfb
-        - sip
-        - quic
-        - dhcp:
-            enabled: no
-            # When extended mode is on, all DHCP messages are logged
-            # with full detail. When extended mode is off (the
-            # default), just enough information to map a MAC address
-            # to an IP address is logged.
-            extended: no
-        - ssh
-        - mqtt:
-            passwords: yes           # enable output of passwords
-        - http2
-        - pgsql:
-            enabled: yes
-            passwords: yes           # enable output of passwords. Disabled by default
-        #- stats:
-        #    totals: no       # stats for all threads merged together
-        #    threads: no       # per thread stats
-        #    deltas: no        # include delta values
-        # bi-directional flows
-        - flow
-        # uni-directional flows
-        #- netflow
-
-        # Metadata event type. Triggered whenever a pktvar is saved
-        # and will include the pktvars, flowvars, flowbits and
-        # flowints.
-        #- metadata
-
-        # EXPERIMENTAL per packet output giving TCP state tracking details
-        # including internal state, flags, etc.
-        # This output is experimental, meant for debugging and subject to
-        # change in both config and output without any notice.
-        #- stream:
-        #   all: false                      # log all TCP packets
-        #   event-set: false                # log packets that have a decoder/stream event
-        #   state-update: false             # log packets triggering a TCP state update
-        #   spurious-retransmission: false  # log spurious retransmission packets
-
-  # a line based log of HTTP requests (no alerts)
-  - http-log:
-      enabled: no
-      filename: http.log
-      append: yes
-      #extended: yes     # enable this for extended logging information
-      #custom: yes       # enable the custom logging format (defined by customformat)
-      #customformat: "%{%D-%H:%M:%S}t.%z %{X-Forwarded-For}i %H %m %h %u %s %B %a:%p -> %A:%P"
-      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
-
-  # a line based log of TLS handshake parameters (no alerts)
-  - tls-log:
-      enabled: no  # Log TLS connections.
-      filename: tls.log # File to store TLS logs.
-      append: yes
-      #extended: yes     # Log extended information like fingerprint
-      #custom: yes       # enabled the custom logging format (defined by customformat)
-      #customformat: "%{%D-%H:%M:%S}t.%z %a:%p -> %A:%P %v %n %d %D"
-      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
-      # output TLS transaction where the session is resumed using a
-      # session id
-      #session-resumption: no
-
-  # output module to store certificates chain to disk
-  - tls-store:
-      enabled: no
-      #certs-log-dir: certs # directory to store the certificates files
-
-  # Packet log... log packets in pcap format. 3 modes of operation: "normal"
-  # "multi" and "sguil".
-  #
-  # In normal mode a pcap file "filename" is created in the default-log-dir,
-  # or as specified by "dir".
-  # In multi mode, a file is created per thread. This will perform much
-  # better, but will create multiple files where 'normal' would create one.
-  # In multi mode the filename takes a few special variables:
-  # - %n -- thread number
-  # - %i -- thread id
-  # - %t -- timestamp (secs or secs.usecs based on 'ts-format')
-  # E.g. filename: pcap.%n.%t
-  #
-  # Note that it's possible to use directories, but the directories are not
-  # created by Suricata. E.g. filename: pcaps/%n/log.%s will log into the
-  # per thread directory.
-  #
-  # Also note that the limit and max-files settings are enforced per thread.
-  # So the size limit when using 8 threads with 1000mb files and 2000 files
-  # is: 8*1000*2000 ~ 16TiB.
-  #
-  # In Sguil mode "dir" indicates the base directory. In this base dir the
-  # pcaps are created in the directory structure Sguil expects:
-  #
-  # $sguil-base-dir/YYYY-MM-DD/$filename.<timestamp>
-  #
-  # By default all packets are logged except:
-  # - TCP streams beyond stream.reassembly.depth
-  # - encrypted streams after the key exchange
-  #
-  - pcap-log:
-      enabled: no
-      filename: log.pcap
-
-      # File size limit.  Can be specified in kb, mb, gb.  Just a number
-      # is parsed as bytes.
-      limit: 1000mb
-
-      # If set to a value, ring buffer mode is enabled. Will keep maximum of
-      # "max-files" of size "limit"
-      max-files: 2000
-
-      # Compression algorithm for pcap files. Possible values: none, lz4.
-      # Enabling compression is incompatible with the sguil mode. Note also
-      # that on Windows, enabling compression will *increase* disk I/O.
-      compression: none
-
-      # Further options for lz4 compression. The compression level can be set
-      # to a value between 0 and 16, where higher values result in higher
-      # compression.
-      #lz4-checksum: no
-      #lz4-level: 0
-
-      mode: normal # normal, multi or sguil.
-
-      # Directory to place pcap files. If not provided the default log
-      # directory will be used. Required for "sguil" mode.
-      #dir: /nsm_data/
-
-      #ts-format: usec # sec or usec second format (default) is filename.sec usec is filename.sec.usec
-      use-stream-depth: no #If set to "yes" packets seen after reaching stream inspection depth are ignored. "no" logs all packets
-      honor-pass-rules: no # If set to "yes", flows in which a pass rule matched will stop being logged.
-      # Use "all" to log all packets or use "alerts" to log only alerted packets and flows or "tag"
-      # to log only flow tagged via the "tag" keyword
-      #conditional: all
-
-  # a full alert log containing much information for signature writers
-  # or for investigating suspected false positives.
-  - alert-debug:
-      enabled: no
-      filename: alert-debug.log
-      append: yes
-      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
-
-  # Stats.log contains data from various counters of the Suricata engine.
-  - stats:
-      enabled: no
-      filename: stats.log
-      append: yes       # append to file (yes) or overwrite it (no)
-      totals: yes       # stats for all threads merged together
-      threads: no       # per thread stats
-      #null-values: yes  # print counters that have value 0. Default: no
-
-  # a line based alerts log similar to fast.log into syslog
-  - syslog:
-      enabled: no
-      # reported identity to syslog. If omitted the program name (usually
-      # suricata) will be used.
-      #identity: "suricata"
-      facility: local5
-      #level: Info ## possible levels: Emergency, Alert, Critical,
-                   ## Error, Warning, Notice, Info, Debug
-
-  # Output module for storing files on disk. Files are stored in
-  # directory names consisting of the first 2 characters of the
-  # SHA256 of the file. Each file is given its SHA256 as a filename.
-  #
-  # When a duplicate file is found, the timestamps on the existing file
-  # are updated.
-  #
-  # Unlike the older filestore, metadata is not written by default
-  # as each file should already have a "fileinfo" record in the
-  # eve-log. If write-fileinfo is set to yes, then each file will have
-  # an associated .json file that consists of the fileinfo
-  # record. A fileinfo file will be written for each occurrence of the
-  # file seen using a filename suffix to ensure uniqueness.
-  #
-  # To prune the filestore directory see the "suricatactl filestore
-  # prune" command which can delete files over a certain age.
-  - file-store:
-      version: 2
-      enabled: no
-
-      # Set the directory for the filestore. Relative pathnames
-      # are contained within the "default-log-dir".
-      #dir: filestore
-
-      # Write out a fileinfo record for each occurrence of a file.
-      # Disabled by default as each occurrence is already logged
-      # as a fileinfo record to the main eve-log.
-      #write-fileinfo: yes
-
-      # Force storing of all files. Default: no.
-      #force-filestore: yes
-
-      # Override the global stream-depth for sessions in which we want
-      # to perform file extraction. Set to 0 for unlimited; otherwise,
-      # must be greater than the global stream-depth value to be used.
-      #stream-depth: 0
-
-      # Uncomment the following variable to define how many files can
-      # remain open for filestore by Suricata. Default value is 0 which
-      # means files get closed after each write to the file.
-      #max-open-files: 1000
-
-      # Force logging of checksums: available hash functions are md5,
-      # sha1 and sha256. Note that SHA256 is automatically forced by
-      # the use of this output module as it uses the SHA256 as the
-      # file naming scheme.
-      #force-hash: [sha1, md5]
-      # NOTE: X-Forwarded configuration is ignored if write-fileinfo is disabled
-      # HTTP X-Forwarded-For support by adding an extra field or overwriting
-      # the source or destination IP address (depending on flow direction)
-      # with the one reported in the X-Forwarded-For HTTP header. This is
-      # helpful when reviewing alerts for traffic that is being reverse
-      # or forward proxied.
-      xff:
-        enabled: no
-        # Two operation modes are available, "extra-data" and "overwrite".
-        mode: extra-data
-        # Two proxy deployments are supported, "reverse" and "forward". In
-        # a "reverse" deployment the IP address used is the last one, in a
-        # "forward" deployment the first IP address is used.
-        deployment: reverse
-        # Header name where the actual IP address will be reported. If more
-        # than one IP address is present, the last IP address will be the
-        # one taken into consideration.
-        header: X-Forwarded-For
-
-  # Log TCP data after stream normalization
-  # Two types: file or dir:
-  #     - file logs into a single logfile.
-  #     - dir creates 2 files per TCP session and stores the raw TCP
-  #            data into them.
-  # Use 'both' to enable both file and dir modes.
-  #
-  # Note: limited by "stream.reassembly.depth"
-  - tcp-data:
-      enabled: no
-      type: file
-      filename: tcp-data.log
-
-  # Log HTTP body data after normalization, de-chunking and unzipping.
-  # Two types: file or dir.
-  #     - file logs into a single logfile.
-  #     - dir creates 2 files per HTTP session and stores the
-  #           normalized data into them.
-  # Use 'both' to enable both file and dir modes.
-  #
-  # Note: limited by the body limit settings
-  - http-body-data:
-      enabled: no
-      type: file
-      filename: http-data.log
-
-  # Lua Output Support - execute lua script to generate alert and event
-  # output.
-  # Documented at:
-  # https://docs.suricata.io/en/latest/output/lua-output.html
-  - lua:
-      enabled: no
-      #scripts-dir: /etc/suricata/lua-output/
-      scripts:
-      #   - script1.lua
-
-# Logging configuration.  This is not about logging IDS alerts/events, but
-# output about what Suricata is doing, like startup messages, errors, etc.
-logging:
-  # The default log level: can be overridden in an output section.
-  # Note that debug level logging will only be emitted if Suricata was
-  # compiled with the --enable-debug configure option.
-  #
-  # This value is overridden by the SC_LOG_LEVEL env var.
-  default-log-level: notice
-
-  # The default output format.  Optional parameter, should default to
-  # something reasonable if not provided.  Can be overridden in an
-  # output section.  You can leave this out to get the default.
-  #
-  # This console log format value can be overridden by the SC_LOG_FORMAT env var.
-  #default-log-format: "%D: %S: %M"
-  #
-  # For the pre-7.0 log format use:
-  #default-log-format: "[%i] %t [%S] - (%f:%l) <%d> (%n) -- "
-
-  # A regex to filter output.  Can be overridden in an output section.
-  # Defaults to empty (no filter).
-  #
-  # This value is overridden by the SC_LOG_OP_FILTER env var.
-  default-output-filter:
-
-  # Requires libunwind to be available when Suricata is configured and built.
-  # If a signal unexpectedly terminates Suricata, displays a brief diagnostic
-  # message with the offending stacktrace if enabled.
-  #stacktrace-on-signal: on
-
-  # Define your logging outputs.  If none are defined, or they are all
-  # disabled you will get the default: console output.
-  outputs:
-  - console:
-      enabled: yes
-      # type: json
-  - file:
-      enabled: yes
-      level: info
-      filename: /var/log/suricata/suricata.log
-      # format: "[%i - %m] %z %d: %S: %M"
-      # type: json
-  - syslog:
-      enabled: no
-      facility: local5
-      format: "[%i] <%d> -- "
-      # type: json
-
-
-##
-## Step 3: Configure common capture settings
-##
-## See "Advanced Capture Options" below for more options, including Netmap
-## and PF_RING.
-##
-
-# Linux high speed capture support
-af-packet:
-  - interface: eth0
-    # Number of receive threads. "auto" uses the number of cores
-    #threads: auto
-    # Default clusterid. AF_PACKET will load balance packets based on flow.
-    cluster-id: 99
-    # Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash.
-    # This is only supported for Linux kernel > 3.1
-    # possible value are:
-    #  * cluster_flow: all packets of a given flow are sent to the same socket
-    #  * cluster_cpu: all packets treated in kernel by a CPU are sent to the same socket
-    #  * cluster_qm: all packets linked by network card to a RSS queue are sent to the same
-    #  socket. Requires at least Linux 3.14.
-    #  * cluster_ebpf: eBPF file load balancing. See doc/userguide/capture-hardware/ebpf-xdp.rst for
-    #  more info.
-    # Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system
-    # with capture card using RSS (requires cpu affinity tuning and system IRQ tuning)
-    # cluster_rollover has been deprecated; if used, it'll be replaced with cluster_flow.
-    cluster-type: cluster_flow
-    # In some fragmentation cases, the hash can not be computed. If "defrag" is set
-    # to yes, the kernel will do the needed defragmentation before sending the packets.
-    defrag: yes
-    # To use the ring feature of AF_PACKET, set 'use-mmap' to yes
-    #use-mmap: yes
-    # Lock memory map to avoid it being swapped. Be careful that over
-    # subscribing could lock your system
-    #mmap-locked: yes
-    # Use tpacket_v3 capture mode, only active if use-mmap is true
-    # Don't use it in IPS or TAP mode as it causes severe latency
-    #tpacket-v3: yes
-    # Ring size will be computed with respect to "max-pending-packets" and number
-    # of threads. You can set manually the ring size in number of packets by setting
-    # the following value. If you are using flow "cluster-type" and have really network
-    # intensive single-flow you may want to set the "ring-size" independently of the number
-    # of threads:
-    #ring-size: 2048
-    # Block size is used by tpacket_v3 only. It should be set to a value high enough to contain
-    # a decent number of packets. Size is in bytes so please consider your MTU. It should be
-    # a power of 2 and it must be a multiple of page size (usually 4096).
-    #block-size: 32768
-    # tpacket_v3 block timeout: an open block is passed to userspace if it is not
-    # filled after block-timeout milliseconds.
-    #block-timeout: 10
-    # On busy systems, set it to yes to help recover from a packet drop
-    # phase. This will result in some packets (at max a ring flush) not being inspected.
-    #use-emergency-flush: yes
-    # recv buffer size, increased value could improve performance
-    # buffer-size: 32768
-    # Set to yes to disable promiscuous mode
-    # disable-promisc: no
-    # Choose checksum verification mode for the interface. At the moment
-    # of the capture, some packets may have an invalid checksum due to
-    # the checksum computation being offloaded to the network card.
-    # Possible values are:
-    #  - kernel: use indication sent by kernel for each packet (default)
-    #  - yes: checksum validation is forced
-    #  - no: checksum validation is disabled
-    #  - auto: Suricata uses a statistical approach to detect when
-    #  checksum off-loading is used.
-    # Warning: 'capture.checksum-validation' must be set to yes to have any validation
-    #checksum-checks: kernel
-    # BPF filter to apply to this interface. The pcap filter syntax applies here.
-    #bpf-filter: port 80 or udp
-    # You can use the following variables to activate AF_PACKET tap or IPS mode.
-    # If copy-mode is set to ips or tap, the traffic coming to the current
-    # interface will be copied to the copy-iface interface. If 'tap' is set, the
-    # copy is complete. If 'ips' is set, the packet matching a 'drop' action
-    # will not be copied.
-    #copy-mode: ips
-    #copy-iface: eth1
-    #  For eBPF and XDP setup including bypass, filter and load balancing, please
-    #  see doc/userguide/capture-hardware/ebpf-xdp.rst for more info.
-
-  # Put default values here. These will be used for an interface that is not
-  # in the list above.
-  - interface: default
-    #threads: auto
-    #use-mmap: no
-    #tpacket-v3: yes
-
-# Linux high speed af-xdp capture support
-af-xdp:
-  - interface: default
-    # Number of receive threads. "auto" uses least between the number
-    # of cores and RX queues
-    #threads: auto
-    #disable-promisc: false
-    # XDP_DRV mode can be chosen when the driver supports XDP
-    # XDP_SKB mode can be chosen when the driver does not support XDP
-    # Possible values are:
-    #  - drv: enable XDP_DRV mode
-    #  - skb: enable XDP_SKB mode
-    #  - none: disable (kernel in charge of applying mode)
-    #force-xdp-mode: none
-    # During socket binding the kernel will attempt zero-copy, if this
-    # fails it will fallback to copy. If this fails, the bind fails.
-    # The bind can be explicitly configured using the option below.
-    # If configured, the bind will fail if not successful (no fallback).
-    # Possible values are:
-    #  - zero: enable zero-copy mode
-    #  - copy: enable copy mode
-    #  - none: disable (kernel in charge of applying mode)
-    #force-bind-mode: none
-    # Memory alignment mode can vary between two modes, aligned and
-    # unaligned chunk modes. By default, aligned chunk mode is selected.
-    # select 'yes' to enable unaligned chunk mode.
-    # Note: unaligned chunk mode uses hugepages, so the required number
-    # of pages must be available.
-    #mem-unaligned: no
-    # The following options configure the prefer-busy-polling socket
-    # options. The polling time and budget can be edited here.
-    # Possible values are:
-    #  - yes: enable (default)
-    #  - no: disable
-    #enable-busy-poll: yes
-    # busy-poll-time sets the approximate time in microseconds to busy
-    # poll on a blocking receive when there is no data.
-    #busy-poll-time: 20
-    # busy-poll-budget is the budget allowed for packet batches
-    #busy-poll-budget: 64
-    # These two tunables are used to configure the Linux OS's NAPI
-    # context. Their purpose is to defer enabling of interrupts and
-    # instead schedule the NAPI context from a watchdog timer.
-    # The softirq NAPI will exit early, allowing busy polling to be
-    # performed. Successfully setting these tunables alongside busy-polling
-    # should improve performance.
-    # Defaults are:
-    #gro-flush-timeout: 2000000
-    #napi-defer-hard-irq: 2
-
-dpdk:
-  eal-params:
-    proc-type: primary
-
-  # DPDK capture support
-  # RX queues (and TX queues in IPS mode) are assigned to cores in 1:1 ratio
-  interfaces:
-    - interface: 0000:3b:00.0 # PCIe address of the NIC port
-      # Threading: possible values are either "auto" or number of threads
-      # - auto takes all cores
-      # in IPS mode it is required to specify the number of cores and the numbers on both interfaces must match
-      threads: auto
-      promisc: true # promiscuous mode - capture all packets
-      multicast: true # enables also detection on multicast packets
-      checksum-checks: true # if Suricata should validate checksums
-      checksum-checks-offload: true # if possible offload checksum validation to the NIC (saves Suricata resources)
-      mtu: 1500 # Set MTU of the device in bytes
-      # rss-hash-functions: 0x0 # advanced configuration option, use only if you use untested NIC card and experience RSS warnings,
-      # For `rss-hash-functions` use hexadecimal 0x01ab format to specify RSS hash function flags - DumpRssFlags can help (you can see output if you use -vvv option during Suri startup)
-      # setting auto to rss_hf sets the default RSS hash functions (based on IP addresses)
-
-      # To approximately calculate required amount of space (in bytes) for interface's mempool: mempool-size * mtu
-      # Make sure you have enough allocated hugepages.
-      # The optimum size for the packet memory pool (in terms of memory usage) is power of two minus one: n = (2^q - 1)
-      mempool-size: 65535 # The number of elements in the mbuf pool
-
-      # Mempool cache size must be lower or equal to:
-      #     - RTE_MEMPOOL_CACHE_MAX_SIZE (by default 512) and
-      #     - "mempool-size / 1.5"
-      # It is advised to choose cache_size to have "mempool-size modulo cache_size == 0".
-      # If this is not the case, some elements will always stay in the pool and will never be used.
-      # The cache can be disabled if the cache_size argument is set to 0, can be useful to avoid losing objects in cache
-      # If the value is empty or set to "auto", Suricata will attempt to set cache size of the mempool to a value
-      # that matches the previously mentioned recommendations
-      mempool-cache-size: 257
-      rx-descriptors: 1024
-      tx-descriptors: 1024
-      #
-      # IPS mode for Suricata works in 3 modes - none, tap, ips
-      # - none: IDS mode only - disables IPS functionality (does not further forward packets)
-      # - tap: forwards all packets and generates alerts (omits DROP action) This is not DPDK TAP
-      # - ips: the same as tap mode but it also drops packets that are flagged by rules to be dropped
-      copy-mode: none
-      copy-iface: none # or PCIe address of the second interface
-
-    - interface: default
-      threads: auto
-      promisc: true
-      multicast: true
-      checksum-checks: true
-      checksum-checks-offload: true
-      mtu: 1500
-      rss-hash-functions: auto
-      mempool-size: 65535
-      mempool-cache-size: 257
-      rx-descriptors: 1024
-      tx-descriptors: 1024
-      copy-mode: none
-      copy-iface: none
-
-
-# Cross platform libpcap capture support
-pcap:
-  - interface: eth0
-    # On Linux, pcap will try to use mmap'ed capture and will use "buffer-size"
-    # as total memory used by the ring. So set this to something bigger
-    # than 1% of your bandwidth.
-    #buffer-size: 16777216
-    #bpf-filter: "tcp and port 25"
-    # Choose checksum verification mode for the interface. At the moment
-    # of the capture, some packets may have an invalid checksum due to
-    # the checksum computation being offloaded to the network card.
-    # Possible values are:
-    #  - yes: checksum validation is forced
-    #  - no: checksum validation is disabled
-    #  - auto: Suricata uses a statistical approach to detect when
-    #  checksum off-loading is used. (default)
-    # Warning: 'capture.checksum-validation' must be set to yes to have any validation
-    #checksum-checks: auto
-    # With some accelerator cards using a modified libpcap (like Myricom), you
-    # may want to have the same number of capture threads as the number of capture
-    # rings. In this case, set up the threads variable to N to start N threads
-    # listening on the same interface.
-    #threads: 16
-    # set to no to disable promiscuous mode:
-    #promisc: no
-    # set snaplen, if not set it defaults to MTU if MTU can be known
-    # via ioctl call and to full capture if not.
-    #snaplen: 1518
-  # Put default values here
-  - interface: default
-    #checksum-checks: auto
-
-# Settings for reading pcap files
-pcap-file:
-  # Possible values are:
-  #  - yes: checksum validation is forced
-  #  - no: checksum validation is disabled
-  #  - auto: Suricata uses a statistical approach to detect when
-  #  checksum off-loading is used. (default)
-  # Warning: 'checksum-validation' must be set to yes to have checksum tested
-  checksum-checks: auto
-
-# See "Advanced Capture Options" below for more options, including Netmap
-# and PF_RING.
-
-
-##
-## Step 4: App Layer Protocol configuration
-##
-
-# Configure the app-layer parsers.
-#
-# The error-policy setting applies to all app-layer parsers. Values can be
-# "drop-flow", "pass-flow", "bypass", "drop-packet", "pass-packet", "reject" or
-# "ignore" (the default).
-#
-# The protocol's section details each protocol.
-#
-# The option "enabled" takes 3 values - "yes", "no", "detection-only".
-# "yes" enables both detection and the parser, "no" disables both, and
-# "detection-only" enables protocol detection only (parser disabled).
-app-layer:
-  # error-policy: ignore
-  protocols:
-    telnet:
-      enabled: yes
-    rfb:
-      enabled: yes
-      detection-ports:
-        dp: 5900, 5901, 5902, 5903, 5904, 5905, 5906, 5907, 5908, 5909
-    mqtt:
-      enabled: yes
-      # max-msg-length: 1mb
-      # subscribe-topic-match-limit: 100
-      # unsubscribe-topic-match-limit: 100
-      # Maximum number of live MQTT transactions per flow
-      # max-tx: 4096
-    krb5:
-      enabled: yes
-    bittorrent-dht:
-      enabled: yes
-    snmp:
-      enabled: yes
-    ike:
-      enabled: yes
-    tls:
-      enabled: yes
-      detection-ports:
-        dp: 443
-
-      # Generate JA3 fingerprint from client hello. If not specified it
-      # will be disabled by default, but enabled if rules require it.
-      ja3-fingerprints: yes
-
-      # What to do when the encrypted communications start:
-      # - default: keep tracking TLS session, check for protocol anomalies,
-      #            inspect tls_* keywords. Disables inspection of unmodified
-      #            'content' signatures.
-      # - bypass:  stop processing this flow as much as possible. No further
-      #            TLS parsing and inspection. Offload flow bypass to kernel
-      #            or hardware if possible.
-      # - full:    keep tracking and inspection as normal. Unmodified content
-      #            keyword signatures are inspected as well.
-      #
-      # For best performance, select 'bypass'.
-      #
-      #encryption-handling: default
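-      # A hedged example using one of the values listed above: a sensor that
-      # does not need to inspect encrypted traffic could trade visibility for
-      # throughput with:
-      #encryption-handling: bypass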
-
-    pgsql:
-      enabled: yes
-      # Stream reassembly size for PostgreSQL. By default, track it completely.
-      stream-depth: 0
-      # Maximum number of live PostgreSQL transactions per flow
-      # max-tx: 1024
-    dcerpc:
-      enabled: yes
-      # Maximum number of live DCERPC transactions per flow
-      # max-tx: 1024
-    ftp:
-      enabled: yes
-      # memcap: 64mb
-    rdp:
-      enabled: yes
-    ssh:
-      enabled: yes
-      hassh: yes
-    http2:
-      enabled: yes
-      # Maximum number of live HTTP2 streams in a flow
-      #max-streams: 4096
-      # Maximum headers table size
-      #max-table-size: 65536
-    smtp:
-      enabled: yes
-      raw-extraction: no
-      # Configure SMTP-MIME Decoder
-      mime:
-        # Decode MIME messages from SMTP transactions
-        # (may be resource intensive)
-        # This field supersedes all others because it turns the entire
-        # process on or off
-        decode-mime: yes
-
-        # Decode MIME entity bodies (i.e., Base64, quoted-printable, etc.)
-        decode-base64: yes
-        decode-quoted-printable: yes
-
-        # Maximum bytes per header data value stored in the data structure
-        # (default is 2000)
-        header-value-depth: 2000
-
-        # Extract URLs and save in state data structure
-        extract-urls: yes
-        # Scheme of URLs to extract
-        # (default is [http])
-        extract-urls-schemes: [http, https, ftp, mailto]
-        # Log the scheme of URLs that are extracted
-        # (default is no)
-        log-url-scheme: yes
-        # Set to yes to compute the md5 of the mail body. You will then
-        # be able to log it.
-        body-md5: yes
-      # Configure inspected-tracker for file_data keyword
-      inspected-tracker:
-        content-limit: 100000
-        content-inspect-min-size: 32768
-        content-inspect-window: 4096
-    imap:
-      enabled: detection-only
-    smb:
-      enabled: yes
-      detection-ports:
-        dp: 139, 445
-      # Maximum number of live SMB transactions per flow
-      # max-tx: 1024
-
-      # Stream reassembly size for SMB streams. By default track it completely.
-      #stream-depth: 0
-
-    nfs:
-      enabled: yes
-      # max-tx: 1024
-    tftp:
-      enabled: yes
-    dns:
-      tcp:
-        enabled: yes
-        detection-ports:
-          dp: 53
-      udp:
-        enabled: yes
-        detection-ports:
-          dp: 53
-    http:
-      enabled: yes
-
-      # Byte Range Containers default settings
-      # byterange:
-      #   memcap: 100mb
-      #   timeout: 60
-
-      # memcap:                   Maximum memory capacity for HTTP
-      #                           Default is unlimited, values can be 64mb, e.g.
-
-      # default-config:           Used when no server-config matches
-      #   personality:            List of personalities used by default
-      #   request-body-limit:     Limit reassembly of request body for inspection
-      #                           by http_client_body & pcre /P option.
-      #   response-body-limit:    Limit reassembly of response body for inspection
-      #                           by file_data, http_server_body & pcre /Q option.
-      #
-      #   For advanced options, see the user guide
-
-
-      # server-config:            List of server configurations to use if address matches
-      #   address:                List of IP addresses or networks for this block
-      #   personality:            List of personalities used by this block
-      #
-      #                           Then, all the fields from default-config can be overloaded
-      #
-      # Currently Available Personalities:
-      #   Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0,
-      #   IIS_7_0, IIS_7_5, Apache_2
-      libhtp:
-         default-config:
-           personality: IDS
-
-           # Can be specified in kb, mb, gb.  Just a number indicates
-           # it's in bytes.
-           request-body-limit: 100kb
-           response-body-limit: 100kb
-
-           # inspection limits
-           request-body-minimal-inspect-size: 32kb
-           request-body-inspect-window: 4kb
-           response-body-minimal-inspect-size: 40kb
-           response-body-inspect-window: 16kb
-
-           # response body decompression (0 disables)
-           response-body-decompress-layer-limit: 2
-
-           # auto will use http-body-inline mode in IPS mode, yes or no set it statically
-           http-body-inline: auto
-
-           # Decompress SWF files. Disabled by default.
-           # Types: 'deflate', 'lzma', or 'both' ('both' decompresses deflate and lzma)
-           # compress-depth:
-           # Specifies the maximum amount of data to decompress,
-           # set 0 for unlimited.
-           # decompress-depth:
-           # Specifies the maximum amount of decompressed data to obtain,
-           # set 0 for unlimited.
-           swf-decompression:
-             enabled: no
-             type: both
-             compress-depth: 100kb
-             decompress-depth: 100kb
-
-           # Use a random value for inspection sizes around the specified value.
-           # This lowers the risk of some evasion techniques but could lead
-           # to detection change between runs. It is set to 'yes' by default.
-           #randomize-inspection-sizes: yes
-           # If "randomize-inspection-sizes" is active, the value of various
-           # inspection size will be chosen from the [1 - range%, 1 + range%]
-           # range
-           # Default value of "randomize-inspection-range" is 10.
-           #randomize-inspection-range: 10
-
-           # decoding
-           double-decode-path: no
-           double-decode-query: no
-
-           # Can enable LZMA decompression
-           #lzma-enabled: false
-           # Memory limit usage for LZMA decompression dictionary
-           # Data is decompressed until dictionary reaches this size
-           #lzma-memlimit: 1mb
-           # Maximum decompressed size with a compression ratio
-           # above 2048 (only LZMA can reach this ratio, deflate cannot)
-           #compression-bomb-limit: 1mb
-           # Maximum time spent decompressing a single transaction in usec
-           #decompression-time-limit: 100000
-
-         server-config:
-
-           #- apache:
-           #    address: [192.168.1.0/24, 127.0.0.0/8, "::1"]
-           #    personality: Apache_2
-           #    # Can be specified in kb, mb, gb.  Just a number indicates
-           #    # it's in bytes.
-           #    request-body-limit: 4096
-           #    response-body-limit: 4096
-           #    double-decode-path: no
-           #    double-decode-query: no
-
-           #- iis7:
-           #    address:
-           #      - 192.168.0.0/24
-           #      - 192.168.10.0/24
-           #    personality: IIS_7_0
-           #    # Can be specified in kb, mb, gb.  Just a number indicates
-           #    # it's in bytes.
-           #    request-body-limit: 4096
-           #    response-body-limit: 4096
-           #    double-decode-path: no
-           #    double-decode-query: no
-
-    # Note: the Modbus probing parser is minimalist due to the limited usage in the field.
-    # Only the Modbus message length (greater than the Modbus header length)
-    # and the protocol ID (equal to 0) are checked by the probing parser.
-    # It is important to enable the detection port and define the Modbus port
-    # to avoid false positives
-    modbus:
-      # How many unanswered Modbus requests are considered a flood.
-      # If the limit is reached, the app-layer-event:modbus.flooded; will match.
-      #request-flood: 500
-
-      enabled: yes
-      detection-ports:
-        dp: 502
-      # According to the MODBUS Messaging on TCP/IP Implementation Guide V1.0b, it
-      # is recommended to keep the TCP connection open with a remote device
-      # and not to open and close it for each MODBUS/TCP transaction. In that
-      # case, it is important to set the depth of the stream reassembly to
-      # unlimited (stream.reassembly.depth: 0)
-
-      # Stream reassembly size for modbus. By default track it completely.
-      stream-depth: 0
-
-    # DNP3
-    dnp3:
-      enabled: yes
-      detection-ports:
-        dp: 20000
-
-    # SCADA EtherNet/IP and CIP protocol support
-    enip:
-      enabled: yes
-      detection-ports:
-        dp: 44818
-        sp: 44818
-
-    ntp:
-      enabled: yes
-
-    quic:
-      enabled: yes
-
-    dhcp:
-      enabled: no
-
-    sip:
-      enabled: yes
-
-# Limit for the maximum number of asn1 frames to decode (default 256)
-asn1-max-frames: 256
-
-# Datasets default settings
-datasets:
-  # Default fallback memcap and hashsize values for datasets in case these
-  # were not explicitly defined.
-  defaults:
-    #memcap: 100mb
-    #hashsize: 2048
-
-  rules:
-    # Set to true to allow absolute filenames and filenames that use
-    # ".." components to reference parent directories in rules that specify
-    # their filenames.
-    #allow-absolute-filenames: false
-
-    # Allow datasets in rules write access for "save" and
-    # "state". This is enabled by default, however write access is
-    # limited to the data directory.
-    #allow-write: true
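-    # A hedged sketch of a rule that feeds a dataset via the 'dataset' keyword;
-    # the dataset name, state file and sid below are illustrative only:
-    #   alert http any any -> any any (msg:"HTTP user-agent seen"; http.user_agent; \
-    #     dataset:set,ua-seen, type string, state ua-seen.lst; sid:8000001; rev:1;)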
-
-##############################################################################
-##
-## Advanced settings below
-##
-##############################################################################
-
-##
-## Run Options
-##
-
-# Run Suricata with a specific user-id and group-id:
-run-as:
-  user: suri
-  group: suri
-
-security:
-  # if true, prevents process creation from Suricata by calling
-  # setrlimit(RLIMIT_NPROC, 0)
-  limit-noproc: true
-  # Use landlock security module under Linux
-  landlock:
-    enabled: no
-    directories:
-      #write:
-      #  - /var/run/
-      # /usr and /etc folders are added to read list to allow
-      # file magic to be used.
-      read:
-        - /usr/
-        - /etc/
-        - /etc/suricata/
-
-  lua:
-    # Allow Lua rules. Disabled by default.
-    #allow-rules: false
-
-# Some logging modules will use this name in events as an identifier. The default
-# value is the hostname.
-#sensor-name: suricata
-
-# Default location of the pid file. The pid file is only used in
-# daemon mode (start Suricata with -D). If not running in daemon mode
-# the --pidfile command line option must be used to create a pid file.
-#pid-file: /var/run/suricata.pid
-
-# Daemon working directory
-# Suricata will change directory to this one if provided
-# Default: "/"
-#daemon-directory: "/"
-
-# Umask.
-# Suricata will use this umask if it is provided. By default it will use the
-# umask passed on by the shell.
-#umask: 022
-
-# Suricata core dump configuration. Limits the size of the core dump file to
-# approximately max-dump. The actual core dump size will be a multiple of the
-# page size. Core dumps that would be larger than max-dump are truncated. On
-# Linux, the actual core dump size may be a few pages larger than max-dump.
-# Setting max-dump to 0 disables core dumping.
-# Setting max-dump to 'unlimited' will give the full core dump file.
-# On 32-bit Linux, a max-dump value >= ULONG_MAX may cause the core dump size
-# to be 'unlimited'.
-
-coredump:
-  max-dump: unlimited
-
-# If the Suricata box is a router for the sniffed networks, set it to 'router'. If
-# it is a pure sniffing setup, set it to 'sniffer-only'.
-# If set to auto, the variable is internally switched to 'router' in IPS mode
-# and 'sniffer-only' in IDS mode.
-# This feature is currently only used by the reject* keywords.
-host-mode: auto
-
-# Number of packets preallocated per thread. The default is 1024. A higher number 
-# will make sure each CPU will be more easily kept busy, but may negatively 
-# impact caching.
-#max-pending-packets: 1024
-
-# Runmode the engine should use. Please check --list-runmodes to get the available
-# runmodes for each packet acquisition method. Default depends on selected capture
-# method. 'workers' generally gives best performance.
-#runmode: autofp
-
-# Specifies the kind of flow load balancer used by the flow pinned autofp mode.
-#
-# Supported schedulers are:
-#
-# hash     - Flow assigned to threads using the 5-7 tuple hash.
-# ippair   - Flow assigned to threads using addresses only.
-# ftp-hash - Flow assigned to threads using the hash, except for FTP, so that
-#            ftp-data flows will be handled by the same thread
-#
-#autofp-scheduler: hash
-
-# Preallocated size for each packet. Default is 1514 which is the classical
-# size for pcap on Ethernet. You should adjust this value to the highest
-# packet size (MTU + hardware header) on your system.
-#default-packet-size: 1514
-
-# Unix command socket that can be used to pass commands to Suricata.
-# An external tool can then connect to get information from Suricata
-# or trigger some modifications of the engine. Set enabled to yes
-# to activate the feature. In auto mode, the feature will only be
-# activated in live capture mode. You can use the filename variable to set
-# the file name of the socket.
-unix-command:
-  enabled: auto
-  #filename: custom.socket
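-  # A hedged usage sketch: with the socket enabled, the bundled suricatasc
-  # client can query the running engine, e.g. 'suricatasc -c uptime' or
-  # 'suricatasc -c reload-rules' (available commands may vary by version).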
-
-# Magic file. The extension .mgc is added to the value here.
-magic-file: /usr/share/misc/magic.mgc
-#magic-file: 
-
-# GeoIP2 database file. Specify path and filename of GeoIP2 database
-# if using rules with "geoip" rule option.
-#geoip-database: /usr/local/share/GeoLite2/GeoLite2-Country.mmdb
-
-legacy:
-  uricontent: enabled
-
-##
-## Detection settings
-##
-
-# Set the order of alerts based on actions
-# The default order is pass, drop, reject, alert
-# action-order:
-#   - pass
-#   - drop
-#   - reject
-#   - alert
-
-# Define maximum number of possible alerts that can be triggered for the same
-# packet. Default is 15
-#packet-alert-max: 15
-
-# Exception Policies
-#
-# Define a common behavior for all exception policies.
-# In IPS mode, the default is drop-flow. For cases when that's not possible, the
-# engine will fall back to drop-packet. To fall back to the old behavior (setting each of
-# them individually, or ignoring all), set this to ignore.
-# All values available for exception policies can be used, and there is one
-# extra option: auto - which means drop-flow or drop-packet (as explained above)
-# in IPS mode, and ignore in IDS mode. Exception policy values are: drop-packet,
-# drop-flow, reject, bypass, pass-packet, pass-flow, ignore (disable).
-exception-policy: auto
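-# A hedged example: on an IDS-only sensor where the engine must never
-# interfere with traffic, the policy can be pinned explicitly instead of
-# relying on 'auto', e.g.:
-#exception-policy: ignore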
-
-# IP Reputation
-#reputation-categories-file: /etc/suricata/iprep/categories.txt
-#default-reputation-path: /etc/suricata/iprep
-#reputation-files:
-# - reputation.list
-
-# When run with the option --engine-analysis, the engine will read each of
-# the parameters below, print reports for each of the enabled sections,
-# and exit.  The reports are written to files in the default log dir
-# given by the parameter "default-log-dir"; each enabled subsection below
-# writes its report to its own file.
-engine-analysis:
-  # enables printing reports for fast-pattern for every rule.
-  rules-fast-pattern: yes
-  # enables printing reports for each rule
-  rules: yes
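-  # A hedged usage sketch: the reports above are produced by launching the
-  # engine in analysis mode, for example
-  #   suricata --engine-analysis -c /etc/suricata/suricata.yaml -l /var/log/suricata/
-  # (paths are illustrative; the engine exits after writing the reports).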
-
-#recursion and match limits for PCRE where supported
-pcre:
-  match-limit: 3500
-  match-limit-recursion: 1500
-
-##
-## Advanced Traffic Tracking and Reconstruction Settings
-##
-
-# Host specific policies for defragmentation and TCP stream
-# reassembly. The host OS lookup is done using a radix tree, just
-# like a routing table so the most specific entry matches.
-host-os-policy:
-  # Make the default policy windows.
-  windows: [0.0.0.0/0]
-  bsd: []
-  bsd-right: []
-  old-linux: []
-  linux: []
-  old-solaris: []
-  solaris: []
-  hpux10: []
-  hpux11: []
-  irix: []
-  macos: []
-  vista: []
-  windows2k3: []
-
-# Defrag settings:
-
-# The memcap-policy value can be "drop-packet", "pass-packet", "reject" or
-# "ignore" (which is the default).
-defrag:
-  memcap: 32mb
-  # memcap-policy: ignore
-  hash-size: 65536
-  trackers: 65535 # number of defragmented flows to follow
-  max-frags: 65535 # number of fragments to keep (higher than trackers)
-  prealloc: yes
-  timeout: 60
-
-# Enable defrag per host settings
-#  host-config:
-#
-#    - dmz:
-#        timeout: 30
-#        address: [192.168.1.0/24, 127.0.0.0/8, 1.1.1.0/24, 2.2.2.0/24, "1.1.1.1", "2.2.2.2", "::1"]
-#
-#    - lan:
-#        timeout: 45
-#        address:
-#          - 192.168.0.0/24
-#          - 192.168.10.0/24
-#          - 172.16.14.0/24
-
-# Flow settings:
-# By default, the reserved memory (memcap) for flows is 32MB. This is the limit
-# for flow allocation inside the engine. You can change this value to allow
-# more memory usage for flows.
-# The hash-size determines the size of the hash used to identify flows inside
-# the engine, and by default the value is 65536.
-# At startup, the engine can preallocate a number of flows, to get better
-# performance. The number of flows preallocated is 10000 by default.
-# emergency-recovery is the percentage of flows that the engine needs to
-# prune before clearing the emergency state. The emergency state is activated
-# when the memcap limit is reached, allowing new flows to be created, but
-# pruning them with the emergency timeouts (they are defined below).
-# If the memcap is reached, the engine will try to prune flows
-# with the default timeouts. If it doesn't find a flow to prune, it will set
-# the emergency bit and it will try again with more aggressive timeouts.
-# If that doesn't work, it will try to evict the oldest flows based on
-# when they were last seen.
-# The memcap can be specified in kb, mb, gb.  Just a number indicates it's
-# in bytes.
-# The memcap-policy can be "drop-packet", "pass-packet", "reject" or "ignore"
-# (which is the default).
-
-flow:
-  memcap: 128mb
-  #memcap-policy: ignore
-  hash-size: 65536
-  prealloc: 10000
-  emergency-recovery: 30
-  #managers: 1 # default to one flow manager
-  #recyclers: 1 # default to one flow recycler thread
-
-# This option controls the use of VLAN ids in the flow (and defrag)
-# hashing. Normally this should be enabled, but in some (broken)
-# setups where both sides of a flow are not tagged with the same VLAN
-# tag, we can ignore the VLAN id's in the flow hashing.
-vlan:
-  use-for-tracking: true
-
-# This option controls the use of livedev ids in the flow (and defrag)
-# hashing. This is enabled by default and should be disabled if
-# multiple live devices are used to capture traffic from the same network
-livedev:
-  use-for-tracking: true
-
-# Specific timeouts for flows. Here you can specify, per protocol, how long
-# active flows wait before transitioning from one state to another. The value of
-# "new" determines the seconds to wait after a handshake or stream startup
-# before the engine frees the data of that flow if it doesn't change state to
-# established (usually because we don't receive more packets of that flow). The
-# value of "established" is the number of seconds the engine will wait before
-# freeing the flow if that time elapses without receiving new packets or
-# closing the connection. "closed" is the amount of time to wait after a flow
-# is closed (usually zero). The "bypassed" timeout controls locally bypassed
-# flows; for these flows we don't do any other tracking, and if no packets have
-# been seen after this timeout, the flow is discarded.
-#
-# There's an emergency mode that becomes active under attack circumstances,
-# making the engine check flow status faster. These configuration variables
-# use the prefix "emergency-" and work similarly to the normal ones.
-# Some timeouts don't apply to all protocols; "closed", for example, does not
-# apply to udp and icmp.
-
-flow-timeouts:
-
-  default:
-    new: 30
-    established: 300
-    closed: 0
-    bypassed: 100
-    emergency-new: 10
-    emergency-established: 100
-    emergency-closed: 0
-    emergency-bypassed: 50
-  tcp:
-    new: 60
-    established: 600
-    closed: 60
-    bypassed: 100
-    emergency-new: 5
-    emergency-established: 100
-    emergency-closed: 10
-    emergency-bypassed: 50
-  udp:
-    new: 30
-    established: 300
-    bypassed: 100
-    emergency-new: 10
-    emergency-established: 100
-    emergency-bypassed: 50
-  icmp:
-    new: 30
-    established: 300
-    bypassed: 100
-    emergency-new: 10
-    emergency-established: 100
-    emergency-bypassed: 50
-
-# Stream engine settings. Here the TCP stream tracking and reassembly
-# engine is configured.
-#
-# stream:
-#   memcap: 64mb                # Can be specified in kb, mb, gb.  Just a
-#                               # number indicates it's in bytes.
-#   memcap-policy: ignore       # Can be "drop-flow", "pass-flow", "bypass",
-#                               # "drop-packet", "pass-packet", "reject" or
-#                               # "ignore" default is "ignore"
-#   checksum-validation: yes    # To validate the checksum of received
-#                               # packet. If csum validation is specified as
-#                               # "yes", then packets with invalid csum values will not
-#                               # be processed by the engine stream/app layer.
-#                               # Warning: locally generated traffic can be
-#                               # generated without checksum due to hardware offload
-#                               # of checksum. You can control the handling of checksum
-#                               # on a per-interface basis via the 'checksum-checks'
-#                               # option
-#   prealloc-sessions: 2048     # 2k sessions prealloc'd per stream thread
-#   midstream: false            # don't allow midstream session pickups
-#   midstream-policy: ignore    # Can be "drop-flow", "pass-flow", "bypass",
-#                               # "drop-packet", "pass-packet", "reject" or
-#                               # "ignore" default is "ignore"
-#   async-oneside: false        # don't enable async stream handling
-#   inline: no                  # stream inline mode
-#   drop-invalid: yes           # in inline mode, drop packets that are invalid with regards to streaming engine
-#   max-syn-queued: 10          # Max different SYNs to queue
-#   max-synack-queued: 5        # Max different SYN/ACKs to queue
-#   bypass: no                  # Bypass packets when stream.reassembly.depth is reached.
-#                               # Warning: first side to reach this triggers
-#                               # the bypass.
-#   liberal-timestamps: false   # Treat all timestamps as if the Linux policy applies. This
-#                               # means it's slightly more permissive. Enabled by default.
-#
-#   reassembly:
-#     memcap: 256mb             # Can be specified in kb, mb, gb.  Just a number
-#                               # indicates it's in bytes.
-#     memcap-policy: ignore     # Can be "drop-flow", "pass-flow", "bypass",
-#                               # "drop-packet", "pass-packet", "reject" or
-#                               # "ignore" default is "ignore"
-#     depth: 1mb                # Can be specified in kb, mb, gb.  Just a number
-#                               # indicates it's in bytes.
-#     toserver-chunk-size: 2560 # inspect raw stream in chunks of at least
-#                               # this size.  Can be specified in kb, mb,
-#                               # gb.  Just a number indicates it's in bytes.
-#     toclient-chunk-size: 2560 # inspect raw stream in chunks of at least
-#                               # this size.  Can be specified in kb, mb,
-#                               # gb.  Just a number indicates it's in bytes.
-#     randomize-chunk-size: yes # Take a random value for chunk size around the specified value.
-#                               # This lowers the risk of some evasion techniques but could lead
-#                               # to detection change between runs. It is set to 'yes' by default.
-#     randomize-chunk-range: 10 # If randomize-chunk-size is active, the value of chunk-size is
-#                               # a random value between (1 - randomize-chunk-range/100)*toserver-chunk-size
-#                               # and (1 + randomize-chunk-range/100)*toserver-chunk-size and the same
-#                               # calculation for toclient-chunk-size.
-#                               # Default value of randomize-chunk-range is 10.
-#
-#     raw: yes                  # 'Raw' reassembly enabled or disabled.
-#                               # raw is for content inspection by detection
-#                               # engine.
-#
-#     segment-prealloc: 2048    # number of segments preallocated per thread
-#
-#     check-overlap-different-data: true|false
-#                               # check if a segment contains different data
-#                               # than what we've already seen for that
-#                               # position in the stream.
-#                               # This is enabled automatically if inline mode
-#                               # is used or when stream-event:reassembly_overlap_different_data;
-#                               # is used in a rule.
-#
-stream:
-  memcap: 64mb
-  #memcap-policy: ignore
-  checksum-validation: yes      # reject incorrect csums
-  #midstream: false
-  #midstream-policy: ignore
-  inline: auto                  # auto will use inline mode in IPS mode, yes or no set it statically
-  reassembly:
-    memcap: 256mb
-    #memcap-policy: ignore
-    depth: 1mb                  # reassemble 1mb into a stream
-    toserver-chunk-size: 2560
-    toclient-chunk-size: 2560
-    randomize-chunk-size: yes
-    #randomize-chunk-range: 10
-    #raw: yes
-    #segment-prealloc: 2048
-    #check-overlap-different-data: true
-
-# Host table:
-#
-# Host table is used by the tagging and per host thresholding subsystems.
-#
-host:
-  hash-size: 4096
-  prealloc: 1000
-  memcap: 32mb
-
-# IP Pair table:
-#
-# Used by xbits 'ippair' tracking.
-#
-#ippair:
-#  hash-size: 4096
-#  prealloc: 1000
-#  memcap: 32mb
-
-# Decoder settings
-
-decoder:
-  # Teredo decoder is known to not be completely accurate
-  # as it will sometimes detect non-teredo as teredo.
-  teredo:
-    enabled: true
-    # ports to look for Teredo. Max 4 ports. If no ports are given, or
-    # the value is set to 'any', Teredo detection runs on _all_ UDP packets.
-    ports: $TEREDO_PORTS # syntax: '[3544, 1234]' or '3533' or 'any'.
-
-  # VXLAN decoder is assigned to up to 4 UDP ports. By default only the
-  # IANA assigned port 4789 is enabled.
-  vxlan:
-    enabled: true
-    ports: $VXLAN_PORTS # syntax: '[8472, 4789]' or '4789'.
-
-  # Geneve decoder is assigned to up to 4 UDP ports. By default only the
-  # IANA assigned port 6081 is enabled.
-  geneve:
-    enabled: true
-    ports: $GENEVE_PORTS # syntax: '[6081, 1234]' or '6081'.
-
-  # maximum number of decoder layers for a packet
-  # max-layers: 16
-
-##
-## Performance tuning and profiling
-##
-
-# The detection engine builds internal groups of signatures. The engine
-# allows us to specify the profile to use for them, to manage memory in an
-# efficient way keeping good performance. For the profile keyword you
-# can use the words "low", "medium", "high" or "custom". If you use custom,
-# make sure to define the values in the "custom-values" section.
-# Usually you would prefer medium/high/low.
-#
-# "sgh mpm-context", indicates how the staging should allot mpm contexts for
-# the signature groups.  "single" indicates the use of a single context for
-# all the signature group heads.  "full" indicates a mpm-context for each
-# group head.  "auto" lets the engine decide the distribution of contexts
-# based on the information the engine gathers on the patterns from each
-# group head.
-#
-# The option inspection-recursion-limit is used to limit the recursive calls
-# in the content inspection code.  For certain payload-sig combinations, we
-# might end up taking too much time in the content inspection code.
-# If the argument specified is 0, the engine uses an internally defined
-# default limit.  When a value is not specified, there are no limits on the recursion.
-detect:
-  profile: medium
-  custom-values:
-    toclient-groups: 3
-    toserver-groups: 25
-  sgh-mpm-context: auto
-  inspection-recursion-limit: 3000
-  # If set to yes, the loading of signatures will be made after the capture
-  # is started. This will limit the downtime in IPS mode.
-  #delayed-detect: yes
-
-  prefilter:
-    # default prefiltering setting. "mpm" only creates MPM/fast_pattern
-    # engines. "auto" also sets up prefilter engines for other keywords.
-    # Use --list-keywords=all to see which keywords support prefiltering.
-    default: mpm
-
-  # the grouping values above control how many groups are created per
-  # direction. Port whitelisting forces that port to get its own group.
-  # Very common ports will benefit, as well as ports with many expensive
-  # rules.
-  grouping:
-    #tcp-whitelist: 53, 80, 139, 443, 445, 1433, 3306, 3389, 6666, 6667, 8080
-    #udp-whitelist: 53, 135, 5060
-
-  profiling:
-    # Log the rules that made it past the prefilter stage, per packet.
-    # The default is off. The threshold setting determines how many rules
-    # must have made it past pre-filter for that rule to trigger the
-    # logging.
-    #inspect-logging-threshold: 200
-    grouping:
-      dump-to-disk: false
-      include-rules: false      # very verbose
-      include-mpm-stats: false
-
-# Select the multi-pattern algorithm you want the engine to use for
-# scanning/searching.
-#
-# The supported algorithms are:
-# "ac"      - Aho-Corasick, default implementation
-# "ac-bs"   - Aho-Corasick, reduced memory implementation
-# "ac-ks"   - Aho-Corasick, "Ken Steele" variant
-# "hs"      - Hyperscan, available when built with Hyperscan support
-#
-# The default mpm-algo value of "auto" will use "hs" if Hyperscan is
-# available, "ac" otherwise.
-#
-# The mpm you choose also decides the distribution of mpm contexts for
-# signature groups, specified by the conf - "detect.sgh-mpm-context".
-# Selecting "ac" as the mpm would require "detect.sgh-mpm-context"
-# to be set to "single", because of ac's memory requirements, unless the
-# ruleset is small enough to fit in memory, in which case one can
-# use "full" with "ac".  The rest of the mpms can be run in "full" mode.
-
-mpm-algo: auto
-
-# Select the matching algorithm you want to use for single-pattern searches.
-#
-# Supported algorithms are "bm" (Boyer-Moore) and "hs" (Hyperscan, only
-# available if Suricata has been built with Hyperscan support).
-#
-# The default of "auto" will use "hs" if available, otherwise "bm".
-
-spm-algo: auto
-
-# Suricata is multi-threaded. Here the threading can be influenced.
-threading:
-  set-cpu-affinity: no
-  # Tune cpu affinity of threads. Each family of threads can be bound
-  # to specific CPUs.
-  #
-  # These 2 apply to all runmodes:
-  # management-cpu-set is used for flow timeout handling, counters
-  # worker-cpu-set is used for 'worker' threads
-  #
-  # Additionally, for autofp these apply:
-  # receive-cpu-set is used for capture threads
-  # verdict-cpu-set is used for IPS verdict threads
-  #
-  cpu-affinity:
-    - management-cpu-set:
-        cpu: [ 0 ]  # include only these CPUs in affinity settings
-    - receive-cpu-set:
-        cpu: [ 0 ]  # include only these CPUs in affinity settings
-    - worker-cpu-set:
-        cpu: [ "all" ]
-        mode: "exclusive"
-        # Explicitly use 3 threads and don't compute the number using the
-        # detect-thread-ratio variable:
-        # threads: 3
-        prio:
-          low: [ 0 ]
-          medium: [ "1-2" ]
-          high: [ 3 ]
-          default: "medium"
-    #- verdict-cpu-set:
-    #    cpu: [ 0 ]
-    #    prio:
-    #      default: "high"
-  #
-  # By default Suricata creates one "detect" thread per available CPU/CPU core.
-  # This setting allows controlling this behaviour. A ratio setting of 2 will
-  # create 2 detect threads for each CPU/CPU core. So for a dual core CPU this
-  # will result in 4 detect threads. If values below 1 are used, fewer threads
-  # are created. So on a dual core CPU a setting of 0.5 results in 1 detect
-  # thread being created. Regardless of the setting, at least 1 detect
-  # thread will always be created.
-  #
-  detect-thread-ratio: 1.0
-  #
-  # By default, the per-thread stack size is left to its default setting. If
-  # the default thread stack size is too small, use the following configuration
-  # setting to change the size. Note that if any thread's stack size cannot be
-  # set to this value, a fatal error occurs.
-  #
-  # Generally, the per-thread stack-size should not exceed 8MB.
-  #stack-size: 8mb
-
-# Luajit has a strange memory requirement: its 'states' need to be in the
-# first 2G of the process' memory.
-#
-# 'luajit.states' is used to control how many states are preallocated.
-# State use: per detect script: 1 per detect thread. Per output script: 1 per
-# script.
-luajit:
-  states: 128
-
-# Profiling settings. Only effective if Suricata has been built with
-# the --enable-profiling configure flag.
-#
-profiling:
-  # Run profiling for every X-th packet. The default is 1, which means we
-  # profile every packet. If set to 1024, one packet is profiled for every
-  # 1024 received. The sample rate must be a power of 2.
-  #sample-rate: 1024
-
-  # rule profiling
-  rules:
-
-    # Profiling can be disabled here, but it will still have a
-    # performance impact if compiled in.
-    enabled: no
-    filename: rule_perf.log
-    append: yes
-
-    # Sort options: ticks, avgticks, checks, matches, maxticks
-    # If commented out all the sort options will be used.
-    #sort: avgticks
-
-    # Limit the number of sids for which stats are shown at exit (per sort).
-    limit: 10
-
-    # output to json
-    json: no
-
-  # per keyword profiling
-  keywords:
-    enabled: no
-    filename: keyword_perf.log
-    append: yes
-
-  prefilter:
-    enabled: no
-    filename: prefilter_perf.log
-    append: yes
-
-  # per rulegroup profiling
-  rulegroups:
-    enabled: no
-    filename: rule_group_perf.log
-    append: yes
-
-  # packet profiling
-  packets:
-
-    # Profiling can be disabled here, but it will still have a
-    # performance impact if compiled in.
-    enabled: no
-    filename: packet_stats.log
-    append: yes
-
-    # per packet csv output
-    csv:
-
-      # Output can be disabled here, but it will still have a
-      # performance impact if compiled in.
-      enabled: no
-      filename: packet_stats.csv
-
-  # profiling of locking. Only available when Suricata was built with
-  # --enable-profiling-locks.
-  locks:
-    enabled: no
-    filename: lock_stats.log
-    append: yes
-
-  pcap-log:
-    enabled: no
-    filename: pcaplog_stats.log
-    append: yes
-
-##
-## Netfilter integration
-##
-
-# When running in NFQ inline mode, it is possible to use a simulated
-# non-terminal NFQUEUE verdict.
-# This permits sending all needed packets to Suricata via this rule:
-#        iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE
-# And below, you can have your standard filtering ruleset. To activate
-# this mode, you need to set mode to 'repeat'
-# If you want a packet to be sent to another queue after an ACCEPT decision,
-# set the mode to 'route' and set the next-queue value.
-# On Linux >= 3.1, you can set batchcount to a value > 1 to improve performance
-# by processing several packets before sending a verdict (worker runmode only).
-# On Linux >= 3.6, you can set the fail-open option to yes to have the kernel
-# accept the packet if Suricata is not able to keep pace.
-# bypass mark and mask can be used to implement NFQ bypass. If bypass mark is
-# set then the NFQ bypass is activated. Suricata will set the bypass mark/mask
-# on packets of a flow that needs to be bypassed. The Netfilter ruleset has to
-# directly accept all packets of a flow once a packet has been marked.
-nfq:
-#  mode: accept
-#  repeat-mark: 1
-#  repeat-mask: 1
-#  bypass-mark: 1
-#  bypass-mask: 1
-#  route-queue: 2
-#  batchcount: 20
-#  fail-open: yes
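-# A hedged sketch of the companion Netfilter rule for the bypass mark/mask
-# shown above (1/1): the ruleset must accept already-marked packets before
-# they reach the NFQUEUE rule, for example
-#   iptables -I FORWARD -m mark --mark 1/1 -j ACCEPT
-# (chain and rule position depend on the local firewall layout).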
-
-#nflog support
-nflog:
-    # netlink multicast group
-    # (the same as the iptables --nflog-group param)
-    # Group 0 is used by the kernel, so you can't use it
-  - group: 2
-    # netlink buffer size
-    buffer-size: 18432
-    # put default value here
-  - group: default
-    # set number of packets to queue inside kernel
-    qthreshold: 1
-    # set the delay before flushing packet in the kernel's queue
-    qtimeout: 100
-    # netlink max buffer size
-    max-size: 20000
-
-##
-## Advanced Capture Options
-##
-
-# General settings affecting packet capture
-capture:
-  # disable NIC offloading. It's restored when Suricata exits.
-  # Enabled by default.
-  #disable-offloading: false
-  #
-  # disable checksum validation. Same as setting '-k none' on the
-  # command-line.
-  #checksum-validation: none
-
-# Netmap support
-#
-# Netmap operates with the NIC directly in the driver, so you need either FreeBSD 11+,
-# which has built-in Netmap support, or to compile and install the Netmap module and an
-# appropriate NIC driver for your Linux system.
-# To reach maximum throughput, disable all receive, segmentation and
-# checksum offloading on your NIC (using ethtool or similar).
-# Disabling TX checksum offloading is *required* for connecting an OS endpoint
-# with a NIC endpoint.
-# You can find more information at https://github.com/luigirizzo/netmap
-#
-netmap:
-   # To specify OS endpoint add plus sign at the end (e.g. "eth0+")
- - interface: eth2
-   # Number of capture threads. "auto" uses number of RSS queues on interface.
-   # Warning: unless the RSS hashing is symmetrical, this will lead to
-   # accuracy issues.
-   #threads: auto
-   # You can use the following variables to activate netmap tap or IPS mode.
-   # If copy-mode is set to ips or tap, the traffic coming to the current
-   # interface will be copied to the copy-iface interface. If 'tap' is set, the
-   # copy is complete. If 'ips' is set, the packet matching a 'drop' action
-   # will not be copied.
-   # To specify the OS as the copy-iface (so the OS can route packets, or forward
-   # to a service running on the same machine) add a plus sign at the end
-   # (e.g. "copy-iface: eth0+"). Don't forget to set up a symmetrical eth0+ -> eth0
-   # for return packets. Hardware checksumming must be *off* on the interface if
-   # using an OS endpoint (e.g. 'ifconfig eth0 -rxcsum -txcsum -rxcsum6 -txcsum6' for FreeBSD
-   # or 'ethtool -K eth0 tx off rx off' for Linux).
-   #copy-mode: tap
-   #copy-iface: eth3
-   # Set to yes to disable promiscuous mode
-   # disable-promisc: no
-   # Choose checksum verification mode for the interface. At the moment
-   # of the capture, some packets may have an invalid checksum due to
-   # the checksum computation being offloaded to the network card.
-   # Possible values are:
-   #  - yes: checksum validation is forced
-   #  - no: checksum validation is disabled
-   #  - auto: Suricata uses a statistical approach to detect when
-   #  checksum off-loading is used.
-   # Warning: 'checksum-validation' must be set to yes to have any validation
-   #checksum-checks: auto
-   # BPF filter to apply to this interface. The pcap filter syntax apply here.
-   #bpf-filter: port 80 or udp
- #- interface: eth3
-   #threads: auto
-   #copy-mode: tap
-   #copy-iface: eth2
-   # Put default values here
- - interface: default
-
-# PF_RING configuration: for use with native PF_RING support
-# for more info see http://www.ntop.org/products/pf_ring/
-pfring:
-  - interface: eth0
-    # Number of receive threads. If set to 'auto' Suricata will first try
-    # to use CPU (core) count and otherwise RSS queue count.
-    threads: auto
-
-    # Default clusterid.  PF_RING will load balance packets based on flow.
-    # All threads/processes that will participate need to have the same
-    # clusterid.
-    cluster-id: 99
-
-    # Default PF_RING cluster type. PF_RING can load balance per flow.
-    # Possible values are:
-    # - cluster_flow:               6-tuple: <src ip, src port, dst ip, dst port, proto, vlan>
-    # - cluster_inner_flow:         6-tuple: <src ip, src port, dst ip, dst port, proto, vlan>
-    # - cluster_inner_flow_2_tuple: 2-tuple: <src ip,           dst ip                       >
-    # - cluster_inner_flow_4_tuple: 4-tuple: <src ip, src port, dst ip, dst port             >
-    # - cluster_inner_flow_5_tuple: 5-tuple: <src ip, src port, dst ip, dst port, proto      >
-    # - cluster_round_robin (NOT RECOMMENDED)
-    cluster-type: cluster_flow
-
-    # bpf filter for this interface
-    #bpf-filter: tcp
-
-    # If bypass is set then the PF_RING hw bypass is activated, when supported
-    # by the network interface. Suricata will instruct the interface to bypass
-    # all future packets for a flow that needs to be bypassed.
-    #bypass: yes
-
-    # Choose checksum verification mode for the interface. At the moment
-    # of the capture, some packets may have an invalid checksum due to
-    # the checksum computation being offloaded to the network card.
-    # Possible values are:
-    #  - rxonly: only compute checksum for packets received by network card.
-    #  - yes: checksum validation is forced
-    #  - no: checksum validation is disabled
-    #  - auto: Suricata uses a statistical approach to detect when
-    #  checksum off-loading is used. (default)
-    # Warning: 'checksum-validation' must be set to yes to have any validation
-    #checksum-checks: auto
-  # Second interface
-  #- interface: eth1
-  #  threads: 3
-  #  cluster-id: 93
-  #  cluster-type: cluster_flow
-  # Put default values here
-  - interface: default
-    #threads: 2
-
-# For FreeBSD ipfw(8) divert(4) support.
-# Please make sure you have ipfw_load="YES" and ipdivert_load="YES"
-# in /etc/loader.conf, or kldload the appropriate kernel modules.
-# Additionally, you need to have an ipfw rule for the engine to see
-# the packets from ipfw.  For Example:
-#
-#   ipfw add 100 divert 8000 ip from any to any
-#
-# N.B. This example uses "8000" -- this number must match the value
-# you passed on the command line, i.e., -d 8000
-#
-ipfw:
-
-  # Reinject packets at the specified ipfw rule number.  This config
-  # option is the ipfw rule number AT WHICH rule processing continues
-  # in the ipfw processing system after the engine has finished
-  # inspecting the packet for acceptance.  If no rule number is specified,
-  # accepted packets are reinjected at the divert rule which they entered
-  # and IPFW rule processing continues.  No check is done to verify
-  # that this rule number makes sense, so care must be taken to avoid loops in ipfw.
-  #
-  ## The following example tells the engine to reinject packets
-  # back into the ipfw firewall AT rule number 5500:
-  #
-  # ipfw-reinjection-rule-number: 5500
-
-
-napatech:
-    # When use_all_streams is set to "yes" the initialization code will query
-    # the Napatech service for all configured streams and listen on all of them.
-    # When set to "no" the streams config array will be used.
-    #
-    # This option necessitates running the appropriate NTPL commands to create
-    # the desired streams prior to running Suricata.
-    #use-all-streams: no
-
-    # The streams to listen on when auto-config is disabled or when threading
-    # cpu-affinity is disabled.  This can be either:
-    #   an individual stream (e.g. streams: [0])
-    # or
-    #   a range of streams (e.g. streams: ["0-3"])
-    #
-    streams: ["0-3"]
-
-    # Stream stats can be enabled to provide fine grain packet and byte counters
-    # for each thread/stream that is configured.
-    #
-    enable-stream-stats: no
-
-    # When auto-config is enabled the streams will be created and assigned
-    # automatically to the NUMA node where the thread resides.  If cpu-affinity
-    # is enabled in the threading section, the streams will be created
-    # according to the number of worker threads specified in the worker-cpu-set.
-    # Otherwise, the streams array is used to define the streams.
-    #
-    # This option is intended primarily to support legacy configurations.
-    #
-    # This option cannot be used simultaneously with either "use-all-streams"
-    # or "hardware-bypass".
-    #
-    auto-config: yes
-
-    # Enable hardware level flow bypass.
-    #
-    hardware-bypass: yes
-
-    # Enable inline operation.  When enabled traffic arriving on a given port is
-    # automatically forwarded out its peer port after analysis by Suricata.
-    #
-    inline: no
-
-    # Ports indicates which Napatech ports are to be used in auto-config mode.
-    # These are the port IDs of the ports that will be merged prior to the
-    # traffic being distributed to the streams.
-    #
-    # When hardware-bypass is enabled the ports must be configured as a segment.
-    # Specify the port(s) on which upstream and downstream traffic will arrive.
-    # This information is necessary for the hardware to properly process flows.
-    #
-    # When using a tap configuration one of the ports will receive inbound traffic
-    # for the network and the other will receive outbound traffic. The two ports on a
-    # given segment must reside on the same network adapter.
-    #
-    # When using a SPAN-port configuration the upstream and downstream traffic
-    # arrives on a single port. This is configured by setting the two sides of the
-    # segment to reference the same port.  (e.g. 0-0 to configure a SPAN port on
-    # port 0).
-    #
-    # port segments are specified in the form:
-    #    ports: [0-1,2-3,4-5,6-6,7-7]
-    #
-    # For legacy systems when hardware-bypass is disabled this can be specified in any
-    # of the following ways:
-    #
-    #   a list of individual ports (e.g. ports: [0,1,2,3])
-    #
-    #   a range of ports (e.g. ports: [0-3])
-    #
-    #   "all" to indicate that all ports are to be merged together
-    #   (e.g. ports: [all])
-    #
-    # This parameter has no effect if auto-config is disabled.
-    #
-    ports: [0-1,2-3]
-
-    # When auto-config is enabled the hashmode specifies the algorithm for
-    # determining to which stream a given packet is to be delivered.
-    # This can be any valid Napatech NTPL hashmode command.
-    #
-    # The most common hashmode commands are:  hash2tuple, hash2tuplesorted,
-    # hash5tuple, hash5tuplesorted and roundrobin.
-    #
-    # See the Napatech NTPL documentation for other hashmodes and details on their use.
-    #
-    # This parameter has no effect if auto-config is disabled.
-    #
-    hashmode: hash5tuplesorted
-
-##
-## Configure Suricata to load Suricata-Update managed rules.
-##
-
-default-rule-path: /var/lib/suricata/rules
-
-rule-files:
-  - suricata.rules
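-# A hedged note: /var/lib/suricata/rules/suricata.rules is the file that the
-# suricata-update tool writes by default; re-running 'suricata-update' refreshes
-# it, after which the engine needs a rule reload or restart to pick up changes.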
-
-##
-## Auxiliary configuration files.
-##
-
-classification-file: /etc/suricata/classification.config
-reference-config-file: /etc/suricata/reference.config
-# threshold-file: /etc/suricata/threshold.config
-
-##
-## Include other configs
-##
-
-# Includes:  Files included here will be handled as if they were in-lined
-# in this configuration file. Files with relative pathnames will be
-# searched for in the same directory as this configuration file. You may
-# use absolute pathnames too.
-#include:
-#  - include1.yaml
-#  - include2.yaml
diff --git a/docker/suricata/dist/suricata_old.yaml b/docker/suricata/dist/suricata_old.yaml
deleted file mode 100644
index bb523417..00000000
--- a/docker/suricata/dist/suricata_old.yaml
+++ /dev/null
@@ -1,1887 +0,0 @@
-%YAML 1.1
----
-
-# Suricata configuration file. In addition to the comments describing all
-# options in this file, full documentation can be found at:
-# https://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html
-
-##
-## Step 1: Inform Suricata about your network
-##
-
-vars:
-  # more specific is better for alert accuracy and performance
-  address-groups:
-    HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]"
-    #HOME_NET: "[192.168.0.0/16]"
-    #HOME_NET: "[10.0.0.0/8]"
-    #HOME_NET: "[172.16.0.0/12]"
-    #HOME_NET: "any"
-
-    #EXTERNAL_NET: "!$HOME_NET"
-    EXTERNAL_NET: "any"
-
-    HTTP_SERVERS: "$HOME_NET"
-    SMTP_SERVERS: "$HOME_NET"
-    SQL_SERVERS: "$HOME_NET"
-    DNS_SERVERS: "$HOME_NET"
-    TELNET_SERVERS: "$HOME_NET"
-    AIM_SERVERS: "$EXTERNAL_NET"
-    DC_SERVERS: "$HOME_NET"
-    DNP3_SERVER: "$HOME_NET"
-    DNP3_CLIENT: "$HOME_NET"
-    MODBUS_CLIENT: "$HOME_NET"
-    MODBUS_SERVER: "$HOME_NET"
-    ENIP_CLIENT: "$HOME_NET"
-    ENIP_SERVER: "$HOME_NET"
-
-  port-groups:
-    HTTP_PORTS: "80,8080,8081"
-    SHELLCODE_PORTS: "!80,!8080,!8081"
-    ORACLE_PORTS: "1433,1521,3306"
-    SSH_PORTS: "22,64295"
-    DNP3_PORTS: 20000
-    MODBUS_PORTS: 502
-    FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
-    FTP_PORTS: 21
-    GENEVE_PORTS: 6081
-    VXLAN_PORTS: 4789
-    TEREDO_PORTS: 3544
-
-##
-## Step 2: Select outputs to enable
-##
-
-# The default logging directory.  Any log or output file will be
-# placed here if it's not specified with a full path name. This can be
-# overridden with the -l command line parameter.
-default-log-dir: /var/log/suricata/
-
-# Global stats configuration
-stats:
-  enabled: no
-  # The interval field (in seconds) controls the interval at
-  # which stats are updated in the log.
-  interval: 8
-  # Add decode events to stats.
-  #decoder-events: true
-  # Decoder event prefix in stats. Has been 'decoder' before, but that leads
-  # to missing events in the eve.stats records. See issue #2225.
-  decoder-events-prefix: "decoder.event"
-  # Add stream events as stats.
-  #stream-events: false
-
-# Configure the type of alert (and other) logging you would like.
-outputs:
-  # a line based alerts log similar to Snort's fast.log
-  - fast:
-      enabled: no
-      filename: fast.log
-      append: yes
-      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
-
-  # Extensible Event Format (nicknamed EVE) event log in JSON format
-  - eve-log:
-      enabled: yes
-      filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
-      filename: eve.json
-      # Enable for multi-threaded eve.json output; output files are amended
-      # with an identifier, e.g., eve.9.json
-      #threaded: false
-      #prefix: "@cee: " # prefix to prepend to each log entry
-      # the following are valid when type: syslog above
-      #identity: "suricata"
-      #facility: local5
-      #level: Info ## possible levels: Emergency, Alert, Critical,
-                   ## Error, Warning, Notice, Info, Debug
-      #ethernet: no  # log ethernet header in events when available
-      #redis:
-      #  server: 127.0.0.1
-      #  port: 6379
-      #  async: true ## if redis replies are read asynchronously
-      #  mode: list ## possible values: list|lpush (default), rpush, channel|publish
-      #             ## lpush and rpush are using a Redis list. "list" is an alias for lpush
-      #             ## publish is using a Redis channel. "channel" is an alias for publish
-      #  key: suricata ## key or channel to use (default to suricata)
-      # Redis pipelining setup. This will issue a query only every
-      # 'batch-size' events. This should lower the latency induced by the network
-      # connection at the cost of some memory. There is no flushing implemented,
-      # so this setting should be reserved for high traffic Suricata deployments.
-      #  pipelining:
-      #    enabled: yes ## set enable to yes to enable query pipelining
-      #    batch-size: 10 ## number of entries to keep in buffer
-
-      # Include top level metadata. Default yes.
-      #metadata: no
-
-      # include the name of the input pcap file in pcap file processing mode
-      pcap-file: false
-
-      # Community Flow ID
-      # Adds a 'community_id' field to EVE records. These are meant to give
-      # records a predictable flow ID that can be used to match records to
-      # output of other tools such as Zeek (Bro).
-      #
-      # Takes a 'seed' that needs to be the same across sensors and tools
-      # to make the id less predictable.
-
-      # enable/disable the community id feature.
-      community-id: false
-      # Seed value for the ID output. Valid values are 0-65535.
-      community-id-seed: 0
-
-      # HTTP X-Forwarded-For support by adding an extra field or overwriting
-      # the source or destination IP address (depending on flow direction)
-      # with the one reported in the X-Forwarded-For HTTP header. This is
-      # helpful when reviewing alerts for traffic that is being reverse
-      # or forward proxied.
-      xff:
-        enabled: yes
-        # Two operation modes are available: "extra-data" and "overwrite".
-        mode: extra-data
-        # Two proxy deployments are supported: "reverse" and "forward". In
-        # a "reverse" deployment the IP address used is the last one, in a
-        # "forward" deployment the first IP address is used.
-        deployment: reverse
-        # Header name where the actual IP address will be reported. If more
-        # than one IP address is present, the last IP address will be the
-        # one taken into consideration.
-        header: X-Forwarded-For
-
-      types:
-        - alert:
-            payload: yes             # enable dumping payload in Base64
-            payload-buffer-size: 4kb # max size of payload buffer to output in eve-log
-            payload-printable: yes   # enable dumping payload in printable (lossy) format
-            # packet: yes              # enable dumping of packet (without stream segments)
-            # metadata: no             # enable inclusion of app layer metadata with alert. Default yes
-            http-body: yes           # Requires metadata; enable dumping of HTTP body in Base64
-            http-body-printable: yes # Requires metadata; enable dumping of HTTP body in printable format
-
-            # Enable the logging of tagged packets for rules using the
-            # "tag" keyword.
-            tagged-packets: yes
-        - anomaly:
-            # Anomaly log records describe unexpected conditions such
-            # as truncated packets, packets with invalid IP/UDP/TCP
-            # length values, and other events that render the packet
-            # invalid for further processing or describe unexpected
-            # behavior on an established stream. Networks which
-            # experience high occurrences of anomalies may experience
-            # packet processing degradation.
-            #
-            # Anomalies are reported for the following:
-            # 1. Decode: Values and conditions that are detected while
-            # decoding individual packets. This includes invalid or
-            # unexpected values for low-level protocol lengths as well
-            # as stream related events (TCP 3-way handshake issues,
-            # unexpected sequence number, etc).
-            # 2. Stream: This includes stream related events (TCP
-            # 3-way handshake issues, unexpected sequence number,
-            # etc).
-            # 3. Application layer: These denote application layer
-            # specific conditions that are unexpected, invalid or are
-            # unexpected given the application monitoring state.
-            #
-            # By default, anomaly logging is enabled. When anomaly
-            # logging is enabled, applayer anomaly reporting is
-            # also enabled.
-            enabled: yes
-            #
-            # Choose one or more types of anomaly logging and whether to enable
-            # logging of the packet header for packet anomalies.
-            types:
-              # decode: no
-              # stream: no
-              # applayer: yes
-            #packethdr: no
-        - http:
-            extended: yes     # enable this for extended logging information
-            # custom allows additional HTTP fields to be included in eve-log.
-            # the list below adds several extra fields to each logged HTTP record
-            custom: [Accept-Encoding, Accept-Language, Authorization, Forwarded, From, Referer, Via]
-            # set this value to one and only one from {both, request, response}
-            # to dump all HTTP headers for every HTTP request and/or response
-            # dump-all-headers: none
-        - dns:
-            # This configuration uses the new DNS logging format;
-            # the old configuration is still available:
-            # https://suricata.readthedocs.io/en/latest/output/eve/eve-json-output.html#dns-v1-format
-
-            # As of Suricata 5.0, version 2 of the eve dns output
-            # format is the default.
-            #version: 2
-
-            # Enable/disable this logger. Default: enabled.
-            #enabled: yes
-
-            # Control logging of requests and responses:
-            # - requests: enable logging of DNS queries
-            # - responses: enable logging of DNS answers
-            # By default both requests and responses are logged.
-            #requests: no
-            #responses: no
-
-            # Format of answer logging:
-            # - detailed: array item per answer
-            # - grouped: answers aggregated by type
-            # Default: all
-            #formats: [detailed, grouped]
-
-            # DNS record types to log, based on the query type.
-            # Default: all.
-            #types: [a, aaaa, cname, mx, ns, ptr, txt]
-        - tls:
-            extended: yes     # enable this for extended logging information
-            # output TLS transaction where the session is resumed using a
-            # session id
-            #session-resumption: no
-            # custom controls which TLS fields that are included in eve-log
-            custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, ja3, ja3s]
-        - files:
-            force-magic: yes   # force logging magic on all logged files
-            # force logging of checksums, available hash functions are md5,
-            # sha1 and sha256
-            force-hash: [md5]
-        #- drop:
-        #    alerts: yes      # log alerts that caused drops
-        #    flows: all       # start or all: 'start' logs only a single drop
-        #                     # per flow direction. All logs each dropped pkt.
-        - smtp:
-            extended: yes # enable this for extended logging information
-            # this includes: bcc, message-id, subject, x_mailer, user-agent
-            # custom fields logging from the list:
-            #  reply-to, bcc, message-id, subject, x-mailer, user-agent, received,
-            #  x-originating-ip, in-reply-to, references, importance, priority,
-            #  sensitivity, organization, content-md5, date
-            custom: [reply-to, bcc, message-id, subject, x-mailer, user-agent, received, x-originating-ip, in-reply-to, references, organization, date]
-            # output md5 of fields: body, subject
-            # for the body you need to set app-layer.protocols.smtp.mime.body-md5
-            # to yes
-            md5: [body, subject]
-
-        - dnp3
-        - ftp
-        - rdp
-        - nfs
-        - smb
-        - tftp
-        - ikev2
-        - dcerpc
-        - krb5
-        - snmp
-        - rfb
-        - sip
-        - dhcp:
-            enabled: no
-            # When extended mode is on, all DHCP messages are logged
-            # with full detail. When extended mode is off (the
-            # default), just enough information to map a MAC address
-            # to an IP address is logged.
-            extended: no
-        - ssh
-        - mqtt:
-            passwords: yes           # enable output of passwords
-        # HTTP2 logging. HTTP2 support is currently experimental and
-        # disabled by default. To enable, uncomment the following line
-        # and be sure to enable http2 in the app-layer section.
-        #- http2
-        #- stats:
-            #totals: yes       # stats for all threads merged together
-            #threads: no       # per thread stats
-            #deltas: no        # include delta values
-        # bi-directional flows
-        #- flow
-        # uni-directional flows
-        #- netflow
-
-        # Metadata event type. Triggered whenever a pktvar is saved
-        # and will include the pktvars, flowvars, flowbits and
-        # flowints.
-        #- metadata
-
-  # a line based log of HTTP requests (no alerts)
-  - http-log:
-      enabled: no
-      filename: http.log
-      append: yes
-      #extended: yes     # enable this for extended logging information
-      #custom: yes       # enable the custom logging format (defined by customformat)
-      #customformat: "%{%D-%H:%M:%S}t.%z %{X-Forwarded-For}i %H %m %h %u %s %B %a:%p -> %A:%P"
-      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
-
-  # a line based log of TLS handshake parameters (no alerts)
-  - tls-log:
-      enabled: no  # Log TLS connections.
-      filename: tls.log # File to store TLS logs.
-      append: yes
-      #extended: yes     # Log extended information like fingerprint
-      #custom: yes       # enabled the custom logging format (defined by customformat)
-      #customformat: "%{%D-%H:%M:%S}t.%z %a:%p -> %A:%P %v %n %d %D"
-      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
-      # output TLS transaction where the session is resumed using a
-      # session id
-      #session-resumption: no
-
-  # output module to store certificates chain to disk
-  - tls-store:
-      enabled: no
-      #certs-log-dir: certs # directory to store the certificates files
-
-  # Packet log... log packets in pcap format. 3 modes of operation: "normal",
-  # "multi" and "sguil".
-  #
-  # In normal mode a pcap file "filename" is created in the default-log-dir,
-  # or as specified by "dir".
-  # In multi mode, a file is created per thread. This will perform much
-  # better, but will create multiple files where 'normal' would create one.
-  # In multi mode the filename takes a few special variables:
-  # - %n -- thread number
-  # - %i -- thread id
-  # - %t -- timestamp (secs or secs.usecs based on 'ts-format')
-  # E.g. filename: pcap.%n.%t
-  #
-  # Note that it's possible to use directories, but the directories are not
-  # created by Suricata. E.g. filename: pcaps/%n/log.%s will log into the
-  # per thread directory.
-  #
-  # Also note that the limit and max-files settings are enforced per thread.
-  # So the size limit when using 8 threads with 1000mb files and 2000 files
-  # is: 8*1000*2000 ~ 16TiB.
-  #
-  # In Sguil mode "dir" indicates the base directory. In this base dir the
-  # pcaps are created in the directory structure Sguil expects:
-  #
-  # $sguil-base-dir/YYYY-MM-DD/$filename.<timestamp>
-  #
-  # By default all packets are logged except:
-  # - TCP streams beyond stream.reassembly.depth
-  # - encrypted streams after the key exchange
-  #
-  - pcap-log:
-      enabled: no
-      filename: log.pcap
-
-      # File size limit.  Can be specified in kb, mb, gb.  Just a number
-      # is parsed as bytes.
-      limit: 1000mb
-
-      # If set to a value, ring buffer mode is enabled. Will keep maximum of
-      # "max-files" of size "limit"
-      max-files: 2000
-
-      # Compression algorithm for pcap files. Possible values: none, lz4.
-      # Enabling compression is incompatible with the sguil mode. Note also
-      # that on Windows, enabling compression will *increase* disk I/O.
-      compression: none
-
-      # Further options for lz4 compression. The compression level can be set
-      # to a value between 0 and 16, where higher values result in higher
-      # compression.
-      #lz4-checksum: no
-      #lz4-level: 0
-
-      mode: normal # normal, multi or sguil.
-
-      # Directory to place pcap files. If not provided the default log
-      # directory will be used. Required for "sguil" mode.
-      #dir: /nsm_data/
-
-      #ts-format: usec # sec or usec second format (default) is filename.sec usec is filename.sec.usec
-      use-stream-depth: no # If set to "yes", packets seen after reaching stream inspection depth are ignored. "no" logs all packets.
-      honor-pass-rules: no # If set to "yes", flows in which a pass rule matched will stop being logged.
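-
-      # Example (sketch, not enabled): per-thread ring-buffer capture using
-      # "multi" mode with the per-thread filename variables described above:
-      #mode: multi
-      #filename: pcap.%n.%t
-      #dir: /nsm_data/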
-
-  # a full alert log containing much information for signature writers
-  # or for investigating suspected false positives.
-  - alert-debug:
-      enabled: no
-      filename: alert-debug.log
-      append: yes
-      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
-
-  # alert output to prelude (https://www.prelude-siem.org/) only
-  # available if Suricata has been compiled with --enable-prelude
-  - alert-prelude:
-      enabled: no
-      profile: suricata
-      log-packet-content: no
-      log-packet-header: yes
-
-  # Stats.log contains data from various counters of the Suricata engine.
-  - stats:
-      enabled: no
-      filename: stats.log
-      append: yes       # append to file (yes) or overwrite it (no)
-      totals: yes       # stats for all threads merged together
-      threads: no       # per thread stats
-      #null-values: yes  # print counters that have value 0. Default: no
-
-  # a line based alerts log similar to fast.log into syslog
-  - syslog:
-      enabled: no
-      # reported identity to syslog. If omitted the program name (usually
-      # suricata) will be used.
-      #identity: "suricata"
-      facility: local5
-      #level: Info ## possible levels: Emergency, Alert, Critical,
-                   ## Error, Warning, Notice, Info, Debug
-
-  # Output module for storing files on disk. Files are stored in
-  # directory names consisting of the first 2 characters of the
-  # SHA256 of the file. Each file is given its SHA256 as a filename.
-  #
-  # When a duplicate file is found, the timestamps on the existing file
-  # are updated.
-  #
-  # Unlike the older filestore, metadata is not written by default
-  # as each file should already have a "fileinfo" record in the
-  # eve-log. If write-fileinfo is set to yes, then each file will have
-  # one or more associated .json files that consist of the fileinfo
-  # record. A fileinfo file will be written for each occurrence of the
-  # file seen using a filename suffix to ensure uniqueness.
-  #
-  # To prune the filestore directory see the "suricatactl filestore
-  # prune" command which can delete files over a certain age.
-  - file-store:
-      version: 2
-      enabled: no
-
-      # Set the directory for the filestore. Relative pathnames
-      # are contained within the "default-log-dir".
-      #dir: filestore
-
-      # Write out a fileinfo record for each occurrence of a file.
-      # Disabled by default as each occurrence is already logged
-      # as a fileinfo record to the main eve-log.
-      #write-fileinfo: yes
-
-      # Force storing of all files. Default: no.
-      #force-filestore: yes
-
-      # Override the global stream-depth for sessions in which we want
-      # to perform file extraction. Set to 0 for unlimited; otherwise,
-      # must be greater than the global stream-depth value to be used.
-      #stream-depth: 0
-
-      # Uncomment the following variable to define how many files can
-      # remain open for filestore by Suricata. Default value is 0 which
-      # means files get closed after each write to the file.
-      #max-open-files: 1000
-
-      # Force logging of checksums: available hash functions are md5,
-      # sha1 and sha256. Note that SHA256 is automatically forced by
-      # the use of this output module as it uses the SHA256 as the
-      # file naming scheme.
-      #force-hash: [sha1, md5]
-      # NOTE: X-Forwarded configuration is ignored if write-fileinfo is disabled
-      # HTTP X-Forwarded-For support by adding an extra field or overwriting
-      # the source or destination IP address (depending on flow direction)
-      # with the one reported in the X-Forwarded-For HTTP header. This is
-      # helpful when reviewing alerts for traffic that is being reverse
-      # or forward proxied.
-      xff:
-        enabled: no
-        # Two operation modes are available, "extra-data" and "overwrite".
-        mode: extra-data
-        # Two proxy deployments are supported, "reverse" and "forward". In
-        # a "reverse" deployment the IP address used is the last one, in a
-        # "forward" deployment the first IP address is used.
-        deployment: reverse
-        # Header name where the actual IP address will be reported. If more
-        # than one IP address is present, the last IP address will be the
-        # one taken into consideration.
-        header: X-Forwarded-For
-
-  # Log TCP data after stream normalization
-  # Two types: file or dir:
-  #     - file logs into a single logfile.
-  #     - dir creates 2 files per TCP session and stores the raw TCP
-  #            data into them.
-  # Use 'both' to enable both file and dir modes.
-  #
-  # Note: limited by "stream.reassembly.depth"
-  - tcp-data:
-      enabled: no
-      type: file
-      filename: tcp-data.log
-
-  # Log HTTP body data after normalization, de-chunking and unzipping.
-  # Two types: file or dir.
-  #     - file logs into a single logfile.
-  #     - dir creates 2 files per HTTP session and stores the
-  #           normalized data into them.
-  # Use 'both' to enable both file and dir modes.
-  #
-  # Note: limited by the body limit settings
-  - http-body-data:
-      enabled: no
-      type: file
-      filename: http-data.log
-
-  # Lua Output Support - execute lua script to generate alert and event
-  # output.
-  # Documented at:
-  # https://suricata.readthedocs.io/en/latest/output/lua-output.html
-  - lua:
-      enabled: no
-      #scripts-dir: /etc/suricata/lua-output/
-      scripts:
-      #   - script1.lua
-
-# Logging configuration.  This is not about logging IDS alerts/events, but
-# output about what Suricata is doing, like startup messages, errors, etc.
-logging:
-  # The default log level: can be overridden in an output section.
-  # Note that debug level logging will only be emitted if Suricata was
-  # compiled with the --enable-debug configure option.
-  #
-  # This value is overridden by the SC_LOG_LEVEL env var.
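-  #
-  # Example (sketch; interface and config path are placeholders):
-  #   SC_LOG_LEVEL=info suricata -c /etc/suricata/suricata.yaml -i eth0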
-  default-log-level: notice
-
-  # The default output format.  Optional parameter, should default to
-  # something reasonable if not provided.  Can be overridden in an
-  # output section.  You can leave this out to get the default.
-  #
-  # This value is overridden by the SC_LOG_FORMAT env var.
-  #default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- "
-
-  # A regex to filter output.  Can be overridden in an output section.
-  # Defaults to empty (no filter).
-  #
-  # This value is overridden by the SC_LOG_OP_FILTER env var.
-  default-output-filter:
-
-  # Define your logging outputs.  If none are defined, or they are all
-  # disabled you will get the default: console output.
-  outputs:
-  - console:
-      enabled: yes
-      # type: json
-  - file:
-      enabled: yes
-      level: info
-      filename: /var/log/suricata/suricata.log
-      # type: json
-  - syslog:
-      enabled: no
-      facility: local5
-      format: "[%i] <%d> -- "
-      # type: json
-
-
-##
-## Step 3: Configure common capture settings
-##
-## See "Advanced Capture Options" below for more options, including Netmap
-## and PF_RING.
-##
-
-# Linux high speed capture support
-af-packet:
-  - interface: eth0
-    # Number of receive threads. "auto" uses the number of cores
-    #threads: auto
-    # Default clusterid. AF_PACKET will load balance packets based on flow.
-    cluster-id: 99
-    # Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash.
-    # This is only supported for Linux kernel > 3.1
-    # possible values are:
-    #  * cluster_flow: all packets of a given flow are sent to the same socket
-    #  * cluster_cpu: all packets treated in kernel by a CPU are sent to the same socket
-    #  * cluster_qm: all packets linked by network card to a RSS queue are sent to the same
-    #  socket. Requires at least Linux 3.14.
-    #  * cluster_ebpf: eBPF file load balancing. See doc/userguide/capture-hardware/ebpf-xdp.rst for
-    #  more info.
-    # Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system
-    # with capture card using RSS (requires cpu affinity tuning and system IRQ tuning)
-    cluster-type: cluster_flow
-    # In some fragmentation cases, the hash can not be computed. If "defrag" is set
-    # to yes, the kernel will do the needed defragmentation before sending the packets.
-    defrag: yes
-    # To use the ring feature of AF_PACKET, set 'use-mmap' to yes
-    #use-mmap: yes
-    # Lock memory map to avoid it being swapped. Be careful that over
-    # subscribing could lock your system
-    #mmap-locked: yes
-    # Use tpacket_v3 capture mode, only active if use-mmap is true
-    # Don't use it in IPS or TAP mode as it causes severe latency
-    #tpacket-v3: yes
-    # Ring size will be computed with respect to "max-pending-packets" and the number
-    # of threads. You can manually set the ring size (in packets) with the following
-    # value. If you are using the flow "cluster-type" and have very network-intensive
-    # single flows, you may want to set "ring-size" independently of the number
-    # of threads:
-    #ring-size: 2048
-    # Block size is used by tpacket_v3 only. It should be set to a value high enough to contain
-    # a decent number of packets. Size is in bytes, so please consider your MTU. It should be
-    # a power of 2 and it must be a multiple of the page size (usually 4096).
-    #block-size: 32768
-    # tpacket_v3 block timeout: an open block is passed to userspace if it is not
-    # filled after block-timeout milliseconds.
-    #block-timeout: 10
-    # On busy systems, set it to yes to help recover from a packet drop
-    # phase. This will result in some packets (at max a ring flush) not being inspected.
-    #use-emergency-flush: yes
-    # recv buffer size, increased value could improve performance
-    # buffer-size: 32768
-    # Set to yes to disable promiscuous mode
-    # disable-promisc: no
-    # Choose checksum verification mode for the interface. At the moment
-    # of the capture, some packets may have an invalid checksum due to
-    # the checksum computation being offloaded to the network card.
-    # Possible values are:
-    #  - kernel: use indication sent by kernel for each packet (default)
-    #  - yes: checksum validation is forced
-    #  - no: checksum validation is disabled
-    #  - auto: Suricata uses a statistical approach to detect when
-    #  checksum off-loading is used.
-    # Warning: 'capture.checksum-validation' must be set to yes to have any validation
-    #checksum-checks: kernel
-    # BPF filter to apply to this interface. The pcap filter syntax applies here.
-    #bpf-filter: port 80 or udp
-    # You can use the following variables to activate AF_PACKET tap or IPS mode.
-    # If copy-mode is set to ips or tap, the traffic coming to the current
-    # interface will be copied to the copy-iface interface. If 'tap' is set, the
-    # copy is complete. If 'ips' is set, the packet matching a 'drop' action
-    # will not be copied.
-    #copy-mode: ips
-    #copy-iface: eth1
-    #  For eBPF and XDP setup including bypass, filter and load balancing, please
-    #  see doc/userguide/capture-hardware/ebpf-xdp.rst for more info.
-
-  # Put default values here. These will be used for an interface that is not
-  # in the list above.
-  - interface: default
-    #threads: auto
-    #use-mmap: no
-    #tpacket-v3: yes
-
-# Cross platform libpcap capture support
-pcap:
-  - interface: eth0
-    # On Linux, pcap will try to use mmap'ed capture and will use "buffer-size"
-    # as total memory used by the ring. So set this to something bigger
-    # than 1% of your bandwidth.
-    #buffer-size: 16777216
-    #bpf-filter: "tcp and port 25"
-    # Choose checksum verification mode for the interface. At the moment
-    # of the capture, some packets may have an invalid checksum due to
-    # the checksum computation being offloaded to the network card.
-    # Possible values are:
-    #  - yes: checksum validation is forced
-    #  - no: checksum validation is disabled
-    #  - auto: Suricata uses a statistical approach to detect when
-    #  checksum off-loading is used. (default)
-    # Warning: 'capture.checksum-validation' must be set to yes to have any validation
-    #checksum-checks: auto
-    # With some accelerator cards using a modified libpcap (like Myricom), you
-    # may want to have the same number of capture threads as the number of capture
-    # rings. In this case, set the threads variable to N to start N threads
-    # listening on the same interface.
-    #threads: 16
-    # set to no to disable promiscuous mode:
-    #promisc: no
-    # Set the snaplen. If not set, it defaults to the MTU when the MTU can be
-    # determined via an ioctl call, and to full capture otherwise.
-    #snaplen: 1518
-  # Put default values here
-  - interface: default
-    #checksum-checks: auto
-
-# Settings for reading pcap files
-pcap-file:
-  # Possible values are:
-  #  - yes: checksum validation is forced
-  #  - no: checksum validation is disabled
-  #  - auto: Suricata uses a statistical approach to detect when
-  #  checksum off-loading is used. (default)
-  # Warning: 'checksum-validation' must be set to yes to have checksum tested
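-  # Example (sketch; paths are placeholders): this section applies when
-  # reading a capture file offline, e.g.:
-  #   suricata -r /path/to/trace.pcap -c /etc/suricata/suricata.yaml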
-  checksum-checks: auto
-
-# See "Advanced Capture Options" below for more options, including Netmap
-# and PF_RING.
-
-
-##
-## Step 4: App Layer Protocol configuration
-##
-
-# Configure the app-layer parsers. The protocol's section details each
-# protocol.
-#
-# The option "enabled" takes 3 values - "yes", "no", "detection-only".
-# "yes" enables both detection and the parser, "no" disables both, and
-# "detection-only" enables protocol detection only (parser disabled).
-app-layer:
-  protocols:
-    rfb:
-      enabled: yes
-      detection-ports:
-        dp: 5900, 5901, 5902, 5903, 5904, 5905, 5906, 5907, 5908, 5909
-    # MQTT, disabled by default.
-    mqtt:
-      enabled: yes
-      max-msg-length: 1mb
-    krb5:
-      enabled: yes
-    snmp:
-      enabled: yes
-    ikev2:
-      enabled: yes
-    tls:
-      enabled: yes
-      detection-ports:
-        dp: 443
-
-      # Generate JA3 fingerprint from client hello. If not specified it
-      # will be disabled by default, but enabled if rules require it.
-      ja3-fingerprints: yes
-
-      # What to do when the encrypted communications start:
-      # - default: keep tracking TLS session, check for protocol anomalies,
-      #            inspect tls_* keywords. Disables inspection of unmodified
-      #            'content' signatures.
-      # - bypass:  stop processing this flow as much as possible. No further
-      #            TLS parsing and inspection. Offload flow bypass to kernel
-      #            or hardware if possible.
-      # - full:    keep tracking and inspection as normal. Unmodified content
-      #            keyword signatures are inspected as well.
-      #
-      # For best performance, select 'bypass'.
-      #
-      #encryption-handling: default
-
-    dcerpc:
-      enabled: yes
-    ftp:
-      enabled: yes
-      # memcap: 64mb
-    rdp:
-      enabled: yes
-    ssh:
-      enabled: yes
-      hassh: yes
-    # HTTP2: Experimental HTTP 2 support. Disabled by default.
-    http2:
-      enabled: no
-    smtp:
-      enabled: yes
-      raw-extraction: no
-      # Configure SMTP-MIME Decoder
-      mime:
-        # Decode MIME messages from SMTP transactions
-        # (may be resource intensive)
-        # This field supersedes all others because it turns the entire
-        # process on or off
-        decode-mime: yes
-
-        # Decode MIME entity bodies (ie. Base64, quoted-printable, etc.)
-        decode-base64: yes
-        decode-quoted-printable: yes
-
-        # Maximum bytes per header data value stored in the data structure
-        # (default is 2000)
-        header-value-depth: 2000
-
-        # Extract URLs and save in state data structure
-        extract-urls: yes
-        # Set to yes to compute the md5 of the mail body. You will then
-        # be able to journalize it.
-        body-md5: yes
-      # Configure inspected-tracker for file_data keyword
-      inspected-tracker:
-        content-limit: 100000
-        content-inspect-min-size: 32768
-        content-inspect-window: 4096
-    imap:
-      enabled: detection-only
-    smb:
-      enabled: yes
-      detection-ports:
-        dp: 139, 445
-
-      # Stream reassembly size for SMB streams. By default track it completely.
-      #stream-depth: 0
-
-    nfs:
-      enabled: yes
-    tftp:
-      enabled: yes
-    dns:
-      tcp:
-        enabled: yes
-        detection-ports:
-          dp: 53
-      udp:
-        enabled: yes
-        detection-ports:
-          dp: 53
-    http:
-      enabled: yes
-      # memcap:                   Maximum memory capacity for HTTP
-      #                           Default is unlimited; values can be e.g. 64mb.
-
-      # default-config:           Used when no server-config matches
-      #   personality:            List of personalities used by default
-      #   request-body-limit:     Limit reassembly of request body for inspection
-      #                           by http_client_body & pcre /P option.
-      #   response-body-limit:    Limit reassembly of response body for inspection
-      #                           by file_data, http_server_body & pcre /Q option.
-      #
-      #   For advanced options, see the user guide
-
-
-      # server-config:            List of server configurations to use if address matches
-      #   address:                List of IP addresses or networks for this block
-      #   personality:            List of personalities used by this block
-      #
-      #                           Then, all the fields from default-config can be overloaded
-      #
-      # Currently Available Personalities:
-      #   Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0,
-      #   IIS_7_0, IIS_7_5, Apache_2
-      libhtp:
-         default-config:
-           personality: IDS
-
-           # Can be specified in kb, mb, gb.  Just a number indicates
-           # it's in bytes.
-           request-body-limit: 100kb
-           response-body-limit: 100kb
-
-           # inspection limits
-           request-body-minimal-inspect-size: 32kb
-           request-body-inspect-window: 4kb
-           response-body-minimal-inspect-size: 40kb
-           response-body-inspect-window: 16kb
-
-           # response body decompression (0 disables)
-           response-body-decompress-layer-limit: 2
-
-           # auto will use http-body-inline mode in IPS mode, yes or no set it statically
-           http-body-inline: auto
-
-           # Decompress SWF files.
-           # type: 'deflate', 'lzma', or 'both' ('both' decompresses deflate and lzma)
-           # compress-depth:
-           # Specifies the maximum amount of data to decompress,
-           # set 0 for unlimited.
-           # decompress-depth:
-           # Specifies the maximum amount of decompressed data to obtain,
-           # set 0 for unlimited.
-           swf-decompression:
-             enabled: yes
-             type: both
-             compress-depth: 0
-             decompress-depth: 0
-
-           # Use a random value for inspection sizes around the specified value.
-           # This lowers the risk of some evasion techniques but could lead
-           # to detection change between runs. It is set to 'yes' by default.
-           #randomize-inspection-sizes: yes
-           # If "randomize-inspection-sizes" is active, the value of various
-           # inspection size will be chosen from the [1 - range%, 1 + range%]
-           # range
-           # Default value of "randomize-inspection-range" is 10.
-           #randomize-inspection-range: 10
-
-           # decoding
-           double-decode-path: no
-           double-decode-query: no
-
-           # Can enable LZMA decompression
-           #lzma-enabled: false
-           # Memory limit usage for LZMA decompression dictionary
-           # Data is decompressed until dictionary reaches this size
-           #lzma-memlimit: 1mb
-           # Maximum decompressed size with a compression ratio
-           # above 2048 (only LZMA can reach this ratio, deflate cannot)
-           #compression-bomb-limit: 1mb
-
-         server-config:
-
-           #- apache:
-           #    address: [192.168.1.0/24, 127.0.0.0/8, "::1"]
-           #    personality: Apache_2
-           #    # Can be specified in kb, mb, gb.  Just a number indicates
-           #    # it's in bytes.
-           #    request-body-limit: 4096
-           #    response-body-limit: 4096
-           #    double-decode-path: no
-           #    double-decode-query: no
-
-           #- iis7:
-           #    address:
-           #      - 192.168.0.0/24
-           #      - 192.168.10.0/24
-           #    personality: IIS_7_0
-           #    # Can be specified in kb, mb, gb.  Just a number indicates
-           #    # it's in bytes.
-           #    request-body-limit: 4096
-           #    response-body-limit: 4096
-           #    double-decode-path: no
-           #    double-decode-query: no
-
-    # Note: The Modbus probing parser is minimalist due to limited usage in the field.
-    # Only the Modbus message length (greater than the Modbus header length)
-    # and the protocol ID (equal to 0) are checked by the probing parser.
-    # It is important to enable the detection port and define the Modbus port
-    # to avoid false positives.
-    modbus:
-      # How many unanswered Modbus requests are considered a flood.
-      # If the limit is reached, the app-layer-event:modbus.flooded; will match.
-      #request-flood: 500
-
-      enabled: yes
-      detection-ports:
-        dp: 502
-      # According to MODBUS Messaging on TCP/IP Implementation Guide V1.0b, it
-      # is recommended to keep the TCP connection opened with a remote device
-      # and not to open and close it for each MODBUS/TCP transaction. In that
-      # case, it is important to set the depth of the stream reassembling as
-      # unlimited (stream.reassembly.depth: 0)
-
-      # Stream reassembly size for modbus. By default track it completely.
-      stream-depth: 0
-
-    # DNP3
-    dnp3:
-      enabled: yes
-      detection-ports:
-        dp: 20000
-
-    # SCADA EtherNet/IP and CIP protocol support
-    enip:
-      enabled: no
-      detection-ports:
-        dp: 44818
-        sp: 44818
-
-    ntp:
-      enabled: yes
-
-    dhcp:
-      enabled: no
-
-    sip:
-      enabled: yes
-
-# Limit for the maximum number of asn1 frames to decode (default 256)
-asn1-max-frames: 256
-
-# Datasets default settings
-# datasets:
-#   # Default fallback memcap and hashsize values for datasets in case these
-#   # were not explicitly defined.
-#   defaults:
-#     memcap: 100mb
-#     hashsize: 2048
-
-##############################################################################
-##
-## Advanced settings below
-##
-##############################################################################
-
-##
-## Run Options
-##
-
-# Run Suricata with a specific user-id and group-id:
-run-as:
-  user: suri
-  group: suri
-
-# Some logging modules will use that name in the event as an identifier. The
-# default value is the hostname.
-#sensor-name: suricata
-
-# Default location of the pid file. The pid file is only used in
-# daemon mode (start Suricata with -D). If not running in daemon mode
-# the --pidfile command line option must be used to create a pid file.
-#pid-file: /var/run/suricata.pid
-
-# Daemon working directory
-# Suricata will change directory to this one if provided
-# Default: "/"
-#daemon-directory: "/"
-
-# Umask.
-# Suricata will use this umask if it is provided. By default it will use the
-# umask passed on by the shell.
-#umask: 022
-
-# Suricata core dump configuration. Limits the size of the core dump file to
-# approximately max-dump. The actual core dump size will be a multiple of the
-# page size. Core dumps that would be larger than max-dump are truncated. On
-# Linux, the actual core dump size may be a few pages larger than max-dump.
-# Setting max-dump to 0 disables core dumping.
-# Setting max-dump to 'unlimited' will give the full core dump file.
-# On 32-bit Linux, a max-dump value >= ULONG_MAX may cause the core dump size
-# to be 'unlimited'.
-
-coredump:
-  max-dump: unlimited
-
-# If the Suricata box is a router for the sniffed networks, set it to 'router'. If
-# it is a pure sniffing setup, set it to 'sniffer-only'.
-# If set to auto, the variable is internally switched to 'router' in IPS mode
-# and 'sniffer-only' in IDS mode.
-# This feature is currently only used by the reject* keywords.
-host-mode: auto
-
-# Number of packets preallocated per thread. The default is 1024. A higher number 
-# will make sure each CPU will be more easily kept busy, but may negatively 
-# impact caching.
-#max-pending-packets: 1024
-
-# Runmode the engine should use. Please check --list-runmodes to get the available
-# runmodes for each packet acquisition method. Default depends on selected capture
-# method. 'workers' generally gives best performance.
-#runmode: autofp
-
-# Specifies the kind of flow load balancer used by the flow pinned autofp mode.
-#
-# Supported schedulers are:
-#
-# hash     - Flow assigned to threads using the 5-7 tuple hash.
-# ippair   - Flow assigned to threads using addresses only.
-#
-#autofp-scheduler: hash
-
-# Preallocated size for each packet. Default is 1514 which is the classical
-# size for pcap on Ethernet. You should adjust this value to the highest
-# packet size (MTU + hardware header) on your system.
-#default-packet-size: 1514
-
-# Unix command socket that can be used to pass commands to Suricata.
-# An external tool can then connect to get information from Suricata
-# or trigger some modifications of the engine. Set enabled to yes
-# to activate the feature. In auto mode, the feature will only be
-# activated in live capture mode. You can use the filename variable to set
-# the file name of the socket.
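-# Example (sketch): with the socket enabled, the bundled suricatasc client
-# can query the running engine, e.g.:
-#   suricatasc -c "uptime"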
-unix-command:
-  enabled: yes
-  #filename: custom.socket
-
-# Magic file. The extension .mgc is added to the value here.
-#magic-file: /usr/share/file/magic
-magic-file: /usr/share/misc/magic.mgc 
-
-# GeoIP2 database file. Specify path and filename of GeoIP2 database
-# if using rules with "geoip" rule option.
-#geoip-database: /usr/local/share/GeoLite2/GeoLite2-Country.mmdb
-
-legacy:
-  uricontent: enabled
-
-##
-## Detection settings
-##
-
-# Set the order of alerts based on actions
-# The default order is pass, drop, reject, alert
-# action-order:
-#   - pass
-#   - drop
-#   - reject
-#   - alert
-
-# IP Reputation
-#reputation-categories-file: /etc/suricata/iprep/categories.txt
-#default-reputation-path: /etc/suricata/iprep
-#reputation-files:
-# - reputation.list
-
-# When run with the option --engine-analysis, the engine will read each of
-# the parameters below, and print reports for each of the enabled sections
-# and exit.  The reports are printed to a file in the default log dir
-# given by the parameter "default-log-dir", with engine reporting
-# subsection below printing reports in its own report file.
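-# Example invocation (sketch; config path is a placeholder):
-#   suricata --engine-analysis -c /etc/suricata/suricata.yaml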
-engine-analysis:
-  # enables printing reports for fast-pattern for every rule.
-  rules-fast-pattern: yes
-  # enables printing reports for each rule
-  rules: yes
-
-#recursion and match limits for PCRE where supported
-pcre:
-  match-limit: 3500
-  match-limit-recursion: 1500
-
-##
-## Advanced Traffic Tracking and Reconstruction Settings
-##
-
-# Host specific policies for defragmentation and TCP stream
-# reassembly. The host OS lookup is done using a radix tree, just
-# like a routing table so the most specific entry matches.
-host-os-policy:
-  # Make the default policy windows.
-  windows: [0.0.0.0/0]
-  bsd: []
-  bsd-right: []
-  old-linux: []
-  linux: []
-  old-solaris: []
-  solaris: []
-  hpux10: []
-  hpux11: []
-  irix: []
-  macos: []
-  vista: []
-  windows2k3: []
-
-# Defrag settings:
-
-defrag:
-  memcap: 32mb
-  hash-size: 65536
-  trackers: 65535 # number of defragmented flows to follow
-  max-frags: 65535 # number of fragments to keep (higher than trackers)
-  prealloc: yes
-  timeout: 60
-
-# Enable defrag per host settings
-#  host-config:
-#
-#    - dmz:
-#        timeout: 30
-#        address: [192.168.1.0/24, 127.0.0.0/8, 1.1.1.0/24, 2.2.2.0/24, "1.1.1.1", "2.2.2.2", "::1"]
-#
-#    - lan:
-#        timeout: 45
-#        address:
-#          - 192.168.0.0/24
-#          - 192.168.10.0/24
-#          - 172.16.14.0/24
-
-# Flow settings:
-# By default, the reserved memory (memcap) for flows is 32MB. This is the limit
-# for flow allocation inside the engine. You can change this value to allow
-# more memory usage for flows.
-# The hash-size determines the size of the hash used to identify flows inside
-# the engine, and by default the value is 65536.
-# At startup, the engine can preallocate a number of flows, to get better
-# performance. The number of flows preallocated is 10000 by default.
-# emergency-recovery is the percentage of flows that the engine needs to
-# prune before clearing the emergency state. The emergency state is activated
-# when the memcap limit is reached, allowing new flows to be created, but
-# pruning them with the emergency timeouts (they are defined below).
-# If the memcap is reached, the engine will try to prune flows
-# with the default timeouts. If it doesn't find a flow to prune, it will set
-# the emergency bit and it will try again with more aggressive timeouts.
-# If that doesn't work, it will try to kill the oldest flows based on the
-# time they were last seen.
-# The memcap can be specified in kb, mb, gb.  Just a number indicates it's
-# in bytes.
-
-flow:
-  memcap: 128mb
-  hash-size: 65536
-  prealloc: 10000
-  emergency-recovery: 30
-  #managers: 1 # default to one flow manager
-  #recyclers: 1 # default to one flow recycler thread
-
-# This option controls the use of VLAN ids in the flow (and defrag)
-# hashing. Normally this should be enabled, but in some (broken)
-# setups where both sides of a flow are not tagged with the same VLAN
-# tag, we can ignore the VLAN id's in the flow hashing.
-vlan:
-  use-for-tracking: true
-
-# Specific timeouts for flows. Here you can specify the timeouts that the
-# active flows will wait to transit from the current state to another, for each
-# protocol. The value of "new" determines the seconds to wait after a handshake
-# or stream startup before the engine frees the data of that flow if it doesn't
-# change state to established (usually because we don't receive more packets
-# of that flow). The value of "established" is the amount of
-# seconds that the engine will wait to free the flow if that time elapses
-# without receiving new packets or closing the connection. "closed" is the
-# amount of time to wait after a flow is closed (usually zero). The "bypassed"
-# timeout controls locally bypassed flows. For these flows we don't do any other
-# tracking. If no packets have been seen after this timeout, the flow is discarded.
-#
-# There's an emergency mode that will become active under attack circumstances,
-# making the engine check flow status faster. These configuration variables
-# use the prefix "emergency-" and work the same way as the normal ones.
-# Some timeouts don't apply to all protocols; "closed", for example, doesn't
-# apply to udp and icmp.
-
-flow-timeouts:
-
-  default:
-    new: 30
-    established: 300
-    closed: 0
-    bypassed: 100
-    emergency-new: 10
-    emergency-established: 100
-    emergency-closed: 0
-    emergency-bypassed: 50
-  tcp:
-    new: 60
-    established: 600
-    closed: 60
-    bypassed: 100
-    emergency-new: 5
-    emergency-established: 100
-    emergency-closed: 10
-    emergency-bypassed: 50
-  udp:
-    new: 30
-    established: 300
-    bypassed: 100
-    emergency-new: 10
-    emergency-established: 100
-    emergency-bypassed: 50
-  icmp:
-    new: 30
-    established: 300
-    bypassed: 100
-    emergency-new: 10
-    emergency-established: 100
-    emergency-bypassed: 50
-
-# Stream engine settings. Here the TCP stream tracking and reassembly
-# engine is configured.
-#
-# stream:
-#   memcap: 32mb                # Can be specified in kb, mb, gb.  Just a
-#                               # number indicates it's in bytes.
-#   checksum-validation: yes    # To validate the checksum of received
-#                               # packet. If csum validation is specified as
-#                               # "yes", then packets with invalid csum values will not
-#                               # be processed by the engine stream/app layer.
-#                               # Warning: locally generated traffic can be
-#                               # generated without checksum due to hardware offload
-#                               # of checksum. You can control the handling of checksum
-#                               # on a per-interface basis via the 'checksum-checks'
-#                               # option
-#   prealloc-sessions: 2k       # 2k sessions prealloc'd per stream thread
-#   midstream: false            # don't allow midstream session pickups
-#   async-oneside: false        # don't enable async stream handling
-#   inline: no                  # stream inline mode
-#   drop-invalid: yes           # in inline mode, drop packets that are invalid with regards to streaming engine
-#   max-synack-queued: 5        # Max different SYN/ACKs to queue
-#   bypass: no                  # Bypass packets when stream.reassembly.depth is reached.
-#                               # Warning: first side to reach this triggers
-#                               # the bypass.
-#
-#   reassembly:
-#     memcap: 64mb              # Can be specified in kb, mb, gb.  Just a number
-#                               # indicates it's in bytes.
-#     depth: 1mb                # Can be specified in kb, mb, gb.  Just a number
-#                               # indicates it's in bytes.
-#     toserver-chunk-size: 2560 # inspect raw stream in chunks of at least
-#                               # this size.  Can be specified in kb, mb,
-#                               # gb.  Just a number indicates it's in bytes.
-#     toclient-chunk-size: 2560 # inspect raw stream in chunks of at least
-#                               # this size.  Can be specified in kb, mb,
-#                               # gb.  Just a number indicates it's in bytes.
-#     randomize-chunk-size: yes # Take a random value for chunk size around the specified value.
-#                               # This lowers the risk of some evasion techniques but could lead
-#                               # to detection change between runs. It is set to 'yes' by default.
-#     randomize-chunk-range: 10 # If randomize-chunk-size is active, the value of chunk-size is
-#                               # a random value between (1 - randomize-chunk-range/100)*toserver-chunk-size
-#                               # and (1 + randomize-chunk-range/100)*toserver-chunk-size and the same
-#                               # calculation for toclient-chunk-size.
-#                               # Default value of randomize-chunk-range is 10.
-#
-#     raw: yes                  # 'Raw' reassembly enabled or disabled.
-#                               # raw is for content inspection by detection
-#                               # engine.
-#
-#     segment-prealloc: 2048    # number of segments preallocated per thread
-#
-#     check-overlap-different-data: true|false
-#                               # check if a segment contains different data
-#                               # than what we've already seen for that
-#                               # position in the stream.
-#                               # This is enabled automatically if inline mode
-#                               # is used or when stream-event:reassembly_overlap_different_data;
-#                               # is used in a rule.
-#
-stream:
-  memcap: 64mb
-  checksum-validation: yes      # reject incorrect csums
-  inline: auto                  # auto will use inline mode in IPS mode, yes or no set it statically
-  reassembly:
-    memcap: 256mb
-    depth: 1mb                  # reassemble 1mb into a stream
-    toserver-chunk-size: 2560
-    toclient-chunk-size: 2560
-    randomize-chunk-size: yes
-    #randomize-chunk-range: 10
-    #raw: yes
-    #segment-prealloc: 2048
-    #check-overlap-different-data: true
-
-# Host table:
-#
-# Host table is used by the tagging and per host thresholding subsystems.
-#
-host:
-  hash-size: 4096
-  prealloc: 1000
-  memcap: 32mb
-
-# IP Pair table:
-#
-# Used by xbits 'ippair' tracking.
-#
-#ippair:
-#  hash-size: 4096
-#  prealloc: 1000
-#  memcap: 32mb
-
-# Decoder settings
-
-decoder:
-  # Teredo decoder is known to not be completely accurate
-  # as it will sometimes detect non-teredo as teredo.
-  teredo:
-    enabled: true
-    # ports to look for Teredo. Max 4 ports. If no ports are given, or
-    # the value is set to 'any', Teredo detection runs on _all_ UDP packets.
-    ports: $TEREDO_PORTS # syntax: '[3544, 1234]' or '3533' or 'any'.
-
-  # VXLAN decoder is assigned to up to 4 UDP ports. By default only the
-  # IANA assigned port 4789 is enabled.
-  vxlan:
-    enabled: true
-    ports: $VXLAN_PORTS # syntax: '[8472, 4789]' or '4789'.
-
-  # Geneve decoder is assigned to up to 4 UDP ports. By default only the
-  # IANA assigned port 6081 is enabled.
-  geneve:
-    enabled: true
-    ports: $GENEVE_PORTS # syntax: '[6081, 1234]' or '6081'.
-
-##
-## Performance tuning and profiling
-##
-
-# The detection engine builds internal groups of signatures. The engine
-# allows us to specify the profile to use for them, to manage memory in an
-# efficient way keeping good performance. For the profile keyword you
-# can use the words "low", "medium", "high" or "custom". If you use custom,
-# make sure to define the values in the "custom-values" section.
-# Usually you would prefer medium/high/low.
-#
-# "sgh mpm-context", indicates how the staging should allot mpm contexts for
-# the signature groups.  "single" indicates the use of a single context for
-# all the signature group heads.  "full" indicates a mpm-context for each
-# group head.  "auto" lets the engine decide the distribution of contexts
-# based on the information the engine gathers on the patterns from each
-# group head.
-#
-# The option inspection-recursion-limit is used to limit the recursive calls
-# in the content inspection code.  For certain payload-sig combinations, we
-# might end up taking too much time in the content inspection code.
-# If the argument specified is 0, the engine uses an internally defined
-# default limit.  When a value is not specified, there are no limits on the recursion.
-detect:
-  profile: medium
-  custom-values:
-    toclient-groups: 3
-    toserver-groups: 25
-  sgh-mpm-context: auto
-  inspection-recursion-limit: 3000
-  # If set to yes, the loading of signatures will be made after the capture
-  # is started. This will limit the downtime in IPS mode.
-  #delayed-detect: yes
-
-  prefilter:
-    # default prefiltering setting. "mpm" only creates MPM/fast_pattern
-    # engines. "auto" also sets up prefilter engines for other keywords.
-    # Use --list-keywords=all to see which keywords support prefiltering.
-    default: mpm
-
-  # the grouping values above control how many groups are created per
-  # direction. Port whitelisting forces that port to get its own group.
-  # Very common ports will benefit, as well as ports with many expensive
-  # rules.
-  grouping:
-    #tcp-whitelist: 53, 80, 139, 443, 445, 1433, 3306, 3389, 6666, 6667, 8080
-    #udp-whitelist: 53, 135, 5060
-
-  profiling:
-    # Log the rules that made it past the prefilter stage, per packet
-    # default is off. The threshold setting determines how many rules
-    # must have made it past pre-filter for that rule to trigger the
-    # logging.
-    #inspect-logging-threshold: 200
-    grouping:
-      dump-to-disk: false
-      include-rules: false      # very verbose
-      include-mpm-stats: false
-
-# Select the multi-pattern algorithm you want to use for scan/search
-# in the engine.
-#
-# The supported algorithms are:
-# "ac"      - Aho-Corasick, default implementation
-# "ac-bs"   - Aho-Corasick, reduced memory implementation
-# "ac-ks"   - Aho-Corasick, "Ken Steele" variant
-# "hs"      - Hyperscan, available when built with Hyperscan support
-#
-# The default mpm-algo value of "auto" will use "hs" if Hyperscan is
-# available, "ac" otherwise.
-#
-# The mpm you choose also decides the distribution of mpm contexts for
-# signature groups, specified by the conf - "detect.sgh-mpm-context".
-# Selecting "ac" as the mpm would require "detect.sgh-mpm-context"
-# to be set to "single", because of ac's memory requirements, unless the
-# ruleset is small enough to fit in memory, in which case one can
-# use "full" with "ac".  The rest of the mpms can be run in "full" mode.
-
-mpm-algo: auto
-
-# Select the matching algorithm you want to use for single-pattern searches.
-#
-# Supported algorithms are "bm" (Boyer-Moore) and "hs" (Hyperscan, only
-# available if Suricata has been built with Hyperscan support).
-#
-# The default of "auto" will use "hs" if available, otherwise "bm".
-
-spm-algo: auto
-
-# Suricata is multi-threaded. Here the threading can be influenced.
-threading:
-  set-cpu-affinity: no
-  # Tune cpu affinity of threads. Each family of threads can be bound
-  # to specific CPUs.
-  #
-  # These 2 apply to the all runmodes:
-  # management-cpu-set is used for flow timeout handling, counters
-  # worker-cpu-set is used for 'worker' threads
-  #
-  # Additionally, for autofp these apply:
-  # receive-cpu-set is used for capture threads
-  # verdict-cpu-set is used for IPS verdict threads
-  #
-  cpu-affinity:
-    - management-cpu-set:
-        cpu: [ 0 ]  # include only these CPUs in affinity settings
-    - receive-cpu-set:
-        cpu: [ 0 ]  # include only these CPUs in affinity settings
-    - worker-cpu-set:
-        cpu: [ "all" ]
-        mode: "exclusive"
-        # Explicitly use 3 threads and don't compute the number using the
-        # detect-thread-ratio variable:
-        # threads: 3
-        prio:
-          low: [ 0 ]
-          medium: [ "1-2" ]
-          high: [ 3 ]
-          default: "medium"
-    #- verdict-cpu-set:
-    #    cpu: [ 0 ]
-    #    prio:
-    #      default: "high"
-  #
-  # By default Suricata creates one "detect" thread per available CPU/CPU core.
-  # This setting allows controlling this behaviour. A ratio setting of 2 will
-  # create 2 detect threads for each CPU/CPU core. So for a dual core CPU this
-  # will result in 4 detect threads. If values below 1 are used, fewer threads
-  # are created. So on a dual core CPU a setting of 0.5 results in 1 detect
-  # thread being created. Regardless of the setting at a minimum 1 detect
-  # thread will always be created.
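-  # For example, on a 4-core machine a ratio of 1.5 yields 6 detect threads,
-  # while 0.5 yields 2.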
-  #
-  detect-thread-ratio: 1.0
-
-# Luajit has a strange memory requirement: its 'states' need to be in the
-# first 2G of the process' memory.
-#
-# 'luajit.states' is used to control how many states are preallocated.
-# State use: per detect script: 1 per detect thread. Per output script: 1 per
-# script.
-luajit:
-  states: 128
-
-# Profiling settings. Only effective if Suricata has been built with
-# the --enable-profiling configure flag.
-#
-profiling:
-  # Run profiling for every X-th packet. The default is 1, which means we
-  # profile every packet. If set to 1000, one packet is profiled for every
-  # 1000 received.
-  #sample-rate: 1000
-
-  # rule profiling
-  rules:
-
-    # Profiling can be disabled here, but it will still have a
-    # performance impact if compiled in.
-    enabled: no
-    filename: rule_perf.log
-    append: yes
-
-    # Sort options: ticks, avgticks, checks, matches, maxticks
-    # If commented out all the sort options will be used.
-    #sort: avgticks
-
-    # Limit the number of sids for which stats are shown at exit (per sort).
-    limit: 10
-
-    # output to json
-    json: no
-
-  # per keyword profiling
-  keywords:
-    enabled: no
-    filename: keyword_perf.log
-    append: yes
-
-  prefilter:
-    enabled: no
-    filename: prefilter_perf.log
-    append: yes
-
-  # per rulegroup profiling
-  rulegroups:
-    enabled: no
-    filename: rule_group_perf.log
-    append: yes
-
-  # packet profiling
-  packets:
-
-    # Profiling can be disabled here, but it will still have a
-    # performance impact if compiled in.
-    enabled: no
-    filename: packet_stats.log
-    append: yes
-
-    # per packet csv output
-    csv:
-
-      # Output can be disabled here, but it will still have a
-      # performance impact if compiled in.
-      enabled: no
-      filename: packet_stats.csv
-
-  # profiling of locking. Only available when Suricata was built with
-  # --enable-profiling-locks.
-  locks:
-    enabled: no
-    filename: lock_stats.log
-    append: yes
-
-  pcap-log:
-    enabled: no
-    filename: pcaplog_stats.log
-    append: yes
-
-##
-## Netfilter integration
-##
-
-# When running in NFQ inline mode, it is possible to use a simulated
-# non-terminal NFQUEUE verdict.
-# This permits sending all needed packets to Suricata via this rule:
-#        iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE
-# And below, you can have your standard filtering ruleset. To activate
-# this mode, you need to set mode to 'repeat'.
-# If you want a packet to be sent to another queue after an ACCEPT decision,
-# set the mode to 'route' and set the next-queue value.
-# On Linux >= 3.1, you can set batchcount to a value > 1 to improve performance
-# by processing several packets before sending a verdict (worker runmode only).
-# On Linux >= 3.6, you can set the fail-open option to yes to have the kernel
-# accept the packet if Suricata is not able to keep pace.
-# bypass mark and mask can be used to implement NFQ bypass. If the bypass mark is
-# set then NFQ bypass is activated. Suricata will set the bypass mark/mask
-# on packets of a flow that needs to be bypassed. The Netfilter ruleset has to
-# directly accept all packets of a flow once a packet has been marked.
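-#
-# Example (sketch; config path is a placeholder): after adding the NFQUEUE
-# rule above, start Suricata bound to queue 0:
-#   suricata -c /etc/suricata/suricata.yaml -q 0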
-nfq:
-#  mode: accept
-#  repeat-mark: 1
-#  repeat-mask: 1
-#  bypass-mark: 1
-#  bypass-mask: 1
-#  route-queue: 2
-#  batchcount: 20
-#  fail-open: yes
-
-#nflog support
-nflog:
-    # netlink multicast group
-    # (the same as the iptables --nflog-group param)
-    # Group 0 is used by the kernel, so you can't use it
-  - group: 2
-    # netlink buffer size
-    buffer-size: 18432
-    # put default value here
-  - group: default
-    # set number of packets to queue inside kernel
-    qthreshold: 1
-    # set the delay before flushing packet in the kernel's queue
-    qtimeout: 100
-    # netlink max buffer size
-    max-size: 20000
-
-##
-## Advanced Capture Options
-##
-
-# General settings affecting packet capture
-capture:
-  # disable NIC offloading. It's restored when Suricata exits.
-  # Enabled by default.
-  #disable-offloading: false
-  #
-  # disable checksum validation. Same as setting '-k none' on the
-  # commandline.
-  #checksum-validation: none
-
-# Netmap support
-#
-# Netmap operates with the NIC directly in the driver, so you need FreeBSD 11+,
-# which has built-in Netmap support, or you must compile and install the Netmap
-# module and an appropriate NIC driver on your Linux system.
-# To reach maximum throughput disable all receive-, segmentation-,
-# checksum- offloading on your NIC (using ethtool or similar).
-# Disabling TX checksum offloading is *required* for connecting OS endpoint
-# with NIC endpoint.
-# You can find more information at https://github.com/luigirizzo/netmap
-#
-netmap:
-   # To specify an OS endpoint, add a plus sign at the end (e.g. "eth0+")
- - interface: eth2
-   # Number of capture threads. "auto" uses number of RSS queues on interface.
-   # Warning: unless the RSS hashing is symmetrical, this will lead to
-   # accuracy issues.
-   #threads: auto
-   # You can use the following variables to activate netmap tap or IPS mode.
-   # If copy-mode is set to ips or tap, the traffic coming to the current
-   # interface will be copied to the copy-iface interface. If 'tap' is set, the
-   # copy is complete. If 'ips' is set, packets matching a 'drop' action
-   # will not be copied (a commented IPS sketch follows this section).
-   # To specify the OS as the copy-iface (so the OS can route packets, or forward
-   # to a service running on the same machine) add a plus sign at the end
-   # (e.g. "copy-iface: eth0+"). Don't forget to set up a symmetrical eth0+ -> eth0
-   # for return packets. Hardware checksumming must be *off* on the interface if
-   # using an OS endpoint (e.g. 'ifconfig eth0 -rxcsum -txcsum -rxcsum6 -txcsum6' for FreeBSD
-   # or 'ethtool -K eth0 tx off rx off' for Linux).
-   #copy-mode: tap
-   #copy-iface: eth3
-   # Set to yes to disable promiscuous mode
-   # disable-promisc: no
-   # Choose checksum verification mode for the interface. At the moment
-   # of the capture, some packets may have an invalid checksum due to
-   # the checksum computation being offloaded to the network card.
-   # Possible values are:
-   #  - yes: checksum validation is forced
-   #  - no: checksum validation is disabled
-   #  - auto: Suricata uses a statistical approach to detect when
-   #  checksum off-loading is used.
-   # Warning: 'checksum-validation' must be set to yes to have any validation
-   #checksum-checks: auto
-   # BPF filter to apply to this interface. The pcap filter syntax applies here.
-   #bpf-filter: port 80 or udp
- #- interface: eth3
-   #threads: auto
-   #copy-mode: tap
-   #copy-iface: eth2
-   # Put default values here
- - interface: default
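-   # A hedged sketch of a symmetric netmap IPS pair, mirroring the commented
-   # eth2/eth3 tap example above but with copy-mode 'ips' so packets matching
-   # a 'drop' action are not forwarded (interface names are placeholders):
- #- interface: eth2
-   #copy-mode: ips
-   #copy-iface: eth3
- #- interface: eth3
-   #copy-mode: ips
-   #copy-iface: eth2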
-
-# PF_RING configuration: for use with native PF_RING support
-# for more info see http://www.ntop.org/products/pf_ring/
-pfring:
-  - interface: eth0
-    # Number of receive threads. If set to 'auto' Suricata will first try
-    # to use CPU (core) count and otherwise RSS queue count.
-    threads: auto
-
-    # Default clusterid.  PF_RING will load balance packets based on flow.
-    # All threads/processes that will participate need to have the same
-    # clusterid.
-    cluster-id: 99
-
-    # Default PF_RING cluster type. PF_RING can load balance per flow.
-    # Possible values are cluster_flow or cluster_round_robin.
-    cluster-type: cluster_flow
-
-    # bpf filter for this interface
-    #bpf-filter: tcp
-
-    # If bypass is set then the PF_RING hw bypass is activated, when supported
-    # by the network interface. Suricata will instruct the interface to bypass
-    # all future packets for a flow that needs to be bypassed.
-    #bypass: yes
-
-    # Choose checksum verification mode for the interface. At the moment
-    # of the capture, some packets may have an invalid checksum due to
-    # the checksum computation being offloaded to the network card.
-    # Possible values are:
-    #  - rxonly: only compute checksum for packets received by network card.
-    #  - yes: checksum validation is forced
-    #  - no: checksum validation is disabled
-    #  - auto: Suricata uses a statistical approach to detect when
-    #  checksum off-loading is used. (default)
-    # Warning: 'checksum-validation' must be set to yes to have any validation
-    #checksum-checks: auto
-  # Second interface
-  #- interface: eth1
-  #  threads: 3
-  #  cluster-id: 93
-  #  cluster-type: cluster_flow
-  # Put default values here
-  - interface: default
-    #threads: 2
-
-# For FreeBSD ipfw(8) divert(4) support.
-# Please make sure you have ipfw_load="YES" and ipdivert_load="YES"
-# in /etc/loader.conf, or kldload the appropriate kernel modules.
-# Additionally, you need to have an ipfw rule for the engine to see
-# the packets from ipfw.  For Example:
-#
-#   ipfw add 100 divert 8000 ip from any to any
-#
-# N.B. This example uses "8000" -- this number must match the value
-# you passed on the command line, i.e., -d 8000 (a sketch follows below).
-#
-ipfw:
-
-  # Reinject packets at the specified ipfw rule number.  This config
-  # option is the ipfw rule number AT WHICH rule processing continues
-  # in the ipfw processing system after the engine has finished
-  # inspecting the packet for acceptance.  If no rule number is specified,
-  # accepted packets are reinjected at the divert rule which they entered
-  # and IPFW rule processing continues.  No check is done to verify that
-  # this rule number makes sense, so care must be taken to avoid loops in ipfw.
-  #
-  ## The following example tells the engine to reinject packets
-  # back into the ipfw firewall AT rule number 5500:
-  #
-  # ipfw-reinjection-rule-number: 5500
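-  #
-  # A hedged end-to-end sketch tying the pieces above together. The rule and
-  # divert port numbers are the examples used in this file; the config path
-  # is an assumption:
-  #
-  #   ipfw add 100 divert 8000 ip from any to any
-  #   suricata -c /etc/suricata/suricata.yaml -d 8000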
-
-
-napatech:
-    # The Host Buffer Allowance for all streams
-    # (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back)
-    # This may be enabled when sharing streams with another application.
-    # Otherwise, it should be turned off.
-    #hba: -1
-
-    # When use_all_streams is set to "yes" the initialization code will query
-    # the Napatech service for all configured streams and listen on all of them.
-    # When set to "no" the streams config array will be used.
-    #
-    # This option necessitates running the appropriate NTPL commands to create
-    # the desired streams prior to running Suricata.
-    #use-all-streams: no
-
-    # The streams to listen on when auto-config is disabled or when threading
-    # cpu-affinity is disabled.  This can be either:
-    #   an individual stream (e.g. streams: [0])
-    # or
-    #   a range of streams (e.g. streams: ["0-3"])
-    #
-    streams: ["0-3"]
-
-    # Stream stats can be enabled to provide fine grain packet and byte counters
-    # for each thread/stream that is configured.
-    #
-    enable-stream-stats: no
-
-    # When auto-config is enabled the streams will be created and assigned
-    # automatically to the NUMA node where the thread resides.  If cpu-affinity
-    # is enabled in the threading section, the streams will be created
-    # according to the number of worker threads specified in the worker-cpu-set.
-    # Otherwise, the streams array is used to define the streams.
-    #
-    # This option is intended primarily to support legacy configurations.
-    #
-    # This option cannot be used simultaneously with either "use-all-streams"
-    # or "hardware-bypass".
-    #
-    auto-config: yes
-
-    # Enable hardware level flow bypass.
-    #
-    hardware-bypass: yes
-
-    # Enable inline operation.  When enabled traffic arriving on a given port is
-    # automatically forwarded out its peer port after analysis by Suricata.
-    #
-    inline: no
-
-    # Ports indicates which Napatech ports are to be used in auto-config mode.
-    # These are the port IDs of the ports that will be merged prior to the
-    # traffic being distributed to the streams.
-    #
-    # When hardware-bypass is enabled the ports must be configured as a segment:
-    # specify the port(s) on which upstream and downstream traffic will arrive.
-    # This information is necessary for the hardware to properly process flows.
-    #
-    # When using a tap configuration one of the ports will receive inbound traffic
-    # for the network and the other will receive outbound traffic. The two ports on a
-    # given segment must reside on the same network adapter.
-    #
-    # When using a SPAN-port configuration, the upstream and downstream traffic
-    # arrives on a single port. This is configured by setting the two sides of the
-    # segment to reference the same port (e.g. 0-0 to configure a SPAN port on
-    # port 0); a commented sketch follows at the end of this section.
-    #
-    # Port segments are specified in the form:
-    #    ports: [0-1,2-3,4-5,6-6,7-7]
-    #
-    # For legacy systems, when hardware-bypass is disabled, this can be specified in any
-    # of the following ways:
-    #
-    #   a list of individual ports (e.g. ports: [0,1,2,3])
-    #
-    #   a range of ports (e.g. ports: [0-3])
-    #
-    #   "all" to indicate that all ports are to be merged together
-    #   (e.g. ports: [all])
-    #
-    # This parameter has no effect if auto-config is disabled.
-    #
-    ports: [0-1,2-3]
-
-    # When auto-config is enabled the hashmode specifies the algorithm for
-    # determining to which stream a given packet is to be delivered.
-    # This can be any valid Napatech NTPL hashmode command.
-    #
-    # The most common hashmode commands are:  hash2tuple, hash2tuplesorted,
-    # hash5tuple, hash5tuplesorted and roundrobin.
-    #
-    # See the Napatech NTPL documentation for other hashmodes and details on their use.
-    #
-    # This parameter has no effect if auto-config is disabled.
-    #
-    hashmode: hash5tuplesorted
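-
-    # A hedged sketch of the SPAN-port case described above: both sides of the
-    # segment reference the same port, so upstream and downstream traffic
-    # arriving on port 0 is treated as one segment (the port number is
-    # illustrative):
-    #
-    # ports: [0-0]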
-
-##
-## Configure Suricata to load Suricata-Update managed rules.
-##
-
-default-rule-path: /var/lib/suricata/rules
-rule-files:
- - suricata.rules
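-
-# A hedged note: the suricata.rules file referenced above is the merged
-# ruleset that the suricata-update tool writes under /var/lib/suricata/rules
-# by default, e.g.:
-#
-#   suricata-update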
-
-##
-## Auxiliary configuration files.
-##
-
-classification-file: /var/lib/suricata/rules/classification.config
-reference-config-file: /etc/suricata/reference.config
-# threshold-file: /etc/suricata/threshold.config
-
-##
-## Include other configs
-##
-
-# Includes:  Files included here will be handled as if they were in-lined
-# in this configuration file. Files with relative pathnames will be
-# searched for in the same directory as this configuration file. You may
-# use absolute pathnames too.
-# You can include as many configuration files as needed.
-#include: include1.yaml
-#include: include2.yaml