##################################################################################################
##                                                                                              ##
##  Default configurations to use                                                               ##
##    - Cassandra for storing events                                                            ##
##    - PostgreSQL for building up projections database for case queries                        ##
##                                                                                              ##
##  Specific settings are passed as environment variables                                       ##
##                                                                                              ##
##################################################################################################
pekko {
  loglevel = DEBUG
  loggers = ["org.apache.pekko.event.slf4j.Slf4jLogger"]
  logger-startup-timeout = 10s

  actor {
    processtask-dispatcher {
      type = Dispatcher
      executor = "thread-pool-executor"
      thread-pool-executor {
        fixed-pool-size = 64
      }
      throughput = 1
    }

    localrouter-mailbox {
      mailbox-type = "org.apache.pekko.dispatch.SingleConsumerOnlyUnboundedMailbox"
      mailbox-capacity = 1500
    }

    deployment {
      "/default-router" {
        mailbox = "pekko.actor.localrouter-mailbox"
      }
      "/cases" {
        mailbox = "pekko.actor.localrouter-mailbox"
      }
      "/process-tasks/*" {
        dispatcher = "pekko.actor.processtask-dispatcher"
      }
    }

    serialize-messages = true

    serializers {
      cafienne_serializer = "org.cafienne.infrastructure.serialization.CafienneSerializer"
      jackson-json = "org.apache.pekko.serialization.jackson.JacksonJsonSerializer"
    }
    serialization-bindings {
      "org.cafienne.infrastructure.serialization.CafienneSerializable" = cafienne_serializer
      "org.cafienne.infrastructure.serialization.JacksonSerializable" = jackson-json
    }
  }

  persistence {
    journal {
      plugin = "pekko.persistence.cassandra.journal"
      auto-start-journals = ["pekko.persistence.cassandra.journal"]
    }
    snapshot-store {
      plugin = "pekko.persistence.cassandra.snapshot"
    }
  }
}

cafienne {
  # Engine wide platform settings
  platform {
    # The platform has owners that are allowed to create/disable/enable tenants.
    # This property specifies the set of user ids that are owners.
    # This array may not be empty.
    owners = ["admin"]
    owners = ${?CAFIENNE_PLATFORM_OWNERS}

    # The default tenant is used when a user does not provide the tenant as a parameter to
    # the API call (e.g. in StartCase). When the user is a member of only one tenant,
    # that tenant is submitted along with the StartCase command.
    # If the user belongs to multiple tenants, this default-tenant option is used instead.
    default-tenant = "world"
    default-tenant = ${?CAFIENNE_PLATFORM_DEFAULT_TENANT}

    # bootstrap-tenants holds a reference to zero or more json (or yaml) files that hold default tenant information.
    # Each file should contain information for one tenant (tenant name, users and owners).
    # During launch of the case engine, the files are scanned and a CreateTenant command is sent
    # into the system, thereby setting up one or more default tenants.
    # If the bootstrap-tenants property is not filled and the default-tenant has a value, the system searches for
    # a file that holds the default tenant name plus either a .conf, .json, .yml or .yaml extension.
    # E.g., when default-tenant = 'world', the system searches for existence in the following order:
    #  - 'world.conf'
    #  - 'world.json'
    #  - 'world.yml'
    #  - 'world.yaml'
    # If none of these files are found, the bootstrap attempt is skipped.
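    #
    # As an illustrative sketch only (the field names and user details below are
    # assumptions, not prescribed by this configuration), a bootstrap file such as
    # the one referenced here could contain:
    #
    #   name = "world"
    #   users = [{
    #     id = "admin"            # hypothetical user id
    #     name = "Platform Admin" # hypothetical display name
    #     isOwner = true
    #   }]
    #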
    bootstrap-tenants = ["bootstrap/world.tenant.conf"]
  }

  engine {
    # Properties for sending tasks of type Mail
    mail-service {
      # Here you can set any regular jakarta.mail properties.
      # All properties mentioned here are passed into the connection with the mail server.
      mail.host = localhost
      mail.smtp.port = 1025
      # Optional username/password to be used to connect to the mail server
      authentication {
        user = ""
        password = ""
      }
    }

    # Properties to configure the timer service
    timer-service {
      # The timer service window is the period ahead for which timers are fetched from storage and scheduled in memory
      window = 65m
      # The interval determines the period to wait before loading the next timer window; defaults to once per hour
      interval = 1h
    }

    # Properties to configure storage (archiving and deletion functionality)
    storage-service {
      # Configuration of the archive mechanism - where to archive to and restore from
      archive {
        plugin = "file"
        file {
          directory = "./archive"
        }
      }
      # By default, when the engine starts, it checks whether any running storage processes
      # have to be recovered. This can be disabled through this property.
      disable-recovery = false
    }
  }

  api {
    bindhost = "0.0.0.0"
    bindport = 2027

    security {
      # Configuration settings for OpenID Connect
      oidc {
        ##################################################################################
        ##  The settings below configure the untrustworthy token service.               ##
        ##  Only use this to run the Cafienne Testscript Framework for test purposes.   ##
        ##  Never use this in production systems.                                       ##
        ##################################################################################
        connect-url = "http://localhost:2377/.well-known/openid-configuration"
        token-url = "http://localhost:2377/token"
        key-url = "http://localhost:2377/keys"
        authorization-url = "http://localhost:2377/auth"
        issuer = "http://localhost:2377"
      }

      # The subject of a valid JWT token is used to query the corresponding registered platform user from the database.
      # These identities can be cached to avoid repeated queries and thereby improve throughput times.
      # The size of the cache can be set here; it defaults to 1000.
      # The cache is disabled if the size is 0 or a negative number.
      identity.cache.size = 1000

      ####################################################################################################
      ##                                                                                                ##
      ##  Set this property to true to allow developers to access engine events without authentication  ##
      ##                                                                                                ##
      ##  WARNING - Enabling this opens up the full engine in read-only mode for anyone to access       ##
      ##                                                                                                ##
      ####################################################################################################
      debug.events.open = true
      debug.events.open = ${?CAFIENNE_DEBUG_EVENTS}
    }
  }

  # The case engine supports various ways to list, load and deploy case definitions.
  # The settings below can be used to configure various options; the default settings use
  # a file based definition provider.
  # An alternative is to use the StartCaseDefinitionProvider, in which case
  # definitions must be passed along with the StartCase REST API call itself.
  #
  # The case engine reads definitions as XML files from disk and/or the classpath.
  # The files are cached in memory, based on their lastModified timestamp
  # (i.e., if you change a file on disk, the engine will reload it into the cache).
  # By default, the engine reads from the configured location. If a definitions file cannot be found
  # in this location, the engine tries to load it as a resource from the classpath, making it
  # possible to ship fixed definitions in a jar file.
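  #
  # A sketch of that classpath fallback (the file name is illustrative): ship e.g.
  # 'definitions/travelrequest.xml' inside the application jar and keep the default
  # configuration below; when './definitions/travelrequest.xml' is not found on disk,
  # the engine attempts to resolve 'travelrequest.xml' from the classpath instead.
  #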
  definitions {
    # The default provider is based on reading/writing from the local file system
    provider = "org.cafienne.cmmn.repository.file.FileBasedDefinitionProvider"
    location = "./definitions"
    location = ${?CAFIENNE_CMMN_DEFINITIONS_PATH}
    cache {
      size = 100
    }

    # Use the provider below to start cases while passing the definition along with the StartCase call.
    # Note that the StartCaseDefinitionProvider also makes use of the same cache settings.
    # provider = "org.cafienne.cmmn.repository.StartCaseDefinitionProvider"
  }

  actor {
    # The number of seconds that the http command routes
    # wait for a response to a command
    ask-timeout = 60

    # The number of seconds of idle time after which a case actor is removed from memory.
    # If the case has not received new commands within the specified number of seconds,
    # the case engine asks the actor system to remove the case from memory to avoid memory leaks.
    idle-period = 600

    # If debug is true, then by default all StartCase commands run in debug mode,
    # unless specified otherwise in the command.
    debug = false
  }

  # This setting tells cafienne which journal to use for reading events.
  # If omitted, cafienne tries to guess the read journal, based on the default settings.
  read-journal = "cassandra.query"

  query-db {
    profile = "slick.jdbc.PostgresProfile$"
    profile = ${?QUERY_DB_PROFILE}
    db {
      driver = "org.postgresql.Driver"
      driver = ${?QUERY_DB_DRIVER}
      url = ""
      url = ${?QUERY_DB_URL}

      ###################################################################
      ##                                                               ##
      ##  MAKE SURE TO FILL USER AND PASSWORD FOR THE CONNECTION       ##
      ##                                                               ##
      ###################################################################
      user = "postgresuser"
      user = ${?QUERY_DB_USER}
      password = "mysecret"
      password = ${?QUERY_DB_PASSWORD}
      numThreads = 10
      connectionTimeout = 5000
      validationTimeout = 5000
    }

    # Configuration options for handling exceptions that may occur while reading
    # the event streams that populate the query-db tables.
    # See also https://pekko.apache.org/docs/pekko/current/stream/stream-error.html#delayed-restarts-with-a-backoff-operator
    restart-stream {
      min-back-off = 500ms
      max-back-off = 30s
      random-factor = 0.20
      max-restarts = 20
      max-restarts-within = 5m
    }
  }
}

############################################################################################
##                                                                                        ##
##  Below are settings for the Cassandra journal db.                                      ##
##  More configuration properties can be found at:                                        ##
##  https://pekko.apache.org/docs/pekko-persistence-cassandra/current/configuration.html  ##
##                                                                                        ##
############################################################################################
pekko.persistence.cassandra {
  # Meta columns were added in version 0.55. If you don't alter the existing messages table and still
  # use `tables-autocreate=on`, you have to set this property to off.
  # When trying to create the materialized view with the meta columns before the corresponding columns
  # have been added to the messages table, an exception "Undefined column name meta_ser_id" is raised,
  # because Cassandra validates the "CREATE MATERIALIZED VIEW IF NOT EXISTS"
  # even though the view already exists and will not be created. To work around that issue you can disable the
  # meta data columns in the materialized view by setting this property to off.
  meta-in-events-by-tag-view = off

  journal {
    # Parameter indicating whether the journal keyspace should be auto created.
    # Not all Cassandra settings are configurable when using autocreate; for
    # full control of the keyspace and table definitions you should create them
    # manually (with a script), as sketched below.
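    #
    # A minimal sketch of such a script (the keyspace name and replication settings
    # are assumptions and must match your cluster and plugin configuration):
    #
    #   CREATE KEYSPACE IF NOT EXISTS pekko
    #     WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};
    #
    # When managing the schema yourself, set keyspace-autocreate and tables-autocreate
    # to false.
    #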
    keyspace-autocreate = true

    # Parameter indicating whether the journal tables should be auto created.
    # Not all Cassandra settings are configurable when using autocreate; for
    # full control of the keyspace and table definitions you should create them
    # manually (with a script).
    tables-autocreate = true
  }

  query {
    # New events are retrieved (polled) with this interval.
    refresh-interval = 100ms
  }

  events-by-tag {
    # Enable DistributedPubSub to announce that events for a specific tag have
    # been written. These announcements cause any ongoing getEventsByTag query to immediately
    # re-poll, rather than wait. To enable this feature, make the following settings:
    #
    # - enable clustering for your actor system
    # - pekko.persistence.cassandra.pubsub-notification=on (send real-time announcements at most every sec)
    #
    # Setting pubsub-notification to "off" disables the journal sending these announcements.
    # When enabled with `on` it throttles the number of notification messages per tag to at most 10 per second.
    # Alternatively a duration can be defined, such as pubsub-notification=300ms, which then becomes the
    # throttling interval for sending the notifications.
    pubsub-notification = on

    # Configure this to the first bucket eventsByTag queries will start from, in the format
    # yyyyMMddTHH:mm (yyyyMMdd is also supported if using Day as a bucket size).
    # The value is rounded down to the start of whatever time bucket it falls into.
    # When NoOffset is used, the query looks for events from this day onward.
    first-time-bucket = "20241101T00:00"

    # When a new persistenceId is found in an eventsByTag query that wasn't found in the initial offset scanning
    # period, because it didn't have any events in the current time bucket, this is how long the stage delays events
    # while looking for any lower tag pid sequence nrs. 0s means that the found event is assumed to be the first.
    # The edge case: if events for a not previously seen persistenceId come out of order, then with this set to
    # 0s the newer event is delivered, and when the older event is found the stream fails, as events have
    # to be delivered in order.
    # By default this is disabled, but for a low (< 1 s) eventual-consistency-delay it can be set to something like
    # 100ms for a reduced risk of missing an event in the edge case where events are seen in the "wrong" order.
    new-persistence-id-scan-timeout = 100ms

    # For eventsByTag queries: how long to delay the query, by setting the upper bound of the event TimeUUID
    # slightly in the past.
    # This is very important, as event writes that come from different nodes
    # will have slightly different clocks, meaning events aren't received in TimeUUID order in C*.
    # Keeping clocks in sync with ntp is not sufficient to prevent this, as it only takes a millisecond.
    #
    # In addition, events written from the same node in an unlogged batch, meant to be isolated in C*, have been
    # found in load testing to arrive partially at a concurrent query, and the partial results are not always the
    # ones with the lowest TimeUUID.
    #
    # Events coming out of order are detected for persistenceIds that the query has already seen, by using the
    # previous tag pid sequence number. However, for persistenceIds that the query has not seen, the expected
    # tag pid sequence number is not known.
    #
    # Another reason this is important: if the events are delivered to the query immediately and the offset is
    # greater than some delayed events, then if the query is restarted from that offset the delayed events will
    # never be delivered.
    #
    # Setting this to anything lower than 2s is highly discouraged.
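    #
    # Given the warning above, a production-oriented setup would typically keep this
    # at 2s or higher, e.g. (illustrative value):
    #
    #   eventual-consistency-delay = 5s
    #
    # The low value below favors fast feedback and fits a development configuration
    # such as this one.
    #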
    eventual-consistency-delay = 100ms
  }

  snapshot {
    # Parameter indicating whether the snapshot keyspace should be auto created.
    # Not all Cassandra settings are configurable when using autocreate; for
    # full control of the keyspace and table definitions you should create them
    # manually (with a script).
    keyspace-autocreate = true

    # Parameter indicating whether the snapshot tables should be auto created.
    # Not all Cassandra settings are configurable when using autocreate; for
    # full control of the keyspace and table definitions you should create them
    # manually (with a script).
    tables-autocreate = true
  }
}

datastax-java-driver {
  # Always set this to allow reconnection on startup if cassandra is down.
  # This is not in an overridable profile, so this plugin can't override it for you.
  advanced.reconnect-on-init = true
}
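
# Example (illustrative values; the database host and name are assumptions) of the
# environment variables referenced above, as they could be passed when launching
# the engine:
#
#   CAFIENNE_PLATFORM_OWNERS=admin
#   CAFIENNE_PLATFORM_DEFAULT_TENANT=world
#   CAFIENNE_CMMN_DEFINITIONS_PATH=./definitions
#   QUERY_DB_URL=jdbc:postgresql://postgres:5432/cafienne-query
#   QUERY_DB_USER=postgresuser
#   QUERY_DB_PASSWORD=mysecret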