##################################################################################################
##                                                                                              ##
##  Default configurations to use a JDBC database for storing events and projections           ##
##                                                                                              ##
##  Specific settings are passed as environment variables                                      ##
##                                                                                              ##
##################################################################################################
pekko {
  loglevel = DEBUG
  loggers = ["org.apache.pekko.event.slf4j.Slf4jLogger"]
  logger-startup-timeout = 10s

  actor {
    processtask-dispatcher {
      type = Dispatcher
      executor = "thread-pool-executor"
      thread-pool-executor {
        fixed-pool-size = 64
      }
      throughput = 1
    }

    localrouter-mailbox {
      mailbox-type = "org.apache.pekko.dispatch.SingleConsumerOnlyUnboundedMailbox"
      mailbox-capacity = 1500
    }

    deployment {
      "/default-router" {
        mailbox = "pekko.actor.localrouter-mailbox"
      }
      "/cases" {
        mailbox = "pekko.actor.localrouter-mailbox"
      }
      "/process-tasks/*" {
        dispatcher = "pekko.actor.processtask-dispatcher"
      }
    }

    serialize-messages = true

    serializers {
      cafienne_serializer = "org.cafienne.infrastructure.serialization.CafienneSerializer"
      jackson-json = "org.apache.pekko.serialization.jackson.JacksonJsonSerializer"
    }
    serialization-bindings {
      "org.cafienne.infrastructure.serialization.CafienneSerializable" = cafienne_serializer
      "org.cafienne.infrastructure.serialization.JacksonSerializable" = jackson-json
    }
  }

  persistence {
    journal {
      plugin = "jdbc-journal"
      auto-start-journals = ["jdbc-journal"]
    }
    snapshot-store {
      plugin = "jdbc-snapshot-store"
    }
  }
}

cafienne {
  # Engine wide platform settings
  platform {
    # Platform has owners that are allowed to create/disable/enable tenants.
    # This property specifies the set of user-id's that are owners.
    # This array may not be empty.
    owners = ["admin", "CgVhZG1pbhIFbG9jYWw"]
    owners = ${?CAFIENNE_PLATFORM_OWNERS}

    # Default tenant will be used when a user does not provide the tenant as a parameter to
    # the API call (e.g. in StartCase). When the user is member of only one tenant,
    # then that tenant will be submitted along with the StartCase command.
    # If the user belongs to multiple tenants, then this default-tenant option will be passed.
    default-tenant = "world"
    default-tenant = ${?CAFIENNE_PLATFORM_DEFAULT_TENANT}

    # bootstrap-tenants holds a reference to zero or more json (or yaml) files that have default tenant information.
    # Each file should contain information for one tenant (tenant name, users and owners).
    # During launch of the case engine, the files will be scanned and a CreateTenant command is sent
    # into the system, thereby setting up one or more default tenants.
    # If the bootstrap-tenants property is not filled and the default-tenant has a value, the system will search for
    # a file that holds the default tenant name plus either a .conf, .json, .yml or .yaml extension.
    # E.g. in case default-tenant = 'world', the system would search for existence in the following order:
    #  - 'world.conf'
    #  - 'world.json'
    #  - 'world.yml'
    #  - 'world.yaml'
    # If none of these files are found, the bootstrap attempt will be skipped.
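    #
    # For illustration only, a minimal sketch of what such a bootstrap tenant file might contain,
    # based on the description above (tenant name, users and owners). The exact field names are an
    # assumption; verify them against the Cafienne documentation on tenant bootstrapping:
    #
    #   name = "world"
    #   owners = ["admin"]
    #   users = [{
    #     id = "admin"
    #     name = "Administrator"
    #   }]
    #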
    bootstrap-tenants = ["bootstrap/world.tenant.conf"]
  }

  engine {
    # Properties for sending tasks of type Mail
    mail-service {
      # Here you can fill any regular jakarta.mail properties
      # All properties mentioned here are passed into the connection with the mail server
      mail.host = localhost
      mail.smtp.port = 1025
      # Optional username/password to be used to connect to the mail server
      authentication {
        user = ""
        password = ""
      }
    }

    # Properties to configure the timer service
    timer-service {
      # The timer service window is the period ahead for which timers are fetched from storage and scheduled in memory
      window = 1h
      # Interval determines the period to wait until loading the next timer window, defaults to once per hour
      interval = 50m
      # Reference to the journal JDBC configuration (typically a shared-database).
      # This is used by the Timer Service to store the events. Note, this MUST point to the journal database,
      # as that is also the database to which the flyway JDBC schema migrations are applied.
      store = "pekko-persistence-jdbc.shared-databases.slick"
    }

    # Properties to configure storage (archiving and deletion functionality)
    storage-service {
      # Configuration of the archive mechanism - where to archive to and restore from
      archive {
        plugin = "file"
        file {
          directory = "./archive"
        }
      }
      # By default, when the engine starts, it checks whether any running storage processes
      # have to be recovered. This can be disabled through this property.
      disable-recovery = false
    }
  }

  api {
    bindhost = "0.0.0.0"
    bindport = 2027

    security {
      ###############################################################################
      ##  Below settings can be used to configure multiple OIDC services           ##
      ##  that ship with the docker images in the cafienne repository named        ##
      ##  getting-started.                                                         ##
      ###############################################################################
      # Configuration settings for OpenID Connect.
      # Note: this is a different config (list based) when compared to other engine-setup configs.
      # The below version is the new style, the other versions are in the "old" style, which is still supported.
      # The below version is used in the in-memory and jdbc configs.
      oidc = [{
        connect-url = "http://localhost:2377"
        token-url = "http://localhost:2377/token"
        key-url = "http://localhost:2377/keys"
        authorization-url = "http://localhost:2377/auth"
        issuer = "http://localhost:2377"
      }]

      # The subject of a valid JWT token is used to query the corresponding registered platform user from the database.
      # These identities can be cached to avoid repeated queries and thereby improve throughput times.
      # The size of the cache can be set here, it defaults to 1000.
      # The cache is disabled if size is 0 or a negative number.
      identity.cache.size = 1000

      ###################################################################################################
      ##                                                                                               ##
      ##  Fill this setting to true to allow developers to access engine events without authentication ##
      ##                                                                                               ##
      ##  WARNING - Enabling opens up the full engine in read-only mode for anyone to access           ##
      ##                                                                                               ##
      ###################################################################################################
      debug.events.open = true
      debug.events.open = ${?CAFIENNE_DEBUG_EVENTS}
    }
  }
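  # Illustrative only: the list-based oidc setting above supports multiple identity providers by
  # adding more entries to the array. The second entry in the sketch below is a placeholder
  # (its URLs and the provider itself are assumptions, not part of this setup):
  #
  #   oidc = [{
  #     connect-url = "http://localhost:2377"
  #     token-url = "http://localhost:2377/token"
  #     key-url = "http://localhost:2377/keys"
  #     authorization-url = "http://localhost:2377/auth"
  #     issuer = "http://localhost:2377"
  #   }, {
  #     connect-url = "https://idp.example.com"
  #     token-url = "https://idp.example.com/token"
  #     key-url = "https://idp.example.com/keys"
  #     authorization-url = "https://idp.example.com/auth"
  #     issuer = "https://idp.example.com"
  #   }]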
  # The case engine reads definitions as XML files from disk and/or the classpath.
  # The files are cached in-memory, based on their lastModified timestamp
  # (i.e., if you change a file on disk, the engine will reload it into the cache).
  # By default, the engine will read from the configured location. If the definitions file cannot be found
  # in this location, the engine will try to load it as a resource from the classpath, making it possible
  # to ship fixed definitions in a jar file.
  definitions {
    provider = "org.cafienne.cmmn.repository.file.FileBasedDefinitionProvider"
    location = "./definitions"
    location = ${?CAFIENNE_CMMN_DEFINITIONS_PATH}
    cache {
      size = 100
    }
  }

  actor {
    # The number of seconds to wait for a response to a command in the http command routes
    ask-timeout = 60

    # The number of seconds of idle time after which a case actor is removed from memory.
    # If the case has not received new commands within this period,
    # the case engine will ask the actor system to remove the case from memory to avoid memory leaks.
    idle-period = 600

    # If debug is true, then all StartCase commands by default will run in debug mode,
    # unless specified otherwise in the command
    debug = false
  }

  # This setting tells cafienne which journal to use for reading events.
  # If omitted, cafienne will try to guess the read journal, based on the default settings
  read-journal = "jdbc-read-journal"

  query-db {
    profile = ""
    profile = ${?QUERY_DB_PROFILE}
    db {
      driver = ""
      driver = ${?QUERY_DB_DRIVER}
      ###################################################################
      ##                                                               ##
      ##  Database schema 'cafienne-query' must be created manually    ##
      ##                                                               ##
      ###################################################################
      ###################################################################
      ##                                                               ##
      ##  MAKE SURE TO FILL USER AND PASSWORD FOR CONNECTION           ##
      ##                                                               ##
      ###################################################################
      user = ""
      user = ${?QUERY_DB_USER}
      password = ""
      password = ${?QUERY_DB_PASSWORD}
      numThreads = 10
      connectionTimeout = 5000
      validationTimeout = 5000
      url = ""
      url = ${?QUERY_DB_URL}
    }

    # Configuration options handling exceptions that may occur while reading
    # the event streams that populate the query-db tables.
    # See also https://pekko.apache.org/docs/pekko/current/stream/stream-error.html#delayed-restarts-with-a-backoff-operator
    restart-stream {
      min-back-off = 500ms
      max-back-off = 30s
      random-factor = 0.20
      max-restarts = 20
      max-restarts-within = 5m
    }
  }
}

#######################################################################################
##                                                                                   ##
##  Below are settings for the JDBC Database journal                                 ##
##                                                                                   ##
##  As of now, the database schema 'cafienne-eventstore' must be created manually    ##
##                                                                                   ##
#######################################################################################
pekko-persistence-jdbc {
  database-provider-fqcn = "org.cafienne.journal.jdbc.EventDatabaseProvider"
  shared-databases {
    slick {
      profile = ""
      profile = ${?EVENT_DB_PROFILE}
      db {
        driver = ""
        driver = ${?EVENT_DB_DRIVER}
        url = ""
        url = ${?EVENT_DB_URL}
        # User name to connect, update and query
        user = ""
        user = ${?EVENT_DB_USER}
        password = ""
        password = ${?EVENT_DB_PASSWORD}
        numThreads = 5
        connectionTimeout = 5000
        validationTimeout = 5000
        maxConnections = 5
        minConnections = 1
      }
    }
  }
}

jdbc-journal {
  use-shared-db = "slick"
}

jdbc-snapshot-store {
  use-shared-db = "slick"
}

jdbc-read-journal {
  use-shared-db = "slick"
  refresh-interval = "0.1s"
}
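
# For illustration only: a local PostgreSQL setup might pass environment variables along these
# lines. Host, port, database names and credentials below are assumptions; adjust them to your
# own environment:
#
#   QUERY_DB_PROFILE=slick.jdbc.PostgresProfile$
#   QUERY_DB_DRIVER=org.postgresql.Driver
#   QUERY_DB_URL=jdbc:postgresql://localhost:5432/cafienne-query
#   QUERY_DB_USER=cafienne
#   QUERY_DB_PASSWORD=<secret>
#   EVENT_DB_PROFILE=slick.jdbc.PostgresProfile$
#   EVENT_DB_DRIVER=org.postgresql.Driver
#   EVENT_DB_URL=jdbc:postgresql://localhost:5432/cafienne-eventstore
#   EVENT_DB_USER=cafienne
#   EVENT_DB_PASSWORD=<secret>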