diff --git a/class/defaults.yml b/class/defaults.yml index e0adaa4..6f05c24 100644 --- a/class/defaults.yml +++ b/class/defaults.yml @@ -56,37 +56,7 @@ parameters: for: 6h severity: warning - clusterLogging: - managementState: Managed - logStore: - type: lokistack - lokistack: - name: loki - elasticsearch: - nodeCount: 3 - storage: - size: 200Gi - redundancyPolicy: SingleRedundancy - nodeSelector: - node-role.kubernetes.io/infra: '' - retentionPolicy: - application: - maxAge: 7d - pruneNamespacesInterval: 15m - infra: - maxAge: 30d - pruneNamespacesInterval: 15m - audit: - maxAge: 30d - pruneNamespacesInterval: 15m - visualization: - type: kibana - kibana: - replicas: 2 - nodeSelector: - node-role.kubernetes.io/infra: '' - collection: - type: vector + clusterLogging: {} clusterLogForwarding: enabled: false diff --git a/component/config_forwarding.libsonnet b/component/config_forwarding.libsonnet new file mode 100644 index 0000000..3004b7a --- /dev/null +++ b/component/config_forwarding.libsonnet @@ -0,0 +1,188 @@ +local kap = import 'lib/kapitan.libjsonnet'; +local lib = import 'lib/openshift4-logging.libsonnet'; + +local inv = kap.inventory(); +local params = inv.parameters.openshift4_logging; + +local deployLokistack = params.components.lokistack.enabled; +local deployElasticsearch = params.components.elasticsearch.enabled; +local forwardingOnly = !deployLokistack && !deployElasticsearch; + +local pipelineOutputRefs(pipeline) = + local default = if forwardingOnly then [] else [ 'default' ]; + std.get(pipeline, 'forwarders', []) + default; + +// Apply default config for application logs. +local patchAppLogDefaults = { + local outputRefs = pipelineOutputRefs(params.clusterLogForwarding.application_logs), + local enablePipeline = std.length(outputRefs) > 0, + + pipelines: { + [if enablePipeline then 'application-logs']: { + inputRefs: [ 'application' ], + outputRefs: outputRefs, + }, + }, +}; + +// Apply default config for infra logs. +local patchInfraLogDefaults = { + local outputRefs = pipelineOutputRefs(params.clusterLogForwarding.infrastructure_logs), + local enablePipeline = params.clusterLogForwarding.infrastructure_logs.enabled && std.length(outputRefs) > 0, + + pipelines: { + [if enablePipeline then 'infrastructure-logs']: { + inputRefs: [ 'infrastructure' ], + outputRefs: outputRefs, + }, + }, +}; + +// Apply default config for audit logs. +local patchAuditLogDefaults = { + local outputRefs = pipelineOutputRefs(params.clusterLogForwarding.audit_logs), + local enablePipeline = params.clusterLogForwarding.audit_logs.enabled && std.length(outputRefs) > 0, + + pipelines: { + [if enablePipeline then 'audit-logs']: { + inputRefs: [ 'audit' ], + outputRefs: outputRefs, + }, + }, +}; + +// Enable json parsing for default pipelines if configured. +local patchJsonLogging = { + local enableAppLogs = std.get(params.clusterLogForwarding.application_logs, 'json', false), + local enableInfraLogs = std.get(params.clusterLogForwarding.infrastructure_logs, 'json', false), + + pipelines: { + [if enableAppLogs then 'application-logs']: { parse: 'json' }, + [if enableInfraLogs then 'infrastructure-logs']: { parse: 'json' }, + }, + [if deployElasticsearch && params.clusterLogForwarding.json.enabled then 'outputDefaults']: { + elasticsearch: { + structuredTypeKey: params.clusterLogForwarding.json.typekey, + structuredTypeName: params.clusterLogForwarding.json.typename, + }, + }, +}; + +// Enable detectMultilineErrors for default pipelines if configured. 
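+// detectMultilineErrors lets the collector reassemble stack traces that span
+// multiple container log lines into a single log record.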
+local patchMultilineErrors = {
+  local enableAppLogs = std.get(params.clusterLogForwarding.application_logs, 'detectMultilineErrors', false),
+  local enableInfraLogs = std.get(params.clusterLogForwarding.infrastructure_logs, 'detectMultilineErrors', false),
+
+  pipelines: {
+    [if enableAppLogs then 'application-logs']: { detectMultilineErrors: true },
+    [if enableInfraLogs then 'infrastructure-logs']: { detectMultilineErrors: true },
+  },
+};
+
+// --- patch deprecated `clusterLogForwarding.namespaces` config
+local namespaceGroups = (
+  if std.objectHas(params.clusterLogForwarding, 'namespaces') then
+    {
+      [ns]: {
+        namespaces: [ ns ],
+        forwarders: [ params.clusterLogForwarding.namespaces[ns].forwarder ],
+      }
+      for ns in std.objectFields(params.clusterLogForwarding.namespaces)
+    } else {}
+) + params.clusterLogForwarding.namespace_groups;
+// --- patch end
+
+// Add inputs entry for every namespace_group defined in `clusterLogForwarding.namespace_groups`.
+local patchCustomInputs = {
+  [if std.length(namespaceGroups) > 0 then 'inputs']: {
+    [group]: {
+      application: {
+        namespaces: namespaceGroups[group].namespaces,
+      },
+    }
+    for group in std.objectFields(namespaceGroups)
+  },
+};
+
+// Add pipelines entry for every namespace_group defined in `clusterLogForwarding.namespace_groups`.
+local patchCustomPipelines = {
+  [if std.length(namespaceGroups) > 0 then 'pipelines']: {
+    local enableJson = std.get(namespaceGroups[group], 'json', false),
+    local enableMultilineError = std.get(namespaceGroups[group], 'detectMultilineErrors', false),
+
+    [group]: {
+      inputRefs: [ group ],
+      outputRefs: std.get(namespaceGroups[group], 'forwarders', []),
+      [if enableJson then 'parse']: 'json',
+      [if enableMultilineError then 'detectMultilineErrors']: true,
+    }
+    for group in std.objectFields(namespaceGroups)
+  },
+};
+
+// Add outputs entry for every forwarder defined in `clusterLogForwarding.forwarders`.
+local patchCustomOutputs = {
+  [if std.length(params.clusterLogForwarding.forwarders) > 0 then 'outputs']: {
+    [name]: params.clusterLogForwarding.forwarders[name]
+    for name in std.objectFields(params.clusterLogForwarding.forwarders)
+  },
+};
+
+// ClusterLogForwarderSpecs:
+// Consecutively apply patches to the result of the previous apply.
+local clusterLogForwarderSpec = std.foldl(
+  // we use std.mergePatch here, because this way we don't need
+  // to make each patch object mergeable by suffixing all keys with a +.
+  function(manifest, patch) std.mergePatch(manifest, patch),
+  [
+    patchAppLogDefaults,
+    patchInfraLogDefaults,
+    patchAuditLogDefaults,
+    patchJsonLogging,
+    patchMultilineErrors,
+    patchCustomInputs,
+    patchCustomOutputs,
+    patchCustomPipelines,
+  ],
+  {
+    inputs: {},
+    outputs: {},
+    pipelines: {},
+  }
);
+
+// ClusterLogForwarder:
+// Create definitive ClusterLogForwarder resource from specs.
+local clusterLogForwarder = lib.ClusterLogForwarder(params.namespace, 'instance') {
+  spec: {
+    // Unfold objects into array.
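+    // The spec patches above keep inputs, outputs and pipelines keyed by
+    // name so std.mergePatch can merge them; the ClusterLogForwarder API
+    // expects arrays instead, e.g. { 'my-apps': {...} } becomes
+    // [ { name: 'my-apps', ... } ].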
+    [if std.length(clusterLogForwarderSpec.inputs) > 0 then 'inputs']: [
+      { name: name } + clusterLogForwarderSpec.inputs[name]
+      for name in std.objectFields(clusterLogForwarderSpec.inputs)
+    ],
+    [if std.length(clusterLogForwarderSpec.outputs) > 0 then 'outputs']: [
+      { name: name } + clusterLogForwarderSpec.outputs[name]
+      for name in std.objectFields(clusterLogForwarderSpec.outputs)
+    ],
+    [if std.length(clusterLogForwarderSpec.pipelines) > 0 then 'pipelines']: [
+      { name: name } + clusterLogForwarderSpec.pipelines[name]
+      for name in std.objectFields(clusterLogForwarderSpec.pipelines)
+    ],
+  } + {
+    // Import remaining specs as-is.
+    [key]: clusterLogForwarderSpec[key]
+    for key in std.objectFields(clusterLogForwarderSpec)
+    if !std.member([ 'inputs', 'outputs', 'pipelines' ], key)
+  },
+};
+
+// Define outputs below
+if params.clusterLogForwarding.enabled then
+  {
+    '31_cluster_logforwarding': clusterLogForwarder,
+  }
+else
+  std.trace(
+    'Log forwarding disabled, not deploying ClusterLogForwarder',
+    {}
+  )
diff --git a/component/config_logging.libsonnet b/component/config_logging.libsonnet
new file mode 100644
index 0000000..1945fce
--- /dev/null
+++ b/component/config_logging.libsonnet
@@ -0,0 +1,123 @@
+local kap = import 'lib/kapitan.libjsonnet';
+local lib = import 'lib/openshift4-logging.libsonnet';
+
+local inv = kap.inventory();
+local params = inv.parameters.openshift4_logging;
+
+local deployLokistack = params.components.lokistack.enabled;
+local deployElasticsearch = params.components.elasticsearch.enabled;
+
+// Apply defaults for Lokistack.
+local patchLokistackDefaults = {
+  [if deployLokistack then 'spec']: {
+    logStore: {
+      type: 'lokistack',
+      lokistack: {
+        name: 'loki',
+      },
+    },
+  },
+};
+
+// Apply defaults for Elasticsearch.
+local patchElasticsearchDefaults = {
+  [if deployElasticsearch then 'spec']: {
+    logStore: {
+      elasticsearch: {
+        nodeCount: 3,
+        storage: {
+          size: '200Gi',
+        },
+        redundancyPolicy: 'SingleRedundancy',
+        nodeSelector: {
+          'node-role.kubernetes.io/infra': '',
+        },
+      },
+      retentionPolicy: {
+        application: {
+          maxAge: '7d',
+          pruneNamespacesInterval: '15m',
+        },
+        infra: {
+          maxAge: '30d',
+          pruneNamespacesInterval: '15m',
+        },
+        audit: {
+          maxAge: '30d',
+          pruneNamespacesInterval: '15m',
+        },
+      },
+    },
+    visualization: {
+      type: 'kibana',
+      kibana: {
+        replicas: 2,
+        nodeSelector: {
+          'node-role.kubernetes.io/infra': '',
+        },
+      },
+    },
+  },
+};
+
+// Apply customisations from params.clusterLogging.
+local patchLoggingConfig = {
+  spec: params.clusterLogging {
+    collection: {
+      // Don't include legacy config key 'collection.logs'.
+      [it]: params.clusterLogging.collection[it]
+      for it in std.objectFields(std.get(params.clusterLogging, 'collection', {}))
+      if it != 'logs'
+    },
+  },
+};
+
+// --- patch deprecated logging resource
+local patchLegacyConfig = {
+  local legacyConfig = std.get(std.get(params.clusterLogging, 'collection', {}), 'logs', {}),
+  local legacyType = std.get(legacyConfig, 'type', ''),
+  local legacyFluentd = std.get(legacyConfig, 'fluentd', {}),
+
+  spec: {
+    collection: if std.length(legacyConfig) > 0 then std.trace(
+      'Parameter `clusterLogging.collection.logs` is deprecated. Please update your config to use `clusterLogging.collection`',
+      {
+        [if legacyType != '' then 'type']: legacyType,
+      } + legacyFluentd,
+    ) else {},
+  },
+};
+// --- patch end
+
+
+// ClusterLogging specs:
+// Consecutively apply patches to the result of the previous apply.
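+// A toy illustration of the merge semantics (not part of the component):
+//   std.mergePatch({ a: { b: 1 } }, { a: { c: 2 } }) == { a: { b: 1, c: 2 } }
+//   std.mergePatch({ a: { b: 1 } }, { a: { b: null } }) == { a: {} }
+// A patch can thus also remove a key by setting it to null.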
+local clusterLogging = std.foldl(
+  // we use std.mergePatch here, because this way we don't need
+  // to make each patch object mergeable by suffixing all keys with a +.
+  function(manifest, patch) std.mergePatch(manifest, patch),
+  [
+    patchLokistackDefaults,
+    patchElasticsearchDefaults,
+    patchLoggingConfig,
+    patchLegacyConfig,
+  ],
+  lib.ClusterLogging(params.namespace, 'instance') {
+    metadata+: {
+      annotations+: {
+        'argocd.argoproj.io/sync-options': 'SkipDryRunOnMissingResource=true',
+      },
+    },
+    spec: {
+      managementState: 'Managed',
+      collection: {
+        type: 'vector',
+      },
+    },
+  }
+);
+
+// Define outputs below
+{
+  '30_cluster_logging': clusterLogging,
+}
diff --git a/component/main.jsonnet b/component/main.jsonnet
index b359f47..843e178 100644
--- a/component/main.jsonnet
+++ b/component/main.jsonnet
@@ -9,57 +9,6 @@ local params = inv.parameters.openshift4_logging;
 local deployLokistack = params.components.lokistack.enabled;
 local deployElasticsearch = params.components.elasticsearch.enabled;
 
-local group = 'operators.coreos.com/';
-local clusterLoggingGroupVersion = 'logging.openshift.io/v1';
-
-local forwardingOnly = !params.components.elasticsearch.enabled && !params.components.lokistack.enabled;
-
-local namespace_groups = (
-  if std.objectHas(params.clusterLogForwarding, 'namespaces') then
-    {
-      [ns]: {
-        namespaces: [ ns ],
-        forwarders: [ params.clusterLogForwarding.namespaces[ns].forwarder ],
-      }
-      for ns in std.objectFields(params.clusterLogForwarding.namespaces)
-    }
-  else
-    {}
-) + params.clusterLogForwarding.namespace_groups;
-
-// --- Patch deprecated logging resource
-local legacyCollectionConfig = std.get(params.clusterLogging.collection, 'logs', {});
-local legacyCollectionPatch = if std.length(legacyCollectionConfig) > 0 then std.trace(
-  'Parameter `clusterLogging.collector.logs` is deprecated. Please update your config to use `clusterLogging.collector`',
-  {
-    local type = std.get(legacyCollectionConfig, 'type', ''),
-    local fluentd = std.get(legacyCollectionConfig, 'fluentd', {}),
-    collection+: {
-      [if type != '' then 'type']: type,
-    } + if std.length(fluentd) > 0 then fluentd,
-  }
-) else {};
-
-local clusterLogging = std.mergePatch(
-  params.clusterLogging {
-    collection: {
-      [it]: params.clusterLogging.collection[it]
-      for it in std.objectFields(params.clusterLogging.collection)
-      if it != 'logs'
-    },
-  } + legacyCollectionPatch,
-  {
-    // Patch to remove certain keys, as the ClusterLogging operator would just
-    // deploy elasticsearch or kibana if they are configured
-    [if forwardingOnly then 'logStore']: null,
-  }
-);
-// --- End patch
-
-local pipelineOutputRefs(pipeline) =
-  local default = if forwardingOnly then [] else [ 'default' ];
-  std.get(pipeline, 'forwarders', []) + default;
-
 // Namespace
 
 local namespace = kube.Namespace(params.namespace) {
@@ -136,109 +85,9 @@ local subscriptions = std.filter(function(it) it != null, [
   '00_namespace': namespace,
   '10_operator_group': operatorGroup,
   '20_subscriptions': subscriptions,
-  '30_cluster_logging': std.mergePatch(
-    // ClusterLogging resource from inventory
-    kube._Object(clusterLoggingGroupVersion, 'ClusterLogging', 'instance') {
-      metadata+: {
-        namespace: params.namespace,
-        annotations+: {
-          'argocd.argoproj.io/sync-options': 'SkipDryRunOnMissingResource=true',
-        },
-      },
-      spec: clusterLogging,
-    }, {
-      // Patch to remove certain keys, as the ClusterLogging operator would just
-      // deploy elasticsearch or kibana if they are configured
-      spec: {
-        logStore: {
-          [if !params.components.elasticsearch.enabled then 'elasticsearch']: null,
-          [if !params.components.lokistack.enabled then 'lokistack']: null,
-        },
-        [if !params.components.elasticsearch.enabled then 'visualization']: null,
-      },
-    }
-  ),
-  [if params.clusterLogForwarding.enabled then '31_cluster_logforwarding']: kube._Object(clusterLoggingGroupVersion, 'ClusterLogForwarder', 'instance') {
-    metadata+: {
-      namespace: params.namespace,
-    },
-    spec: {
-      [if params.clusterLogForwarding.json.enabled then 'outputDefaults']: {
-        elasticsearch: {
-          structuredTypeKey: params.clusterLogForwarding.json.typekey,
-          structuredTypeName: params.clusterLogForwarding.json.typename,
-        },
-      },
-      [if std.length(params.clusterLogForwarding.forwarders) > 0 then 'outputs']: [
-        params.clusterLogForwarding.forwarders[fw] { name: fw }
-        for fw in std.objectFields(params.clusterLogForwarding.forwarders)
-      ],
-      [if std.length(namespace_groups) > 0 then 'inputs']: [
-        {
-          name: group,
-          application: {
-            namespaces: namespace_groups[group].namespaces,
-          },
-        }
-        for group in std.objectFields(namespace_groups)
-      ],
-      [if std.length(namespace_groups) > 0 then 'pipelines']: [
-        local enable_json = com.getValueOrDefault(namespace_groups[group], 'json', false);
-        local enable_multilineErrors = com.getValueOrDefault(namespace_groups[group], 'detectMultilineErrors', false);
-        local patch_json = { outputRefs: [ 'default' ], parse: 'json' };
-        {
-          name: group,
-          inputRefs: [ group ],
-          outputRefs: com.getValueOrDefault(namespace_groups[group], 'forwarders', []),
-        } + com.makeMergeable(if enable_json then patch_json else {})
-        + com.makeMergeable(if enable_multilineErrors then { detectMultilineErrors: true } else {})
-        for group in std.objectFields(namespace_groups)
-      ],
-    } + com.makeMergeable(
-      local enable_pipeline = std.length(pipelineOutputRefs(params.clusterLogForwarding.application_logs)) > 0;
-      local enable_json = com.getValueOrDefault(params.clusterLogForwarding.application_logs, 'json', false);
-      local enable_multilineErrors = com.getValueOrDefault(params.clusterLogForwarding.application_logs, 'detectMultilineErrors', false);
-      {
-        [if enable_pipeline then 'pipelines']: [
-          {
-            name: 'application-logs',
-            inputRefs: [ 'application' ],
-            outputRefs: pipelineOutputRefs(params.clusterLogForwarding.application_logs),
-            [if enable_json then 'parse']: 'json',
-            [if enable_multilineErrors then 'detectMultilineErrors']: true,
-          },
-        ],
-      }
-    ) + com.makeMergeable(
-      local enable_pipeline = params.clusterLogForwarding.infrastructure_logs.enabled && std.length(pipelineOutputRefs(params.clusterLogForwarding.infrastructure_logs)) > 0;
-      local enable_json = com.getValueOrDefault(params.clusterLogForwarding.infrastructure_logs, 'json', false);
-      local enable_multilineErrors = com.getValueOrDefault(params.clusterLogForwarding.infrastructure_logs, 'detectMultilineErrors', false);
-      {
-        [if enable_pipeline then 'pipelines']: [
-          {
-            name: 'infrastructure-logs',
-            inputRefs: [ 'infrastructure' ],
-            outputRefs: pipelineOutputRefs(params.clusterLogForwarding.infrastructure_logs),
-            [if enable_json then 'parse']: 'json',
-            [if enable_multilineErrors then 'detectMultilineErrors']: true,
-          },
-        ],
-      }
-    ) + com.makeMergeable(
-      local enable_pipeline = params.clusterLogForwarding.audit_logs.enabled && std.length(pipelineOutputRefs(params.clusterLogForwarding.application_logs)) > 0;
-      local enable_json = com.getValueOrDefault(params.clusterLogForwarding.audit_logs, 'json', false);
-      {
-        [if params.clusterLogForwarding.audit_logs.enabled then 'pipelines']: [
-          {
-            name: 'audit-logs',
-            inputRefs: [ 'audit' ],
-            outputRefs: pipelineOutputRefs(params.clusterLogForwarding.audit_logs),
-          },
-        ],
-      }
-    ),
-  },
 }
++ (import 'config_logging.libsonnet')
++ (import 'config_forwarding.libsonnet')
 + (import 'loki.libsonnet')
 + (import 'elasticsearch.libsonnet')
 + (import 'alertrules.libsonnet')
diff --git a/docs/modules/ROOT/pages/references/parameters.adoc b/docs/modules/ROOT/pages/references/parameters.adoc
index 8a5d5aa..e0ae599 100644
--- a/docs/modules/ROOT/pages/references/parameters.adoc
+++ b/docs/modules/ROOT/pages/references/parameters.adoc
@@ -292,7 +292,7 @@ The max allowed volume for a tenant per day can be calculated with the following
 ++++
 
 The default of 5 MiB/s allows up to ~420 GiB of logs per day for a tenant.
-See the https://docs.openshift.com/container-platform/latest/logging/cluster-logging-loki.html#logging-loki-deploy_cluster-logging-loki[Openshift Docs] for available parameters.
+See the https://docs.openshift.com/container-platform/latest/observability/logging/cluster-logging-deploying.html#configuring-log-storage-cr_cluster-logging-deploying[Openshift Docs] for available parameters.
 See the https://loki-operator.dev/docs/api.md/[Loki Operator Docs] for available Lokistack specs.
 
 == `operatorResources`
@@ -312,7 +312,7 @@ default:: see `defaults.yml`
 
 A dictionary holding the `.spec` for cluster logging.
 
-See the https://docs.openshift.com/container-platform/latest/logging/config/cluster-logging-configuring-cr.html[OpenShift docs] for available parameters.
+See the https://docs.openshift.com/container-platform/latest/observability/logging/cluster-logging-deploying.html#create-cluster-logging-cli_cluster-logging-deploying[OpenShift docs] for available parameters.
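+
+For example, a minimal override that only tunes log retention could look like
+this (illustrative only; field names follow the upstream `ClusterLogging` CRD):
+
+[source,yaml]
+----
+clusterLogging:
+  logStore:
+    retentionPolicy:
+      application:
+        maxAge: 7d
+----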
 
 == `clusterLogForwarding`
@@ -333,7 +333,7 @@ default:: {}
 
 Each key in this dictionary holds the parameters for an `.spec.outputs` object.
 
-See the https://docs.openshift.com/container-platform/latest/logging/cluster-logging-external.html[OpenShift docs] for available parameters.
+See the https://docs.openshift.com/container-platform/4.16/observability/logging/log_collection_forwarding/configuring-log-forwarding.html#logging-create-clf_configuring-log-forwarding[OpenShift docs] for available parameters.
 
 === `clusterLogForwarding.namespace_groups`
@@ -481,7 +481,7 @@ clusterLogForwarding:
 <2> The value of that field, if present, is used to construct the index name.
 <3> If `typekey` isn't set or its key isn't present, the value of this field is used to construct the index name.
 
-See the https://docs.openshift.com/container-platform/latest/logging/cluster-logging-enabling-json-logging.html#cluster-logging-configuration-of-json-log-data-for-default-elasticsearch_cluster-logging-enabling-json-logging[OpenShift docs] for a detailed explanation.
+See the https://docs.openshift.com/container-platform/latest/observability/logging/log_collection_forwarding/cluster-logging-enabling-json-logging.html[OpenShift docs] for a detailed explanation.
 
 == Example
diff --git a/lib/openshift4-logging.libsonnet b/lib/openshift4-logging.libsonnet
new file mode 100644
index 0000000..dd2a061
--- /dev/null
+++ b/lib/openshift4-logging.libsonnet
@@ -0,0 +1,18 @@
+local kube = import 'lib/kube.libjsonnet';
+
+local ClusterLogging(namespace, name) = kube._Object('logging.openshift.io/v1', 'ClusterLogging', name) {
+  metadata+: {
+    namespace: namespace,
+  },
+};
+
+local ClusterLogForwarder(namespace, name) = kube._Object('logging.openshift.io/v1', 'ClusterLogForwarder', name) {
+  metadata+: {
+    namespace: namespace,
+  },
+};
+
+{
+  ClusterLogging: ClusterLogging,
+  ClusterLogForwarder: ClusterLogForwarder,
+}
diff --git a/tests/golden/defaults/openshift4-logging/openshift4-logging/30_cluster_logging.yaml b/tests/golden/defaults/openshift4-logging/openshift4-logging/30_cluster_logging.yaml
index ec6cac6..307f0ca 100644
--- a/tests/golden/defaults/openshift4-logging/openshift4-logging/30_cluster_logging.yaml
+++ b/tests/golden/defaults/openshift4-logging/openshift4-logging/30_cluster_logging.yaml
@@ -13,15 +13,5 @@ spec:
   logStore:
     lokistack:
       name: loki
-    retentionPolicy:
-      application:
-        maxAge: 7d
-        pruneNamespacesInterval: 15m
-      audit:
-        maxAge: 30d
-        pruneNamespacesInterval: 15m
-      infra:
-        maxAge: 30d
-        pruneNamespacesInterval: 15m
     type: lokistack
   managementState: Managed
diff --git a/tests/golden/forwardingonly/openshift4-logging/openshift4-logging/30_cluster_logging.yaml b/tests/golden/forwardingonly/openshift4-logging/openshift4-logging/30_cluster_logging.yaml
index af17d2f..5b5a28d 100644
--- a/tests/golden/forwardingonly/openshift4-logging/openshift4-logging/30_cluster_logging.yaml
+++ b/tests/golden/forwardingonly/openshift4-logging/openshift4-logging/30_cluster_logging.yaml
@@ -10,5 +10,4 @@ metadata:
 spec:
   collection:
     type: vector
-  logStore: {}
   managementState: Managed
diff --git a/tests/golden/master/openshift4-logging/openshift4-logging/30_cluster_logging.yaml b/tests/golden/master/openshift4-logging/openshift4-logging/30_cluster_logging.yaml
index ec6cac6..307f0ca 100644
--- a/tests/golden/master/openshift4-logging/openshift4-logging/30_cluster_logging.yaml
+++ b/tests/golden/master/openshift4-logging/openshift4-logging/30_cluster_logging.yaml
@@ -13,15 +13,5 @@ spec:
   logStore:
     lokistack:
       name: loki
-    retentionPolicy:
-      application:
-        maxAge: 7d
-        pruneNamespacesInterval: 15m
-      audit:
-        maxAge: 30d
-        pruneNamespacesInterval: 15m
-      infra:
-        maxAge: 30d
-        pruneNamespacesInterval: 15m
     type: lokistack
   managementState: Managed
diff --git a/tests/golden/master/openshift4-logging/openshift4-logging/31_cluster_logforwarding.yaml b/tests/golden/master/openshift4-logging/openshift4-logging/31_cluster_logforwarding.yaml
new file mode 100644
index 0000000..c51f960
--- /dev/null
+++ b/tests/golden/master/openshift4-logging/openshift4-logging/31_cluster_logforwarding.yaml
@@ -0,0 +1,35 @@
+apiVersion: logging.openshift.io/v1
+kind: ClusterLogForwarder
+metadata:
+  annotations: {}
+  labels:
+    name: instance
+  name: instance
+  namespace: openshift-logging
+spec:
+  inputs:
+  - application:
+      namespaces:
+      - app-one
+      - app-two
+    name: my-apps
+  outputs:
+  - name: custom-forwarder
+    type: syslog
+  pipelines:
+  - inputRefs:
+    - application
+    name: application-logs
+    outputRefs:
+    - default
+  - inputRefs:
+    - infrastructure
+    name: infrastructure-logs
+    outputRefs:
+    - default
+  - inputRefs:
+    - my-apps
+    name: my-apps
+    outputRefs:
+    - custom-forwarder
+    parse: json
diff --git a/tests/golden/multilineerr/openshift4-logging/openshift4-logging/30_cluster_logging.yaml b/tests/golden/multilineerr/openshift4-logging/openshift4-logging/30_cluster_logging.yaml
index ec6cac6..307f0ca 100644
--- a/tests/golden/multilineerr/openshift4-logging/openshift4-logging/30_cluster_logging.yaml
+++ b/tests/golden/multilineerr/openshift4-logging/openshift4-logging/30_cluster_logging.yaml
@@ -13,15 +13,5 @@ spec:
   logStore:
     lokistack:
       name: loki
-    retentionPolicy:
-      application:
-        maxAge: 7d
-        pruneNamespacesInterval: 15m
-      audit:
-        maxAge: 30d
-        pruneNamespacesInterval: 15m
-      infra:
-        maxAge: 30d
-        pruneNamespacesInterval: 15m
     type: lokistack
   managementState: Managed
diff --git a/tests/master.yml b/tests/master.yml
index be9ca18..71ef248 100644
--- a/tests/master.yml
+++ b/tests/master.yml
@@ -25,3 +25,17 @@ parameters:
   openshift4_logging:
     channel: 'stable'
     alerts: 'master'
+
+    clusterLogForwarding:
+      enabled: true
+      forwarders:
+        custom-forwarder:
+          type: syslog
+      namespace_groups:
+        my-apps:
+          namespaces:
+            - app-one
+            - app-two
+          forwarders:
+            - custom-forwarder
+          json: true
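+
+    # The ClusterLogForwarder rendered from this config is captured in the golden
+    # file tests/golden/master/openshift4-logging/openshift4-logging/31_cluster_logforwarding.yaml.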