Surfacer Config
cloudprober.surfacer.LabelFilter #
key: <string> value: <string>
cloudprober.surfacer.SurfacerDef #
# This name is used for logging. If not defined, it's derived from the type.
# Note that this field is required for the USER_DEFINED surfacer type and
# should match the name that you used while registering the user-defined
# surfacer.
name: <string>

type: (NONE|PROMETHEUS|STACKDRIVER|FILE|POSTGRES|PUBSUB|CLOUDWATCH|DATADOG|PROBESTATUS|BIGQUERY|OTEL|USER_DEFINED): <enum>

# How many metrics entries (EventMetrics) to buffer. This is the buffer
# between incoming metrics and the metrics that are being processed. The
# default value should work in most cases. You may need to increase it on a
# busy system, but that's usually a sign that your metrics processing
# pipeline is slow for some reason, e.g. slow writes to a remote file.
# Note: Only the file and pubsub surfacers support this option right now.
metrics_buffer_size: <int64> | default: 10000

# If specified, only allow metrics that match any of these label filters.
# Example:
# allow_metrics_with_label {
#   key: "probe",
#   value: "check_homepage",
# }
allow_metrics_with_label: <cloudprober.surfacer.LabelFilter>

# Ignore metrics that match any of these label filters. Ignore has precedence
# over allow filters.
# Example:
# ignore_metrics_with_label {
#   key: "probe",
#   value: "sysvars",
# }
ignore_metrics_with_label: <cloudprober.surfacer.LabelFilter>

# Allow and ignore metrics based on their names. You can specify regexes
# here. Ignore has precedence over allow.
# Examples:
# ignore_metrics_with_name: "validation_failure"
# allow_metrics_with_name: "(total|success|latency)"
allow_metrics_with_name: <string>
ignore_metrics_with_name: <string>

# Whether to add the failure metric or not. This option is enabled by default.
add_failure_metric: <bool> | default: true

# If set to true, cloudprober will export all metrics as gauge metrics. Note
# that cloudprober inherently generates only cumulative metrics. To create
# gauge metrics from cumulative metrics, we keep a copy of the old metrics
# and subtract new metrics from the previous metrics. This transformation has
# an increased memory overhead because extra copies are required. However, it
# should not be noticeable unless you're producing a large number of metrics
# (say > 10000 metrics per second).
export_as_gauge: <bool>

# Latency metric name pattern, used to identify latency metrics and add the
# EventMetric's LatencyUnit to them.
latency_metric_pattern: <string> | default: ^(.+_|)latency$

# Environment variable containing additional labels to be added to all
# metrics exported by this surfacer.
# e.g. "CLOUDPROBER_ADDITIONAL_LABELS=env=prod,app=identity-service"
# You can disable this feature by setting this field to an empty string.
# Note: These additional labels have no effect if metrics already have the
# same label.
additional_labels_env_var: <string> | default: CLOUDPROBER_ADDITIONAL_LABELS

# Matching surfacer-specific configuration (one for each type in the above
# enum).
[prometheus_surfacer <cloudprober.surfacer.prometheus.SurfacerConf> |
 stackdriver_surfacer <cloudprober.surfacer.stackdriver.SurfacerConf> |
 file_surfacer <cloudprober.surfacer.file.SurfacerConf> |
 postgres_surfacer <cloudprober.surfacer.postgres.SurfacerConf> |
 pubsub_surfacer <cloudprober.surfacer.pubsub.SurfacerConf> |
 cloudwatch_surfacer <cloudprober.surfacer.cloudwatch.SurfacerConf> |
 datadog_surfacer <cloudprober.surfacer.datadog.SurfacerConf> |
 probestatus_surfacer <cloudprober.surfacer.probestatus.SurfacerConf> |
 bigquery_surfacer <cloudprober.surfacer.bigquery.SurfacerConf> |
 otel_surfacer <cloudprober.surfacer.otel.SurfacerConf>]: <oneof>
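For illustration, a minimal sketch of a surfacer block using the common options above, as it would appear in a cloudprober config. The probe name "check_homepage" in the label filter is a hypothetical example:

surfacer {
  type: PROMETHEUS

  # Only export metrics from the (hypothetical) "check_homepage" probe.
  allow_metrics_with_label {
    key: "probe"
    value: "check_homepage"
  }

  # Drop validation failure metrics by name.
  ignore_metrics_with_name: "validation_failure"

  add_failure_metric: true
}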
cloudprober.surfacer.bigquery.BQColumn #
label: <string> column_name: <string> column_type: <string>
cloudprober.surfacer.bigquery.SurfacerConf #
project_name: <string>
bigquery_dataset: <string>
bigquery_table: <string>

# Represents the bigquery table columns.
# bigquery_columns {
#   label: "id",
#   column_name: "id",
#   column_type: "string",
# }
bigquery_columns: <cloudprober.surfacer.bigquery.BQColumn>

# Bigquery client timeout in seconds. If the bigquery insertion is not
# completed within this time period, the request will fail and the failed
# rows will be retried later.
bigquery_timeout_sec: <int64> | default: 30

metrics_buffer_size: <int64> | default: 100000

# The time interval after which data will be inserted into bigquery. The
# default is 10 seconds, so every 10 seconds all the buffered EventMetrics
# will be inserted into bigquery, in a default batch size of 1000.
batch_timer_sec: <int64> | default: 10

metrics_batch_size: <int64> | default: 1000

# Column names for the metric timestamp, name, and value.
metric_time_col_name: <string> | default: metric_time
metric_name_col_name: <string> | default: metric_name
metric_value_col_name: <string> | default: metric_value
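A sketch of a BigQuery surfacer configuration, assuming the dataset and table already exist. The project, dataset, and table names below are placeholders:

surfacer {
  type: BIGQUERY
  bigquery_surfacer {
    project_name: "my-gcp-project"    # placeholder GCP project
    bigquery_dataset: "cloudprober"   # placeholder dataset
    bigquery_table: "metrics"         # placeholder table
    bigquery_timeout_sec: 30
    metrics_batch_size: 1000
  }
}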
cloudprober.surfacer.cloudwatch.SurfacerConf #
# The cloudwatch metric namespace.
namespace: <string> | default: cloudprober

# The cloudwatch resolution value. Lowering this below 60 will incur
# additional charges as the metrics will be charged at the high-resolution
# rate.
resolution: <int32> | default: 60

# The AWS Region, used to create a CloudWatch session.
# The order of fallback for evaluating the AWS Region:
# 1. This config value.
# 2. EC2 metadata endpoint, via cloudprober sysvars.
# 3. AWS_REGION environment value.
# 4. AWS_DEFAULT_REGION environment value, if AWS_SDK_LOAD_CONFIG is set.
# https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/
region: <string>

# The maximum number of metrics that will be published at one time. Metrics
# will be stored locally in a cache until this limit is reached. 1000 is the
# maximum number of metrics supported by the Cloudwatch PutMetricData API.
# Metrics will be published when the timer expires or the buffer is full,
# whichever happens first.
metrics_batch_size: <int32> | default: 1000

# The maximum amount of time to hold metrics in the buffer (above).
# Metrics will be published when the timer expires or the buffer is full,
# whichever happens first.
batch_timer_sec: <int32> | default: 30
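A sketch of a CloudWatch surfacer configuration. The region below is an assumption; on EC2 it can usually be omitted and is picked up via the metadata endpoint or environment variables:

surfacer {
  type: CLOUDWATCH
  cloudwatch_surfacer {
    namespace: "cloudprober"
    region: "us-east-1"   # assumed region; falls back to metadata/env if unset
    resolution: 60        # keep at 60 to avoid high-resolution pricing
  }
}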
cloudprober.surfacer.datadog.SurfacerConf #
# Prefix to add to all metrics.
prefix: <string> | default: cloudprober

# Datadog API key. If not set, the DD_API_KEY env variable is used.
api_key: <string>

# Datadog APP key. If not set, the DD_APP_KEY env variable is used.
app_key: <string>

# Datadog server, default: "api.datadoghq.com"
server: <string>

# The maximum number of metrics that will be published at one time. Metrics
# will be stored locally in a cache until this limit is reached. Datadog's
# SubmitMetric API has a maximum payload size of 500 kilobytes (512000
# bytes). Compressed payloads must have a decompressed size of less than
# 5 megabytes (5242880 bytes).
# Metrics will be published when the timer expires or the buffer is full,
# whichever happens first.
metrics_batch_size: <int32> | default: 1000

# The maximum amount of time to hold metrics in the buffer (above).
# Metrics will be published when the timer expires or the buffer is full,
# whichever happens first.
batch_timer_sec: <int32> | default: 30

# Disable gzip compression of the metric payload when sending metrics to
# Datadog. Compression is enabled by default.
disable_compression: <bool>
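A sketch of a Datadog surfacer configuration, assuming the API and APP keys are supplied through the DD_API_KEY and DD_APP_KEY environment variables rather than in the config file:

surfacer {
  type: DATADOG
  datadog_surfacer {
    prefix: "cloudprober"
    # api_key and app_key omitted; DD_API_KEY and DD_APP_KEY are used instead.
    metrics_batch_size: 1000
    batch_timer_sec: 30
  }
}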
cloudprober.surfacer.file.SurfacerConf #
# Where to write the results. If left unset, the file surfacer writes to
# standard output.
file_path: <string>

prefix: <string> | default: cloudprober

# Compress data before writing to the file.
compression_enabled: <bool> | default: false
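A sketch of a file surfacer writing compressed results to a local file; the path is a placeholder:

surfacer {
  type: FILE
  file_surfacer {
    file_path: "/var/log/cloudprober_metrics.log"   # placeholder path
    compression_enabled: true
  }
}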
cloudprober.surfacer.otel.GRPCExporter #
# If no URL is provided, the OpenTelemetry SDK will use the environment
# variable OTEL_EXPORTER_OTLP_METRICS_ENDPOINT or OTEL_EXPORTER_OTLP_ENDPOINT,
# in that preference order.
endpoint: <string>

tls_config: <cloudprober.tlsconfig.TLSConfig>

# HTTP request headers. These can also be set using environment variables.
http_header: <cloudprober.surfacer.otel.GRPCExporter.HttpHeaderEntry>

# Compression algorithm to use for gRPC requests.
compression: (NONE|GZIP): <enum>

# Whether to use an insecure gRPC connection.
insecure: <bool>
cloudprober.surfacer.otel.GRPCExporter.HttpHeaderEntry #
key: <string> value: <string>
cloudprober.surfacer.otel.HTTPExporter #
# If no URL is provided, the OpenTelemetry SDK will use the environment
# variable OTEL_EXPORTER_OTLP_METRICS_ENDPOINT or OTEL_EXPORTER_OTLP_ENDPOINT,
# in that preference order.
endpoint_url: <string>

tls_config: <cloudprober.tlsconfig.TLSConfig>

# HTTP request headers. These can also be set using environment variables.
http_header: <cloudprober.surfacer.otel.HTTPExporter.HttpHeaderEntry>

# Compression algorithm to use for HTTP requests.
compression: (NONE|GZIP): <enum>
cloudprober.surfacer.otel.HTTPExporter.HttpHeaderEntry #
key: <string> value: <string>
cloudprober.surfacer.otel.SurfacerConf #
[otlp_http_exporter <cloudprober.surfacer.otel.HTTPExporter> |
 otlp_grpc_exporter <cloudprober.surfacer.otel.GRPCExporter>]: <oneof>

# How often metrics will be exported. Note that metrics are accumulated
# internally and exported at this interval. Increasing this value will
# increase the memory usage.
export_interval_sec: <int32> | default: 10

# Prefix to use for metrics. Defaults to "cloudprober_".
metrics_prefix: <string> | default: cloudprober_

resource_attribute: <cloudprober.surfacer.otel.SurfacerConf.Attribute>
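A sketch of an OTel surfacer using the gRPC exporter. The collector endpoint and the resource attribute value are placeholders; the endpoint can also be left unset and supplied via OTEL_EXPORTER_OTLP_ENDPOINT:

surfacer {
  type: OTEL
  otel_surfacer {
    otlp_grpc_exporter {
      endpoint: "otel-collector:4317"   # placeholder collector endpoint
      insecure: true
      compression: GZIP
    }
    export_interval_sec: 10
    resource_attribute {
      key: "service.namespace"
      value: "probers"                  # placeholder value
    }
  }
}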
cloudprober.surfacer.otel.SurfacerConf.Attribute #
key: <string> value: <string>
cloudprober.surfacer.postgres.LabelToColumn #
# Label name.
label: <string>

# Column to map this label to.
column: <string>
cloudprober.surfacer.postgres.SurfacerConf #
# Postgres connection string.
# Example:
# "postgresql://root:${PASSWORD}@localhost/cloudprober?sslmode=disable"
connection_string: <string>

# Metrics table name.
# To create the table (when storing all labels in a single column in JSON
# format):
# CREATE TABLE metrics (
#   time timestamp, metric_name varchar(80), value float8, labels jsonb
# )
metrics_table_name: <string>

# Adding label_to_column fields changes how labels are stored in the Postgres
# table. If this field is not specified at all, all the labels are stored as
# jsonb values in the 'labels' column (this mode impacts performance
# negatively). If label_to_column entries are specified for some labels,
# those labels are stored in their dedicated columns; all the labels that
# don't have a mapping will be dropped.
label_to_column: <cloudprober.surfacer.postgres.LabelToColumn>

metrics_buffer_size: <int64> | default: 10000

# The maximum number of metric events that will be committed in one
# transaction at one time. Metrics will be stored locally until this limit
# is reached. Metrics will be committed to postgres when the timer expires
# or the buffer is full, whichever happens first.
metrics_batch_size: <int32> | default: 1

# The maximum amount of time to hold metrics in the buffer (above).
# Metrics will be committed to postgres when the timer expires or the buffer
# is full, whichever happens first.
batch_timer_sec: <int32> | default: 1
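A sketch of a Postgres surfacer that maps the "probe" and "dst" labels to dedicated columns (note that unmapped labels are dropped when label_to_column is used). The connection string follows the example above and the table name is a placeholder:

surfacer {
  type: POSTGRES
  postgres_surfacer {
    connection_string: "postgresql://root:${PASSWORD}@localhost/cloudprober?sslmode=disable"
    metrics_table_name: "metrics"   # placeholder table name
    label_to_column {
      label: "probe"
      column: "probe"
    }
    label_to_column {
      label: "dst"
      column: "dst"
    }
  }
}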
cloudprober.surfacer.probestatus.SurfacerConf #
# default 60s
resolution_sec: <int32> | default: 60

# Number of points in each timeseries. This field dictates how far back you
# can go: up to (resolution_sec * timeseries_size). Note that the higher this
# number, the more memory you'll use.
timeseries_size: <int32> | default: 4320

# Max targets per probe.
max_targets_per_probe: <int32> | default: 20

# ProbeStatus URL.
# Note that the older default URL /probestatus forwards to this URL to avoid
# breaking older default setups.
url: <string> | default: /status

# Page cache time.
cache_time_sec: <int32> | default: 2

# The probestatus surfacer is enabled by default. To disable it, set this
# option.
disable: <bool>
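A sketch of an explicit probestatus surfacer configuration (the surfacer is enabled by default, so this is only needed to override defaults). At the default 60s resolution, 4320 points cover roughly 3 days (60 * 4320 seconds):

surfacer {
  type: PROBESTATUS
  probestatus_surfacer {
    resolution_sec: 60
    timeseries_size: 4320
    url: "/status"
  }
}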
cloudprober.surfacer.prometheus.SurfacerConf #
# How many metrics entries (EventMetrics) to buffer. Incoming metrics
# processing is paused while serving data to prometheus. This buffer is to
# make writes to the prometheus surfacer non-blocking.
# NOTE: This field is confusing for users and will be removed from the config
# after v0.10.3.
metrics_buffer_size: <int64> | default: 10000

# Whether to include timestamps in metrics. If enabled (default), each metric
# string includes the metric timestamp as recorded in the EventMetric, and
# Prometheus associates the scraped values with this timestamp. If disabled,
# i.e. timestamps are not exported, Prometheus associates scraped values with
# the scrape timestamp.
include_timestamp: <bool> | default: true

# URL that prometheus scrapes metrics from.
metrics_url: <string> | default: /metrics

# Prefix to add to all metric names. For example, setting this field to
# "cloudprober_" will result in metrics with names:
# cloudprober_total, cloudprober_success, cloudprober_latency, ...
#
# As it's typically useful to set this across the deployment, this field can
# also be set through the command line flag --prometheus_metrics_prefix.
metrics_prefix: <string>
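A sketch of a Prometheus surfacer exposing metrics at the default /metrics URL with a deployment-wide name prefix:

surfacer {
  type: PROMETHEUS
  prometheus_surfacer {
    metrics_url: "/metrics"
    metrics_prefix: "cloudprober_"
    include_timestamp: true
  }
}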
cloudprober.surfacer.pubsub.SurfacerConf #
# GCP project name for pubsub. It's required if not running on GCP;
# otherwise it's retrieved from the metadata.
project: <string>

# Pubsub topic name.
# Default is cloudprober-{hostname}.
topic_name: <string>

# Compress data before writing to pubsub.
compression_enabled: <bool> | default: false
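A sketch of a Pub/Sub surfacer. The project is required only when not running on GCP, and the topic name below is a placeholder (the default is cloudprober-{hostname}):

surfacer {
  type: PUBSUB
  pubsub_surfacer {
    project: "my-gcp-project"           # placeholder; optional on GCP
    topic_name: "cloudprober-metrics"   # placeholder topic
    compression_enabled: true
  }
}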
cloudprober.surfacer.stackdriver.SurfacerConf #
# GCP project name for stackdriver. If not specified and running on GCP, the
# local project is used.
project: <string>

# How often to export metrics to stackdriver.
batch_timer_sec: <uint64> | default: 10

# If allowed_metrics_regex is specified, only metrics matching the given
# regular expression will be exported to stackdriver. Since probe type and
# probe name are part of the metric name, you can use this field to restrict
# stackdriver metrics to a particular probe.
# Example:
# allowed_metrics_regex: ".*(http|ping).*(success|validation_failure).*"
#
# Deprecated: Please use the common surfacer options to filter metrics:
# https://cloudprober.org/docs/surfacers/overview/#filtering-metrics
allowed_metrics_regex: <string>

# Monitoring URL base. The full metric URL looks like the following:
# <monitoring_url>/<ptype>/<probe>/<metric>
# Example:
# custom.googleapis.com/cloudprober/http/google-homepage/latency
monitoring_url: <string> | default: custom.googleapis.com/cloudprober/

# How many metrics entries to buffer. Incoming metrics processing is paused
# while serving data to Stackdriver. This buffer is to make writes to the
# Stackdriver surfacer non-blocking.
metrics_buffer_size: <int64> | default: 10000

# Metric prefix to use for stackdriver metrics. If not specified, the default
# is PTYPE_PROBE.
metrics_prefix: (NONE|PROBE|PTYPE_PROBE): <enum>
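A sketch of a Stackdriver (Cloud Monitoring) surfacer. The project is a placeholder and can be omitted when running on GCP:

surfacer {
  type: STACKDRIVER
  stackdriver_surfacer {
    project: "my-gcp-project"      # placeholder; optional on GCP
    batch_timer_sec: 10
    metrics_prefix: PTYPE_PROBE
  }
}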