Commit cb52432e authored by Zivi

Changed the Elasticsearch chart and adapted the values accordingly

parent ef15113e
@@ -20,60 +20,60 @@ affinity:
- jobs
jobs:
- name: csv-link-producer
image:
repository: source.dodis.ch:4577/histhub/csv-link-producer
resources:
limits:
memory: "128Mi"
cpu: 0.1
- name: csv-link-producer-urls
image:
repository: source.dodis.ch:4577/histhub/csv-link-producer-url
resources:
limits:
memory: "128Mi"
cpu: 0.1
- name: dodis
image:
repository: source.dodis.ch:4577/histhub/dodis
buildSitemap: true
resources:
limits:
memory: "128Mi"
cpu: 0.2
cronjob:
schedule: "59 23 * * 2"
- name: fundmuenzen
image:
repository: source.dodis.ch:4577/histhub/fundmuenzen
resources:
limits:
memory: "256Mi"
cpu: 0.1
cronjob:
schedule: "59 23 * * 3"
- name: geonames-dump-producer
image:
repository: source.dodis.ch:4577/histhub/geonames-dump-producer
resources:
limits:
memory: "1Gi"
cpu: 1
requests:
memory: "512Mi"
cpu: 0.4
cronjob:
schedule: "59 23 10 * *"
- name: ortsnamen
image:
repository: source.dodis.ch:4577/histhub/ortsnamen
resources:
limits:
memory: "256Mi"
cpu: 0.2
cronjob:
schedule: "59 23 * * 4"
# - name: csv-link-producer
# image:
# repository: source.dodis.ch:4577/histhub/csv-link-producer
# resources:
# limits:
# memory: "128Mi"
# cpu: 0.1
# - name: csv-link-producer-urls
# image:
# repository: source.dodis.ch:4577/histhub/csv-link-producer-url
# resources:
# limits:
# memory: "128Mi"
# cpu: 0.1
# - name: dodis
# image:
# repository: source.dodis.ch:4577/histhub/dodis
# buildSitemap: true
# resources:
# limits:
# memory: "128Mi"
# cpu: 0.2
# cronjob:
# schedule: "59 23 * * 2"
# - name: fundmuenzen
# image:
# repository: source.dodis.ch:4577/histhub/fundmuenzen
# resources:
# limits:
# memory: "256Mi"
# cpu: 0.1
# cronjob:
# schedule: "59 23 * * 3"
# - name: geonames-dump-producer
# image:
# repository: source.dodis.ch:4577/histhub/geonames-dump-producer
# resources:
# limits:
# memory: "1Gi"
# cpu: 1
# requests:
# memory: "512Mi"
# cpu: 0.4
# cronjob:
# schedule: "59 23 10 * *"
# - name: ortsnamen
# image:
# repository: source.dodis.ch:4577/histhub/ortsnamen
# resources:
# limits:
# memory: "256Mi"
# cpu: 0.2
# cronjob:
# schedule: "59 23 * * 4"
- name: wikidata-dump-producer
image:
repository: source.dodis.ch:4577/histhub/wikidata-dump-producer
@@ -84,19 +84,19 @@ jobs:
requests:
memory: "512Mi"
cpu: 0.4
- name: ethz
image:
repository: source.dodis.ch:4577/histhub/ethz
cronjob:
schedule: "59 23 * * 6"
# - name: ethz
# image:
# repository: source.dodis.ch:4577/histhub/ethz
# cronjob:
# schedule: "59 23 * * 6"
deployments:
- name: wikidata-live-feed
image:
repository: source.dodis.ch:4577/histhub/wikidata-live-feed
resources:
limits:
cpu: 0.2
memory: "256Mi"
requests:
memory: "128Mi"
deployments: {}
# - name: wikidata-live-feed
# image:
# repository: source.dodis.ch:4577/histhub/wikidata-live-feed
# resources:
# limits:
# cpu: 0.2
# memory: "256Mi"
# requests:
# memory: "128Mi"
@@ -14,7 +14,7 @@ container:
- name: "kafka:broker"
value: "geolinker-kafka-bootstrap:9092"
- name: "elasticsearch:server"
value: "http://elastic-elasticsearch-client"
value: "http://elasticsearch-master"
- name: "elasticsearch:port"
value: "9200"
......
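
The endpoint change above follows from replacing the old elastic-stack chart, which exposed a client Service named elastic-elasticsearch-client, with the official Elastic chart, whose default master Service is elasticsearch-master; the same rename recurs in the nginx proxy and resolver values further down. A sketch of the equivalent fully-qualified in-cluster address, assuming everything runs in the geolinker namespace:

- name: "elasticsearch:server"
  value: "http://elasticsearch-master.geolinker.svc.cluster.local"   # namespace is an assumption
- name: "elasticsearch:port"
  value: "9200"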
@@ -43,4 +43,7 @@ spec:
pod:
affinity:
{{ toYaml .Values.kafka.affinity | nindent 10 }}
entityOperator: {}
entityOperator:
userOperator: {}
topicOperator:
watchedNamespace: {{ .Values.kafka.topicOperator.watchedNamespace }}
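
With the previous `entityOperator: {}` no operator containers were deployed; declaring `userOperator` and `topicOperator` makes Strimzi run both, and the Topic Operator then reconciles KafkaTopic resources only in the configured namespace. Rendered with the default value introduced below, the block becomes:

entityOperator:
  userOperator: {}
  topicOperator:
    watchedNamespace: geolinker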
@@ -6,6 +6,8 @@ kafka:
storageSpace: 10Gi
resources: {}
affinity: {}
topicOperator:
watchedNamespace: geolinker
kafkaConnect:
enabled: true
@@ -19,12 +21,20 @@ zk:
affinity: {}
kafkaTopics:
- name: extract
- name: magpie
- name: wikidata-geo
partitions: 5
- name: response-geoconcordance-metadata
retentionMs: 8400000
- name: request-geoconcordance
retentionMs: 8400000
- name: response-geoconcordance
retentionMs: 8400000
- name: response-geoconcordance-url
retentionMs: 8400000
- name: geolinker
partitions: 5
- name: linker
partitions: 5
schemaRegistry:
name: kafka-schema-registry
......
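
Each kafkaTopics entry is presumably templated into a Strimzi KafkaTopic resource in the watched namespace; the sketch below assumes that, and a cluster named geolinker (matching the geolinker-kafka-bootstrap service used elsewhere in these values). Note that retentionMs: 8400000 amounts to roughly 2 hours 20 minutes, which is worth double-checking if a longer retention window was intended.

apiVersion: kafka.strimzi.io/v1beta2        # older Strimzi releases use v1beta1
kind: KafkaTopic
metadata:
  name: response-geoconcordance
  namespace: geolinker
  labels:
    strimzi.io/cluster: geolinker           # cluster name is an assumption
spec:
  partitions: 1                             # chart default assumed when partitions is unset
  replicas: 1
  config:
    retention.ms: 8400000                   # ~2h20m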
@@ -15,7 +15,7 @@ container:
- name: "kafka:broker"
value: "geolinker-kafka-bootstrap:9092"
- name: "neo4j:config:uri"
value: "bolt+routing://neo4j-neo4j:7687"
value: "bolt://neo4j-neo4j:7687"
- name: "neo4j:config:user"
value: "neo4j"
......
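
bolt+routing:// (the routing scheme, neo4j:// in newer drivers) only works against a causal cluster that serves routing tables; since the Neo4j values in this commit keep core.numberOfServers at 1, a plain bolt:// URI is the appropriate choice here, as well as in the resolver values below.

- name: "neo4j:config:uri"
  value: "bolt://neo4j-neo4j:7687"          # direct driver connection; no routing table needed for a single core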
---
elasticsearch:
master:
replicas: 2
nodeSelector:
doks.digitalocean.com/node-pool: elastic
persistence:
enabled: false
# resources:
# limits:
# memory: 800Mi
# cpu: 500m
client:
nodeSelector:
doks.digitalocean.com/node-pool: app
data:
heapSize: "256m"
replicas: 1
nodeSelector:
doks.digitalocean.com/node-pool: elastic
resources:
limits:
memory: 1024Mi
cpu: 0.2
requests:
memory: 512Mi
kibana:
enabled: false
nodeSelector:
doks.digitalocean.com/node-pool: app
logstash:
enabled: false
elasticsearch:
host: elastic-stack-elasticsearch-client
nodeSelector:
doks.digitalocean.com/node-pool: elastic
filebeat:
enabled: false
config:
output.file.enabled: false
output.logstash:
hosts: ["elastic-logstash:5044"]
indexTemplateLoad:
- elastic-stack-elasticsearch-client:9200
fluentd:
enabled: false
elasticsearch:
host: elastic-stack-elasticsearch-client
fluent-bit:
enabled: false
elasticsearch:
host: elastic-stack-elasticsearch-client
fluentd-elasticsearch:
enabled: false
elasticsearch:
host: elastic-stack-elasticsearch-client
elasticsearch-exporter:
enabled: false
nodeSelector:
doks.digitalocean.com/node-pool: elastic
es:
uri: "http://elastic-elasticsearch-client:9200"
@@ -22,7 +22,7 @@ nginxConf: |
http {
# define proxy upstream to Elasticsearch via loopback interface in
upstream es-http {
server elastic-elasticsearch-client:9200;
server elasticsearch-master:9200;
}
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
@@ -96,13 +96,13 @@ nginxConf: |
access_log /dev/stdout main;
upstream es-tcp {
server elastic-elasticsearch-discovery:9300;
server elasticsearch-master:9300;
}
server {
listen 9300;
proxy_pass elastic-elasticsearch-discovery:9300;
proxy_pass elasticsearch-master:9300;
include ./allow.conf;
deny all;
}
......
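
The old chart published separate Services for HTTP (elastic-elasticsearch-client:9200) and transport traffic (elastic-elasticsearch-discovery:9300); with the official chart both ports sit behind elasticsearch-master. One possible simplification, sketched below and assuming this server block lives in the config's stream context as the schemeless proxy_pass suggests: proxy_pass can reference the already-defined upstream instead of repeating host and port.

    upstream es-tcp {
        server elasticsearch-master:9300;
    }
    server {
        listen 9300;
        proxy_pass es-tcp;        # reuse the upstream definition
        include ./allow.conf;
        deny all;
    }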
@@ -22,7 +22,7 @@ nginxConf: |
http {
# define proxy upstream to Elasticsearch via loopback interface in
upstream es-http {
server elastic-elasticsearch-client:9200;
server elasticsearch-master:9200;
}
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
@@ -96,13 +96,13 @@ nginxConf: |
access_log /dev/stdout main;
upstream es-tcp {
server elastic-elasticsearch-discovery:9300;
server elasticsearch-master:9300;
}
server {
listen 9300;
proxy_pass elastic-elasticsearch-discovery:9300;
proxy_pass elasticsearch-master:9300;
include ./allow.conf;
deny all;
}
......
minimumMasterNodes: 1
replicas: 1
nodeSelector:
doks.digitalocean.com/node-pool: elastic
esMajorVersion: 7
esJavaOpts: "-Xmx2g -Xms2g"
resources:
requests:
cpu: 1
memory: 1Gi
limits:
cpu: 1.5
memory: 3Gi
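
These are values for the official Elastic Helm chart: a single node (replicas: 1, minimumMasterNodes: 1) pinned to the elastic node pool, with a 2g heap inside a 3Gi limit, which keeps the heap at or below roughly half of available memory and leaves room for Lucene's off-heap usage. With esMajorVersion: 7 the chart bootstraps the cluster via cluster.initial_master_nodes, so minimumMasterNodes mainly matters for 6.x. A sketch of how these values might look if the cluster were scaled out later (not part of this commit):

replicas: 3
minimumMasterNodes: 2               # quorum: floor(3/2) + 1, relevant for ES 6.x
esJavaOpts: "-Xmx2g -Xms2g"         # heap at or below ~50% of the memory limit
resources:
  requests:
    cpu: 1
    memory: 2Gi
  limits:
    cpu: 1.5
    memory: 4Gi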
@@ -3,6 +3,7 @@ watchAnyNamespace: true
pullSecret: dodis
kafka:
storageSpace: 20Gi
replicas: 3
resources:
limits:
......
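
Raising the broker count to 3 makes a replication factor of 3 possible; with min.insync.replicas set to 2 the cluster can then lose one broker without blocking acks=all producers. A sketch of the matching broker configuration in the Strimzi Kafka resource, assuming the chart's template exposes a kafka.config block (these keys are not set in this commit):

kafka:
  replicas: 3
  config:
    default.replication.factor: 3
    min.insync.replicas: 2
    offsets.topic.replication.factor: 3
    transaction.state.log.replication.factor: 3
    transaction.state.log.min.isr: 2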
@@ -3,17 +3,18 @@
acceptLicenseAgreement: "yes"
core:
numberOfServers: 1
extraVars:
- name: NEO4J_dbms.memory.heap.max_size
value: 500M
value: 2048M
- name: NEO4J_dbms_memory_pagecache_size
value: 500M
value: 1024M
resources:
requests:
memory: 256Mi
limits:
memory: 1500Mi
memory: 3Gi
affinity:
nodeAffinity:
......
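
The new limit equals the sum of the two memory pools: 2048M heap + 1024M page cache = 3072Mi = 3Gi, which leaves no headroom for the JVM's other native usage (metaspace, thread stacks, network buffers), so a slightly larger limit or a smaller page cache may prove safer in practice. Also note the mixed dot/underscore naming of the heap variable; Neo4j's environment convention normally replaces dots with underscores (NEO4J_dbms_memory_heap_max__size), so it is worth verifying that the heap setting actually takes effect. Annotated sketch of the new values:

extraVars:
  - name: NEO4J_dbms.memory.heap.max_size   # convention would be NEO4J_dbms_memory_heap_max__size
    value: 2048M                            # JVM heap
  - name: NEO4J_dbms_memory_pagecache_size
    value: 1024M                            # off-heap page cache
resources:
  limits:
    memory: 3Gi                             # 2048M + 1024M = 3072Mi; no headroom for JVM overhead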
@@ -13,7 +13,7 @@ resolver:
memory: 128Mi
cpu: 0.1
extraCommandArgs:
- "--elasticsearch:server=http://elastic-elasticsearch-client"
- "--elasticsearch:server=http://elasticsearch-master"
- "--elasticsearch:port=9200"
- name: resolver-neo4j
image:
@@ -23,7 +23,7 @@ resolver:
memory: 128Mi
cpu: 0.1
extraCommandArgs:
- "--neo4j:config:uri=bolt+routing://neo4j-neo4j:7687"
- "--neo4j:config:uri=bolt://neo4j-neo4j:7687"
- "--neo4j:config:user=neo4j"
env:
- name: neo4j_config_password
......