Compare commits

12 Commits

Author SHA1 Message Date
f19d2f3f6b Gitea : certificat 2024-10-28 19:54:55 +01:00
f3943a2b85 Gitea update 2024-10-28 07:45:52 +01:00
2dece804ac Update for Gitea : Traefik configuration (disable port) 2024-10-24 08:00:44 +02:00
7e0eaa837e update for Gitea : Traefik configuration 2024-10-24 07:58:45 +02:00
ec1c47116c add openvpn 2024-10-22 20:07:50 +02:00
19a84b8027 add all port for traefik 2024-10-22 19:52:01 +02:00
7be83a326b add info traefik 2024-10-22 19:21:13 +02:00
ea35f038d3 update 2024-10-20 20:07:57 +02:00
d973fe54b9 ReadMe 2024-10-20 19:55:51 +02:00
df1268a06b ReadMe Harbor 2024-10-20 19:52:55 +02:00
57f4b8df9f Readme Harbor 2024-10-20 19:52:01 +02:00
10012be920 Add Harbor And Update Traefik 2024-10-20 19:47:18 +02:00
36 changed files with 2313 additions and 38 deletions

View File

@ -0,0 +1,3 @@
Additional permission under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or combining it with [name of library] (or a modified version of that library), containing parts covered by the terms of [name of library's license], the licensors of this Program grant you additional permission to convey the resulting work. Corresponding Source for a non-source form of such a combination shall include the source code for the parts of [name of library] used as well as that of the covered work.

Outils/docuseal/README.md Normal file
View File

@ -0,0 +1,66 @@
![Gitea](./img/banniere-Gitea.png)
URL : https://github.com/shlinkio/shlink
# Gitea
Gitea is a lightweight, self-hosted Git service written in Go. It is quite similar to GitHub, Bitbucket, and GitLab. In addition to supporting Git revision control, it also provides issue tracking and wiki pages. If you are looking for an alternative to Gitea, check out my article on Forgejo.
# Download, Configuration, and Launch
## Downloading Gitea
Enter the command to download the source:
```bash
git clone https://git.tips-of-mine.fr/Tips-Of-Mine/Docker.git
```
Enter the command to change into the directory:
```bash
cd Repository/Gitea
```
## Editing the Gitea configuration
Enter the command to change into the directory:
```bash
cd Repository/Gitea
```
Edit the configuration file:
```bash
nano .env
```
Modify the variables you need.
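As an illustration (not from the original guide), here is a minimal sketch of `.env` entries using variable names that appear in the compose file of this repository; the values are placeholders to adapt to your environment:
```bash
# Hypothetical example values, adjust to your setup
GITEA_URL=https://gitea.traefik.me
GITEA_NOREPLY_EMAIL=noreply@example.com
GITEA_RUNNER_IMAGE_TAG=gitea/act_runner:latest
```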
## Launching Gitea
To run Gitea on its own:
```bash
docker compose up -d
```
To run Gitea with Traefik:
```bash
docker compose -f docker-compose-traefik.yml up -d
```
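To confirm the stack came up, you can list the services (an optional check, not part of the original steps):
```bash
docker compose -f docker-compose-traefik.yml ps
```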
# Usage
## Home
Open a web page at the URL:
For standalone use:
http://10.0.4.29:3000
For use with Traefik:
https://gitea.traefik.me
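As a quick reachability test (my own suggestion, using the hostname from the Traefik labels):
```bash
curl -kI https://gitea.traefik.me
```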
# More info
- More information on the website [Tips-Of-Mine](https://www.tips-of-mine.fr/)
# Buy me a coffee
<a href='https://ko-fi.com/R5R2KNI3N' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://storage.ko-fi.com/cdn/kofi4.png?v=3' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>

View File

@ -0,0 +1,131 @@
#### NETWORKS
networks:
traefik_front_network:
external: true
back_network:
driver: bridge
attachable: true
#### SERVICES
services:
### docuseal
docuseal:
container_name: docuseal-app
hostname: docuseal-app
image: docuseal/docuseal:latest
volumes:
- ./docuseal:/data/docuseal
environment:
- FORCE_SSL=https://docuseal.traefik.me
- DATABASE_URL=postgresql://postgres:postgres@postgres:5432/docuseal
depends_on:
postgres:
condition: service_healthy
restart: true
msmtpd:
condition: service_healthy
restart: true
networks:
- traefik_front_network
- back_network
labels:
- "traefik.enable=true"
- "traefik.docker.network=traefik_front_network"
# HTTP
- "traefik.http.routers.docuseal-http.rule=Host(`docuseal.traefik.me`)"
- "traefik.http.routers.docuseal-http.entrypoints=http"
# HTTPS
- "traefik.http.routers.docuseal-https.rule=Host(`docuseal.traefik.me`)"
- "traefik.http.routers.docuseal-https.entrypoints=https"
- "traefik.http.routers.docuseal-https.service=docuseal-service"
- "traefik.http.routers.docuseal-https.tls=true"
# Middleware
# Service
- "traefik.http.services.docuseal-service.loadbalancer.server.port=3000"
### postgres
postgres:
container_name: docuseal-postgres
hostname: docuseal-postgres
image: postgres:15.6-alpine
environment:
PGDATA: /var/lib/postgresql/data/docuseal
POSTGRES_DB: docuseal
POSTGRES_PASSWORD: postgres
POSTGRES_USER: postgres
networks:
- back_network
restart: always
volumes:
- ./data:/var/lib/postgresql/data:rw
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 5s
timeout: 5s
retries: 5
### msmtpd
msmtpd:
container_name: docuseal-msmtpd
hostname: docuseal-msmtpd
image: crazymax/msmtpd:latest
networks:
- back_network
environment:
- "TZ=Europe/Paris"
- "PUID=1500"
- "PGID=1500"
- "SMTP_HOST=10.0.4.52"
- "SMTP_PORT=587"
- "SMTP_TLS=on"
- "SMTP_STARTTLS=on"
- "SMTP_TLS_CHECKCERT=off"
- "SMTP_AUTH=on"
- "SMTP_USER=hostinfo@tips-of-mine.fr"
- "SMTP_PASSWORD=Passw0rd#12345"
- "SMTP_DOMAIN=localhost"
- "SMTP_FROM=hostinfo@tips-of-mine.fr"
restart: always
healthcheck:
test: ["CMD-SHELL", "echo EHLO localhost"]
interval: 5s
timeout: 5s
retries: 5
### backups
backups:
container_name: docuseal-postgres-backup
hostname: docuseal-postgres-backup
image: postgres:15.6-alpine
command: >-
sh -c 'sleep 30m &&
while true; do
pg_dump -h postgres -p 5432 -d $POSTGRE_DB_NAME -U $POSTGRE_DB_USER | gzip > $POSTGRES_BACKUPS_PATH/$POSTGRES_BACKUP_NAME-$(date "+%Y-%m-%d_%H-%M").gz &&
tar -zcpf $DATA_BACKUPS_PATH/$DATA_BACKUP_NAME-$(date "+%Y-%m-%d_%H-%M").tar.gz $DATA_PATH &&
find $POSTGRES_BACKUPS_PATH -type f -mtime +$POSTGRES_BACKUP_PRUNE_DAYS | xargs rm -f &&
find $DATA_BACKUPS_PATH -type f -mtime +$DATA_BACKUP_PRUNE_DAYS | xargs rm -f;
sleep $BACKUP_INTERVAL; done'
environment:
POSTGRE_DB_NAME: docuseal
POSTGRE_DB_USER: postgres
PGPASSWORD: postgres
BACKUP_INIT_SLEEP: 30m
BACKUP_INTERVAL: 24h
POSTGRES_BACKUP_PRUNE_DAYS: 7
DATA_BACKUP_PRUNE_DAYS: 7
POSTGRES_BACKUPS_PATH: /srv/docuseal-postgres/backups
DATA_BACKUPS_PATH: /srv/docuseal-application-data/backups
DATA_PATH: /bitnami/docuseal
POSTGRES_BACKUP_NAME: docuseal-postgres-backup
DATA_BACKUP_NAME: docuseal-application-data-backup
restart: unless-stopped
networks:
- back_network
volumes:
- ./backup/data:/var/lib/postgresql/data
- ./backup/docuseal:/bitnami/docuseal
- ./backup/backups:/srv/docuseal-application-data/backups
- ./backup/database:/srv/docuseal-postgres/backups
depends_on:
postgres:
condition: service_healthy

View File

@ -27,7 +27,7 @@ services:
- GITEA__mailer__ENABLED=true
- GITEA__mailer__FROM=gitea@tips-of-mine.fr
- GITEA__mailer__PROTOCOL=smtps
- GITEA__mailer__SMTP_ADDR=relaissmtp.tips-of-mine.fr
- GITEA__mailer__SMTP_ADDR=msmtpd
- GITEA__mailer__SMTP_PORT=25
- GITEA__mailer__USER=
- GITEA__mailer__PASSWD=
@ -54,7 +54,7 @@ services:
- GITEA__service__DEFAULT_ALLOW_CREATE_ORGANIZATION=true
- GITEA__service__DEFAULT_ENABLE_TIMETRACKING=true
- GITEA__service__NO_REPLY_ADDRESS=${GITEA_NOREPLY_EMAIL}
- GITEA__repository__ENABLE_PUSH_CREATE_USER=true
- GITEA__repository__ENABLE_PUSH_CREATE_USER=true
- GITEA__repository__ENABLE_PUSH_CREATE_ORG=true
- GITEA__RUN_MODE=prod
- GITEA__APP_NAME=Gitea for me
@ -68,12 +68,13 @@ services:
- ./log:/app/gitea/log:rw
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "3080:3000"
- "222:22"
# ports:
# - "3080:3000"
# - "222:22"
depends_on:
postgres:
condition: service_healthy
restart: true
healthcheck:
test: ["CMD", "curl", "-f", "http://gitea.traefik.me/"]
interval: 10s
@ -82,20 +83,40 @@ services:
start_period: 90s
labels:
- "traefik.enable=true"
- "traefik.docker.network=docker-traefik_front_network"
- "traefik.docker.network=traefik_front_network"
# HTTP
# - "traefik.http.routers.gitea-http.rule=Host(`gitea.tips-of-mine.fr`)"
- "traefik.http.routers.gitea-http.rule=Host(`gitea.traefik.me`)"
- "traefik.http.routers.gitea-http.entrypoints=http"
- "traefik.http.routers.gitea-http.rule=Host(`gitea.traefik.me`)"
- "traefik.http.routers.gitea-http.priority=39"
# HTTPS
# - "traefik.http.routers.gitea-https.rule=Host(`gitea.tips-of-mine.fr`)"
- "traefik.http.routers.gitea-https.rule=Host(`gitea.traefik.me`)"
- "traefik.http.routers.gitea-https.entrypoints=https"
- "traefik.http.routers.gitea-https.rule=Host(`gitea.traefik.me`)"
- "traefik.http.routers.gitea-https.tls=true"
- "traefik.http.routers.gitea.service=gitea-service"
# - "traefik.http.routers.gitea-https.tls.certresolver=myresolver"
- "traefik.http.routers.gitea-https.service=gitea-service"
- "traefik.http.routers.gitea-https.priority=40"
# TCP
# - "traefik.tcp.routers.gitea-ssh.entrypoints=ssh"
# - "traefik.tcp.routers.gitea-ssh.rule=HostSNI(`gitea.traefik.me`)"
# - "traefik.tcp.routers.gitea-ssh.service=gitea-ssh-service"
# - "traefik.tcp.routers.gitea-ssh.tls=true"
# - "traefik.tcp.routers.gitea-ssh.tls.certresolver=myresolver"
# Middleware
# Service
- "traefik.http.services.gitea-service.loadbalancer.server.port=3000"
# - "traefik.http.services.gitea-https-service.loadbalancer.server.scheme=https"
# - "traefik.http.services.gitea-https.loadbalancer.passhostheader=true"
# - "traefik.http.services.gitea-https.loadbalancer.healthcheck.path=/foo"
- "traefik.http.services.gitea-https-service.loadbalancer.healthcheck.hostname=gitea.traefik.me"
- "traefik.http.services.gitea-https-service.loadbalancer.healthcheck.interval=30"
- "traefik.http.services.gitea-https-service.loadbalancer.healthcheck.method=foobar"
# - "traefik.http.services.gitea-https.loadbalancer.healthcheck.status=42"
# - "traefik.http.services.gitea-https.loadbalancer.healthcheck.port=42"
# - "traefik.http.services.gitea-https.loadbalancer.healthcheck.scheme=http"
- "traefik.http.services.gitea-https-service.loadbalancer.healthcheck.timeout=10"
# - "traefik.tcp.services.gitea-ssh-service.loadbalancer.server.port=22"
# - "traefik.tcp.services.gitea-ssh.loadbalancer.server.tls=true"
# - "traefik.tcp.services.gitea-ssh.loadbalancer.proxyprotocol.version=1"
### postgres
postgres:
@ -132,23 +153,36 @@ services:
depends_on:
postgres:
condition: service_healthy
restart: true
labels:
- "traefik.enable=true"
- "traefik.docker.network=docker-traefik_front_network"
# HTTP
# - "traefik.http.routers.adminer-http.rule=Host(`gitea-adminer.tips-of-mine.fr`)"
- "traefik.http.routers.adminer-http.rule=Host(`gitea-adminer.traefik.me`)"
- "traefik.http.routers.adminer-http.entrypoints=http"
- "traefik.http.routers.adminer-http.rule=Host(`gitea-adminer.traefik.me`)"
- "traefik.http.routers.adminer-http.priority=41"
# HTTPS
# - "traefik.http.routers.adminer-https.rule=Host(`gitea-adminer.tips-of-mine.fr`)"
- "traefik.http.routers.adminer-https.rule=Host(`gitea-adminer.traefik.me`)"
- "traefik.http.routers.adminer-https.entrypoints=https"
- "traefik.http.routers.adminer-https.rule=Host(`gitea-adminer.traefik.me`)"
- "traefik.http.routers.adminer-https.tls=true"
- "traefik.http.routers.adminer.service=adminer-service"
# - "traefik.http.routers.adminer-https.tls.certresolver=myresolver"
- "traefik.http.routers.adminer-https.service=adminer-service"
- "traefik.http.routers.adminer-https.priority=42"
# TCP
# Middleware
# Service
- "traefik.http.services.adminer-service.loadbalancer.server.port=8080"
# - "traefik.http.services.adminer-https-service.loadbalancer.server.scheme=https"
# - "traefik.http.services.adminer-https.loadbalancer.passhostheader=true"
# - "traefik.http.services.adminer-https.loadbalancer.healthcheck.path=/foo"
- "traefik.http.services.adminer-https-service.loadbalancer.healthcheck.hostname=gitea-adminer.traefik.me"
- "traefik.http.services.adminer-https-service.loadbalancer.healthcheck.interval=30"
- "traefik.http.services.adminer-https-service.loadbalancer.healthcheck.method=foobar"
# - "traefik.http.services.adminer-https.loadbalancer.healthcheck.status=42"
# - "traefik.http.services.adminer-https.loadbalancer.healthcheck.port=42"
# - "traefik.http.services.adminer-https.loadbalancer.healthcheck.scheme=http"
- "traefik.http.services.adminer-https-service.loadbalancer.healthcheck.timeout=10"
### backups
backups:
container_name: gitea-postgres-backup
@ -186,6 +220,7 @@ services:
depends_on:
postgres:
condition: service_healthy
restart: true
### runner
runner:
@ -194,18 +229,49 @@ services:
image: ${GITEA_RUNNER_IMAGE_TAG}
environment:
- GITEA_INSTANCE_URL=${GITEA_URL}
- CONFIG_FILE=/config.yaml
- CONFIG_FILE=/config.yaml
- GITEA_RUNNER_REGISTRATION_TOKEN=8CZ3Sd2VHITGIA1Xr2bo5j8l5Pj1EvTblTCeDmJU
- GITEA_RUNNER_NAME=gitea-runner
- GITEA_RUNNER_LABELS=ubuntu-20.04:docker://registry.traefik.me/gitea/default-image:ubuntu-20.04
restart: unless-stopped
networks:
- back_network
volumes:
- ./data-runner/act_runner:/data
- ./config.yaml:/config.yaml
- ./data-runner/cache:/root/.cache
# - ./data-runner:/data
# - ./config.yaml:/config.yaml
# - ./data-runner/cache:/root/.cache
- /etc/ssl/certs/:/etc/ssl/certs/
- /var/run/docker.sock:/var/run/docker.sock
privileged: true
depends_on:
gitea:
condition: service_healthy
restart: true
### msmtpd
msmtpd:
container_name: gitea-msmtpd
hostname: gitea-msmtpd
image: crazymax/msmtpd:latest
networks:
- back_network
# ports:
# - 2500:2500
environment:
- "TZ=Europe/Paris"
- "PUID=1500"
- "PGID=1500"
- "SMTP_HOST=10.0.4.52"
- "SMTP_PORT=587"
- "SMTP_TLS=on"
- "SMTP_STARTTLS=on"
- "SMTP_TLS_CHECKCERT=off"
- "SMTP_AUTH=on"
- "SMTP_USER=hostinfo@tips-of-mine.fr"
- "SMTP_PASSWORD=Whf2VtLEd2QR4er"
- "SMTP_DOMAIN=localhost"
- "SMTP_FROM=hostinfo@tips-of-mine.fr"
restart: always
healthcheck:
test: ["CMD-SHELL", "echo EHLO localhost"]
interval: 5s

View File

@ -0,0 +1,7 @@
# Dockerfile
FROM gitea/runner-images:ubuntu-20.04
# Gitea HTTPS pem
COPY ./cert.pem /etc/ssl/certs/cert.pem
COPY ./chain.pem /etc/ssl/certs/chain.pem
COPY ./fullchain.pem /etc/ssl/certs/fullchain.pem

View File

@ -0,0 +1,4 @@
docker build --tag registry.traefik.me/gitea/default-image:ubuntu-20.04 .
docker push registry.traefik.me/gitea/default-image:ubuntu-20.04

View File

@ -26,9 +26,80 @@ Enter the command to change into the directory
cd Repository/Harbor
```
Edit the harbor.yml file:
```bash
nano harbor.yml
```
Comment out the lines as shown below and change the default port:
```bash
# Configuration file of Harbor
# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: registry.traefik.me
# http related config
http:
# port for http, default is 80. If https enabled, this port will redirect to https port
port: 8083
# https related config
##https:
# https port for harbor, default is 443
## port: 443
# The path of cert and key files for nginx
## certificate: /your/certificate/path
## private_key: /your/private/key/path
# enable strong ssl ciphers (default: false)
# strong_ssl_ciphers: false
# # Harbor will set ipv4 enabled only by default if this block is not configured
# # Otherwise, please uncomment this block to configure your own ip_family stacks
# ip_family:
# # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
# ipv6:
# enabled: false
# # ipv4Enabled set to true by default, currently it affected the nginx related component
# ipv4:
# enabled: true
# # Uncomment following will enable tls communication between all harbor components
# internal_tls:
# # set enabled to true means internal tls is enabled
# enabled: true
# # put your cert and key files on dir
# dir: /etc/harbor/tls/internal
# Uncomment external_url if you want to enable external proxy
# And when it enabled the hostname will no longer used
external_url: https://registry.traefik.me
```
Edit the configuration file, then regenerate the Harbor configuration with the prepare script:
```bash
nano .env
./prepare --with-trivy
```
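As a hedged check that `prepare` picked up the custom HTTP port from `harbor.yml` (8083 in the configuration above), you can search the regenerated compose file:
```bash
grep -n "8083" docker-compose.yml
```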
Stop Harbor
```bash
docker compose stop && docker compose rm -f
```
Edit the Nginx configuration file:
```bash
nano common/config/nginx/nginx.conf
```
Comment out the following line in each location block:
```bash
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
```
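The directive appears in several `location` blocks, so an optional check (not in the original steps) is to confirm every occurrence is now commented out:
```bash
grep -n "X-Forwarded-Proto" common/config/nginx/nginx.conf
```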
Restart Harbor:
```bash
docker compose up -d
```
Modify the variables you need.
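Once Harbor is back up, a simple sanity check (my own suggestion, assuming Harbor's v2.0 REST API) is to query the system info endpoint through the external URL configured above:
```bash
curl -k https://registry.traefik.me/api/v2.0/systeminfo
```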

Repository/Harbor/common.sh Normal file
View File

@ -0,0 +1,138 @@
#!/bin/bash
#docker version: 20.10.10+
#docker-compose version: 1.18.0+
#golang version: 1.12.0+
set +e
set -o noglob
#
# Set Colors
#
bold=$(tput bold)
underline=$(tput sgr 0 1)
reset=$(tput sgr0)
red=$(tput setaf 1)
green=$(tput setaf 76)
white=$(tput setaf 7)
tan=$(tput setaf 202)
blue=$(tput setaf 25)
#
# Headers and Logging
#
underline() { printf "${underline}${bold}%s${reset}\n" "$@"
}
h1() { printf "\n${underline}${bold}${blue}%s${reset}\n" "$@"
}
h2() { printf "\n${underline}${bold}${white}%s${reset}\n" "$@"
}
debug() { printf "${white}%s${reset}\n" "$@"
}
info() { printf "${white}➜ %s${reset}\n" "$@"
}
success() { printf "${green}✔ %s${reset}\n" "$@"
}
error() { printf "${red}✖ %s${reset}\n" "$@"
}
warn() { printf "${tan}➜ %s${reset}\n" "$@"
}
bold() { printf "${bold}%s${reset}\n" "$@"
}
note() { printf "\n${underline}${bold}${blue}Note:${reset} ${blue}%s${reset}\n" "$@"
}
set -e
function check_golang {
if ! go version &> /dev/null
then
warn "No golang package in your environment. You should use golang docker image build binary."
return
fi
# golang has been installed and check its version
if [[ $(go version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]]
then
golang_version=${BASH_REMATCH[1]}
golang_version_part1=${BASH_REMATCH[2]}
golang_version_part2=${BASH_REMATCH[3]}
# the version of golang does not meet the requirement
if [ "$golang_version_part1" -lt 1 ] || ([ "$golang_version_part1" -eq 1 ] && [ "$golang_version_part2" -lt 12 ])
then
warn "Better to upgrade golang package to 1.12.0+ or use golang docker image build binary."
return
else
note "golang version: $golang_version"
fi
else
warn "Failed to parse golang version."
return
fi
}
function check_docker {
if ! docker --version &> /dev/null
then
error "Need to install docker(20.10.10+) first and run this script again."
exit 1
fi
# docker has been installed and check its version
if [[ $(docker --version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]]
then
docker_version=${BASH_REMATCH[1]}
docker_version_part1=${BASH_REMATCH[2]}
docker_version_part2=${BASH_REMATCH[3]}
note "docker version: $docker_version"
# the version of docker does not meet the requirement
if [ "$docker_version_part1" -lt 17 ] || ([ "$docker_version_part1" -eq 17 ] && [ "$docker_version_part2" -lt 6 ])
then
error "Need to upgrade docker package to 20.10.10+."
exit 1
fi
else
error "Failed to parse docker version."
exit 1
fi
}
function check_dockercompose {
if ! docker compose version &> /dev/null && ! docker-compose --version &> /dev/null
then
error "Need to install docker-compose (1.18.0+) or the docker-compose-plugin (https://docs.docker.com/compose/) by yourself first and run this script again."
exit 1
fi
# either docker compose plugin has been installed
if docker compose version &> /dev/null
then
note "$(docker compose version)"
DOCKER_COMPOSE="docker compose"
# or docker-compose has been installed, check its version
elif [[ $(docker-compose --version) =~ (([0-9]+)\.([0-9]+)([\.0-9]*)) ]]
then
docker_compose_version=${BASH_REMATCH[1]}
docker_compose_version_part1=${BASH_REMATCH[2]}
docker_compose_version_part2=${BASH_REMATCH[3]}
note "docker-compose version: $docker_compose_version"
# the version of docker-compose does not meet the requirement
if [ "$docker_compose_version_part1" -lt 1 ] || ([ "$docker_compose_version_part1" -eq 1 ] && [ "$docker_compose_version_part2" -lt 18 ])
then
error "Need to upgrade docker-compose package to 1.18.0+."
exit 1
fi
else
error "Failed to parse docker-compose version."
exit 1
fi
}

View File

@ -0,0 +1,6 @@
appname = Harbor
runmode = prod
enablegzip = true
[prod]
httpport = 8080

View File

@ -0,0 +1,51 @@
CONFIG_PATH=/etc/core/app.conf
UAA_CA_ROOT=/etc/core/certificates/uaa_ca.pem
_REDIS_URL_CORE=redis://redis:6379?idle_timeout_seconds=30
SYNC_QUOTA=true
_REDIS_URL_REG=redis://redis:6379/1?idle_timeout_seconds=30
LOG_LEVEL=info
EXT_ENDPOINT=https://registry.traefik.me
DATABASE_TYPE=postgresql
POSTGRESQL_HOST=postgresql
POSTGRESQL_PORT=5432
POSTGRESQL_USERNAME=postgres
POSTGRESQL_PASSWORD=root123
POSTGRESQL_DATABASE=registry
POSTGRESQL_SSLMODE=disable
POSTGRESQL_MAX_IDLE_CONNS=100
POSTGRESQL_MAX_OPEN_CONNS=1000
POSTGRESQL_CONN_MAX_LIFETIME=5m
POSTGRESQL_CONN_MAX_IDLE_TIME=0
REGISTRY_URL=http://registry:5000
PORTAL_URL=http://portal:8080
TOKEN_SERVICE_URL=http://core:8080/service/token
HARBOR_ADMIN_PASSWORD=Harbor12345
MAX_JOB_WORKERS=10
CORE_SECRET=M6GdkMmjBufXLqN5
JOBSERVICE_SECRET=c8Au1Hk9BjPy0N9h
WITH_TRIVY=True
CORE_URL=http://core:8080
CORE_LOCAL_URL=http://127.0.0.1:8080
JOBSERVICE_URL=http://jobservice:8080
TRIVY_ADAPTER_URL=http://trivy-adapter:8080
REGISTRY_STORAGE_PROVIDER_NAME=filesystem
READ_ONLY=false
RELOAD_KEY=
REGISTRY_CONTROLLER_URL=http://registryctl:8080
REGISTRY_CREDENTIAL_USERNAME=harbor_registry_user
REGISTRY_CREDENTIAL_PASSWORD=kgDZdlQTjZDmwDvO97aUELPEQfPT1woN
CSRF_KEY=miI1hsOC4q36kMcq5wswiQWOTtroIVsZ
ROBOT_SCANNER_NAME_PREFIX=3zdTeO5W
PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE=docker-hub,harbor,azure-acr,aws-ecr,google-gcr,quay,docker-registry,github-ghcr,jfrog-artifactory
HTTP_PROXY=
HTTPS_PROXY=
NO_PROXY=
PORT=8080
QUOTA_UPDATE_PROVIDER=db

View File

@ -0,0 +1 @@
POSTGRES_PASSWORD=root123

View File

@ -0,0 +1,46 @@
---
#Protocol used to serve
protocol: "http"
#Server listening port
port: 8080
#Worker pool
worker_pool:
#Worker concurrency
workers: 10
backend: "redis"
#Additional config if use 'redis' backend
redis_pool:
#redis://[arbitrary_username:password@]ipaddress:port/database_index
redis_url: redis://redis:6379/2?idle_timeout_seconds=30
namespace: "harbor_job_service_namespace"
idle_timeout_second: 3600
#Loggers for the running job
job_loggers:
# The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
- name: "STD_OUTPUT"
level: "INFO" # INFO/DEBUG/WARNING/ERROR/FATAL
- name: "FILE"
level: "INFO"
settings: # Customized settings of logger
base_dir: "/var/log/jobs"
sweeper:
duration: 1 #days
settings: # Customized settings of sweeper
work_dir: "/var/log/jobs"
#Loggers for the job service
loggers:
- name: "STD_OUTPUT" # Same with above
level: "INFO"
reaper:
# the max time to wait for a task to finish, if unfinished after max_update_hours, the task will be mark as error, but the task will continue to run, default value is 24,
max_update_hours: 24
# the max time for execution in running state without new task created
max_dangling_hours: 168
# the max size of job log returned by API, default is 10M
max_retrieve_size_mb: 10

View File

@ -0,0 +1,17 @@
CORE_SECRET=M6GdkMmjBufXLqN5
REGISTRY_URL=http://registry:5000
JOBSERVICE_SECRET=c8Au1Hk9BjPy0N9h
CORE_URL=http://core:8080
REGISTRY_CONTROLLER_URL=http://registryctl:8080
JOBSERVICE_WEBHOOK_JOB_MAX_RETRY=3
JOBSERVICE_WEBHOOK_JOB_HTTP_CLIENT_TIMEOUT=3
HTTP_PROXY=
HTTPS_PROXY=
NO_PROXY=
REGISTRY_CREDENTIAL_USERNAME=harbor_registry_user
REGISTRY_CREDENTIAL_PASSWORD=kgDZdlQTjZDmwDvO97aUELPEQfPT1woN

View File

@ -0,0 +1,8 @@
/var/log/docker/*.log {
rotate 50
size 200M
copytruncate
compress
missingok
nodateext
}

View File

@ -0,0 +1,7 @@
# Rsyslog configuration file for docker.
template(name="DynaFile" type="string" string="/var/log/docker/%programname%.log")
if $programname != "rsyslogd" then {
action(type="omfile" dynaFile="DynaFile")
}

View File

@ -0,0 +1,119 @@
worker_processes auto;
pid /tmp/nginx.pid;
events {
worker_connections 3096;
use epoll;
multi_accept on;
}
http {
client_body_temp_path /tmp/client_body_temp;
proxy_temp_path /tmp/proxy_temp;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
tcp_nodelay on;
# this is necessary for us to be able to disable request buffering in all cases
proxy_http_version 1.1;
upstream core {
server core:8080;
}
upstream portal {
server portal:8080;
}
log_format timed_combined '$remote_addr - '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'$request_time $upstream_response_time $pipe';
access_log /dev/stdout timed_combined;
map $http_x_forwarded_proto $x_forwarded_proto {
default $http_x_forwarded_proto;
"" $scheme;
}
server {
listen 8080;
server_tokens off;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
# Add extra headers
add_header X-Frame-Options DENY;
add_header Content-Security-Policy "frame-ancestors 'none'";
# customized location config file can place to /etc/nginx/etc with prefix harbor.http. and suffix .conf
include /etc/nginx/conf.d/harbor.http.*.conf;
location / {
proxy_pass http://portal/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_buffering off;
proxy_request_buffering off;
}
location /c/ {
proxy_pass http://core/c/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_buffering off;
proxy_request_buffering off;
}
location /api/ {
proxy_pass http://core/api/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_buffering off;
proxy_request_buffering off;
}
location /v1/ {
return 404;
}
location /v2/ {
proxy_pass http://core/v2/;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_buffering off;
proxy_request_buffering off;
proxy_send_timeout 900;
proxy_read_timeout 900;
}
location /service/ {
proxy_pass http://core/service/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_buffering off;
proxy_request_buffering off;
}
location /service/notifications {
return 404;
}
}
}

View File

@ -0,0 +1,42 @@
worker_processes auto;
pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
client_body_temp_path /tmp/client_body_temp;
proxy_temp_path /tmp/proxy_temp;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
server {
listen 8080;
server_name localhost;
root /usr/share/nginx/html;
index index.html index.htm;
include /etc/nginx/mime.types;
gzip on;
gzip_min_length 1000;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
location /devcenter-api-2.0 {
try_files $uri $uri/ /swagger-ui-index.html;
}
location / {
try_files $uri $uri/ /index.html;
}
location = /index.html {
add_header Cache-Control "no-store, no-cache, must-revalidate";
}
}
}

View File

@ -0,0 +1,43 @@
version: 0.1
log:
level: info
fields:
service: registry
storage:
cache:
layerinfo: redis
filesystem:
rootdirectory: /storage
maintenance:
uploadpurging:
enabled: true
age: 168h
interval: 24h
dryrun: false
delete:
enabled: true
redis:
addr: redis:6379
readtimeout: 10s
writetimeout: 10s
dialtimeout: 10s
password:
db: 1
pool:
maxidle: 100
maxactive: 500
idletimeout: 60s
http:
addr: :5000
secret: placeholder
debug:
addr: localhost:5001
auth:
htpasswd:
realm: harbor-registry-basic-realm
path: /etc/registry/passwd
validation:
disabled: true
compatibility:
schema1:
enabled: true

View File

@ -0,0 +1 @@
harbor_registry_user:$2y$05$FCbp41JTQ3jGiNuk4RekiOAHvKwcY5b2xE9ql6dAt6yngVIyhzsAa

View File

@ -0,0 +1,5 @@
---
protocol: "http"
port: 8080
log_level: info
registry_config: "/etc/registry/config.yml"

View File

@ -0,0 +1,2 @@
CORE_SECRET=M6GdkMmjBufXLqN5
JOBSERVICE_SECRET=c8Au1Hk9BjPy0N9h

View File

@ -0,0 +1,21 @@
SCANNER_LOG_LEVEL=info
SCANNER_REDIS_URL=redis://redis:6379/5?idle_timeout_seconds=30
SCANNER_STORE_REDIS_URL=redis://redis:6379/5?idle_timeout_seconds=30
SCANNER_STORE_REDIS_NAMESPACE=harbor.scanner.trivy:store
SCANNER_JOB_QUEUE_REDIS_URL=redis://redis:6379/5?idle_timeout_seconds=30
SCANNER_JOB_QUEUE_REDIS_NAMESPACE=harbor.scanner.trivy:job-queue
SCANNER_TRIVY_CACHE_DIR=/home/scanner/.cache/trivy
SCANNER_TRIVY_REPORTS_DIR=/home/scanner/.cache/reports
SCANNER_TRIVY_VULN_TYPE=os,library
SCANNER_TRIVY_SEVERITY=UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL
SCANNER_TRIVY_IGNORE_UNFIXED=False
SCANNER_TRIVY_SKIP_UPDATE=False
SCANNER_TRIVY_SKIP_JAVA_DB_UPDATE=False
SCANNER_TRIVY_OFFLINE_SCAN=False
SCANNER_TRIVY_SECURITY_CHECKS=vuln
SCANNER_TRIVY_GITHUB_TOKEN=
SCANNER_TRIVY_INSECURE=False
SCANNER_TRIVY_TIMEOUT=5m0s
HTTP_PROXY=
HTTPS_PROXY=
NO_PROXY=

View File

@ -1,8 +1,283 @@
#### NETWORKS
networks:
back_network:
driver: bridge
attachable: true
#### SERVICES
version: '2.3'
services:
log:
image: goharbor/harbor-log:v2.11.1
container_name: harbor-log
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- DAC_OVERRIDE
- SETGID
- SETUID
volumes:
- /var/log/harbor/:/var/log/docker/:z
- type: bind
source: ./common/config/log/logrotate.conf
target: /etc/logrotate.d/logrotate.conf
- type: bind
source: ./common/config/log/rsyslog_docker.conf
target: /etc/rsyslog.d/rsyslog_docker.conf
ports:
- 127.0.0.1:1514:10514
networks:
- harbor
registry:
image: goharbor/registry-photon:v2.11.1
container_name: registry
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
volumes:
- /data/registry:/storage:z
- ./common/config/registry/:/etc/registry/:z
- type: bind
source: /data/secret/registry/root.crt
target: /etc/registry/root.crt
- type: bind
source: ./common/config/shared/trust-certificates
target: /harbor_cust_cert
networks:
- harbor
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "registry"
registryctl:
image: goharbor/harbor-registryctl:v2.11.1
container_name: registryctl
env_file:
- ./common/config/registryctl/env
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
volumes:
- /data/registry:/storage:z
- ./common/config/registry/:/etc/registry/:z
- type: bind
source: ./common/config/registryctl/config.yml
target: /etc/registryctl/config.yml
- type: bind
source: ./common/config/shared/trust-certificates
target: /harbor_cust_cert
networks:
- harbor
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "registryctl"
postgresql:
image: goharbor/harbor-db:v2.11.1
container_name: harbor-db
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- DAC_OVERRIDE
- SETGID
- SETUID
volumes:
- /data/database:/var/lib/postgresql/data:z
networks:
harbor:
env_file:
- ./common/config/db/env
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "postgresql"
shm_size: '1gb'
core:
image: goharbor/harbor-core:v2.11.1
container_name: harbor-core
env_file:
- ./common/config/core/env
restart: always
cap_drop:
- ALL
cap_add:
- SETGID
- SETUID
volumes:
- /data/ca_download/:/etc/core/ca/:z
- /data/:/data/:z
- ./common/config/core/certificates/:/etc/core/certificates/:z
- type: bind
source: ./common/config/core/app.conf
target: /etc/core/app.conf
- type: bind
source: /data/secret/core/private_key.pem
target: /etc/core/private_key.pem
- type: bind
source: /data/secret/keys/secretkey
target: /etc/core/key
- type: bind
source: ./common/config/shared/trust-certificates
target: /harbor_cust_cert
networks:
harbor:
depends_on:
- log
- registry
- redis
- postgresql
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "core"
portal:
image: goharbor/harbor-portal:v2.11.1
container_name: harbor-portal
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
- NET_BIND_SERVICE
volumes:
- type: bind
source: ./common/config/portal/nginx.conf
target: /etc/nginx/nginx.conf
networks:
- harbor
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "portal"
jobservice:
image: goharbor/harbor-jobservice:v2.11.1
container_name: harbor-jobservice
env_file:
- ./common/config/jobservice/env
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
volumes:
- /data/job_logs:/var/log/jobs:z
- type: bind
source: ./common/config/jobservice/config.yml
target: /etc/jobservice/config.yml
- type: bind
source: ./common/config/shared/trust-certificates
target: /harbor_cust_cert
networks:
- harbor
depends_on:
- core
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "jobservice"
redis:
image: goharbor/redis-photon:v2.11.1
container_name: redis
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
volumes:
- /data/redis:/var/lib/redis
networks:
harbor:
depends_on:
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "redis"
proxy:
image: goharbor/nginx-photon:v2.11.1
container_name: nginx
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
- NET_BIND_SERVICE
volumes:
- ./common/config/nginx:/etc/nginx:z
- type: bind
source: ./common/config/shared/trust-certificates
target: /harbor_cust_cert
networks:
- harbor
ports:
- 8083:8080
depends_on:
- registry
- core
- portal
- log
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "proxy"
trivy-adapter:
container_name: trivy-adapter
image: goharbor/trivy-adapter-photon:v2.11.1
restart: always
cap_drop:
- ALL
depends_on:
- log
- redis
networks:
- harbor
volumes:
- type: bind
source: /data/trivy-adapter/trivy
target: /home/scanner/.cache/trivy
- type: bind
source: /data/trivy-adapter/reports
target: /home/scanner/.cache/reports
- type: bind
source: ./common/config/shared/trust-certificates
target: /harbor_cust_cert
logging:
driver: "syslog"
options:
syslog-address: "tcp://localhost:1514"
tag: "trivy-adapter"
env_file:
./common/config/trivy-adapter/env
networks:
harbor:
external: false

View File

@ -0,0 +1,316 @@
# Configuration file of Harbor
# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: registry.traefik.me
# http related config
http:
# port for http, default is 80. If https enabled, this port will redirect to https port
port: 8083
# https related config
##https:
# https port for harbor, default is 443
## port: 443
# The path of cert and key files for nginx
## certificate: /your/certificate/path
## private_key: /your/private/key/path
# enable strong ssl ciphers (default: false)
# strong_ssl_ciphers: false
# # Harbor will set ipv4 enabled only by default if this block is not configured
# # Otherwise, please uncomment this block to configure your own ip_family stacks
# ip_family:
# # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
# ipv6:
# enabled: false
# # ipv4Enabled set to true by default, currently it affected the nginx related component
# ipv4:
# enabled: true
# # Uncomment following will enable tls communication between all harbor components
# internal_tls:
# # set enabled to true means internal tls is enabled
# enabled: true
# # put your cert and key files on dir
# dir: /etc/harbor/tls/internal
# Uncomment external_url if you want to enable external proxy
# And when it enabled the hostname will no longer used
external_url: https://registry.traefik.me
# The initial password of Harbor admin
# It only works in first time to install harbor
# Remember Change the admin password from UI after launching Harbor.
harbor_admin_password: Harbor12345
# Harbor DB configuration
database:
# The password for the root user of Harbor DB. Change this before any production use.
password: root123
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
max_idle_conns: 100
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
# Note: the default number of connections is 1024 for postgres of harbor.
max_open_conns: 1000
# The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
conn_max_lifetime: 5m
# The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
conn_max_idle_time: 0
# The default data volume
data_volume: /data
# Harbor Storage settings by default is using /data dir on local filesystem
# Uncomment storage_service setting If you want to using external storage
# storage_service:
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
# # of registry's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
# ca_bundle:
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
# # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
# # and https://distribution.github.io/distribution/storage-drivers/
# filesystem:
# maxthreads: 100
# # set disable to true when you want to disable registry redirect
# redirect:
# disable: false
# Trivy configuration
#
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
# 12 hours and published as a new release to GitHub.
trivy:
# ignoreUnfixed The flag to display only fixed vulnerabilities
ignore_unfixed: false
# skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
#
# You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
# If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
skip_update: false
#
# skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
# `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
skip_java_db_update: false
#
# The offline_scan option prevents Trivy from sending API requests to identify dependencies.
# Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
# For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
# exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
# It would work if all the dependencies are in local.
# This option doesn't affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
offline_scan: false
#
# Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
security_check: vuln
#
# insecure The flag to skip verifying registry certificate
insecure: false
#
# timeout The duration to wait for scan completion.
# There is upper bound of 30 minutes defined in scan job. So if this `timeout` is larger than 30m0s, it will also timeout at 30m0s.
timeout: 5m0s
#
# github_token The GitHub access token to download Trivy DB
#
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
# https://docs.github.com/rest/overview/resources-in-the-rest-api#rate-limiting
#
# You can create a GitHub token by following the instructions in
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
#
# github_token: xxx
jobservice:
# Maximum number of job workers in job service
max_job_workers: 10
# The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
job_loggers:
- STD_OUTPUT
- FILE
# - DB
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
logger_sweeper_duration: 1 #days
notification:
# Maximum retry count for webhook job
webhook_job_max_retry: 3
# HTTP client timeout for webhook job
webhook_job_http_client_timeout: 3 #seconds
# Log configurations
log:
# options are debug, info, warning, error, fatal
level: info
# configs for logs in local storage
local:
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
rotate_count: 50
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
# are all valid.
rotate_size: 200M
# The directory on your host that store log
location: /var/log/harbor
# Uncomment following lines to enable external syslog endpoint.
# external_endpoint:
# # protocol used to transmit log to external endpoint, options is tcp or udp
# protocol: tcp
# # The host of external endpoint
# host: localhost
# # Port of external endpoint
# port: 5140
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 2.11.0
# Uncomment external_database if using external database.
# external_database:
# harbor:
# host: harbor_db_host
# port: harbor_db_port
# db_name: harbor_db_name
# username: harbor_db_username
# password: harbor_db_password
# ssl_mode: disable
# max_idle_conns: 2
# max_open_conns: 0
# Uncomment redis if need to customize redis db
# redis:
# # db_index 0 is for core, it's unchangeable
# # registry_db_index: 1
# # jobservice_db_index: 2
# # trivy_db_index: 5
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
# # harbor_db_index: 6
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
# # cache_layer_db_index: 7
# Uncomment external_redis if using external Redis server
# external_redis:
# # support redis, redis+sentinel
# # host for redis: <host_redis>:<port_redis>
# # host for redis+sentinel:
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
# host: redis:6379
# password:
# # Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
# # there's a known issue when using external redis username ref:https://github.com/goharbor/harbor/issues/18892
# # if you care about the image pull/push performance, please refer to this https://github.com/goharbor/harbor/wiki/Harbor-FAQs#external-redis-username-password-usage
# # username:
# # sentinel_master_set must be set to support redis+sentinel
# #sentinel_master_set:
# # db_index 0 is for core, it's unchangeable
# registry_db_index: 1
# jobservice_db_index: 2
# trivy_db_index: 5
# idle_timeout_seconds: 30
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
# # harbor_db_index: 6
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
# # cache_layer_db_index: 7
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
# uaa:
# ca_file: /path/to/ca
# Global proxy
# Config http proxy for components, e.g. http://my.proxy.com:3128
# Components doesn't need to connect to each others via http proxy.
# Remove component from `components` array if want disable proxy
# for it. If you want use proxy for replication, MUST enable proxy
# for core and jobservice, and set `http_proxy` and `https_proxy`.
# Add domain to the `no_proxy` field, when you want disable proxy
# for some special registry.
##proxy:
## http_proxy:
## https_proxy:
## no_proxy:
## components:
## - core
## - jobservice
## - trivy
metric:
enabled: false
port: 9090
path: /metrics
# Trace related config
# only can enable one trace provider(jaeger or otel) at the same time,
# and when using jaeger as provider, can only enable it with agent mode or collector mode.
# if using jaeger collector mode, uncomment endpoint and uncomment username, password if needed
# if using jaeger agetn mode uncomment agent_host and agent_port
# trace:
# enabled: true
# # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth
# sample_rate: 1
# # # namespace used to differentiate different harbor services
# # namespace:
# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
# # attributes:
# # application: harbor
# # # jaeger should be 1.26 or newer.
# # jaeger:
# # endpoint: http://hostname:14268/api/traces
# # username:
# # password:
# # agent_host: hostname
# # # export trace data by jaeger.thrift in compact mode
# # agent_port: 6831
# # otel:
# # endpoint: hostname:4318
# # url_path: /v1/traces
# # compression: false
# # insecure: true
# # # timeout is in seconds
# # timeout: 10
# Enable purge _upload directories
upload_purging:
enabled: true
# remove files in _upload directories which exist for a period of time, default is one week.
age: 168h
# the interval of the purge operations
interval: 24h
dryrun: false
# Cache layer configurations
# If this feature enabled, harbor will cache the resource
# `project/project_metadata/repository/artifact/manifest` in the redis
# which can especially help to improve the performance of high concurrent
# manifest pulling.
# NOTICE
# If you are deploying Harbor in HA mode, make sure that all the harbor
# instances have the same behaviour, all with caching enabled or disabled,
# otherwise it can lead to potential data inconsistency.
cache:
# not enabled by default
enabled: false
# keep cache for one day by default
expire_hours: 24
# Harbor core configurations
# Uncomment to enable the following harbor core related configuration items.
# core:
# # The provider for updating project quota(usage), there are 2 options, redis or db,
# # by default is implemented by db but you can switch the updation via redis which
# # can improve the performance of high concurrent pushing to the same project,
# # and reduce the database connections spike and occupies.
# # By redis will bring up some delay for quota usage updation for display, so only
# # suggest switch provider to redis if you were ran into the db connections spike around
# # the scenario of high concurrent pushing to same project, no improvement for other scenes.
# quota_update_provider: redis # Or db

View File

@ -0,0 +1,316 @@
# Configuration file of Harbor
# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: reg.mydomain.com
# http related config
http:
# port for http, default is 80. If https enabled, this port will redirect to https port
port: 80
# https related config
https:
# https port for harbor, default is 443
port: 443
# The path of cert and key files for nginx
certificate: /your/certificate/path
private_key: /your/private/key/path
# enable strong ssl ciphers (default: false)
# strong_ssl_ciphers: false
# # Harbor will set ipv4 enabled only by default if this block is not configured
# # Otherwise, please uncomment this block to configure your own ip_family stacks
# ip_family:
# # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
# ipv6:
# enabled: false
# # ipv4Enabled set to true by default, currently it affected the nginx related component
# ipv4:
# enabled: true
# # Uncomment following will enable tls communication between all harbor components
# internal_tls:
# # set enabled to true means internal tls is enabled
# enabled: true
# # put your cert and key files on dir
# dir: /etc/harbor/tls/internal
# Uncomment external_url if you want to enable external proxy
# And when it enabled the hostname will no longer used
# external_url: https://reg.mydomain.com:8433
# The initial password of Harbor admin
# It only works in first time to install harbor
# Remember Change the admin password from UI after launching Harbor.
harbor_admin_password: Harbor12345
# Harbor DB configuration
database:
# The password for the root user of Harbor DB. Change this before any production use.
password: root123
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
max_idle_conns: 100
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
# Note: the default number of connections is 1024 for postgres of harbor.
max_open_conns: 900
# The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
conn_max_lifetime: 5m
# The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
conn_max_idle_time: 0
# The default data volume
data_volume: /data
# Harbor Storage settings by default is using /data dir on local filesystem
# Uncomment storage_service setting If you want to using external storage
# storage_service:
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
# # of registry's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
# ca_bundle:
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
# # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
# # and https://distribution.github.io/distribution/storage-drivers/
# filesystem:
# maxthreads: 100
# # set disable to true when you want to disable registry redirect
# redirect:
# disable: false
# Trivy configuration
#
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
# 12 hours and published as a new release to GitHub.
trivy:
# ignoreUnfixed The flag to display only fixed vulnerabilities
ignore_unfixed: false
# skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
#
# You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
# If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
skip_update: false
#
# skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
# `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
skip_java_db_update: false
#
# The offline_scan option prevents Trivy from sending API requests to identify dependencies.
# Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
# For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
# exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
# It would work if all the dependencies are in local.
# This option doesn't affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
offline_scan: false
#
# Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
security_check: vuln
#
# insecure The flag to skip verifying registry certificate
insecure: false
#
# timeout The duration to wait for scan completion.
# There is upper bound of 30 minutes defined in scan job. So if this `timeout` is larger than 30m0s, it will also timeout at 30m0s.
timeout: 5m0s
#
# github_token The GitHub access token to download Trivy DB
#
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
# https://docs.github.com/rest/overview/resources-in-the-rest-api#rate-limiting
#
# You can create a GitHub token by following the instructions in
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
#
# github_token: xxx
jobservice:
# Maximum number of job workers in job service
max_job_workers: 10
# The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
job_loggers:
- STD_OUTPUT
- FILE
# - DB
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
logger_sweeper_duration: 1 #days
notification:
# Maximum retry count for webhook job
webhook_job_max_retry: 3
# HTTP client timeout for webhook job
webhook_job_http_client_timeout: 3 #seconds
# Log configurations
log:
# options are debug, info, warning, error, fatal
level: info
# configs for logs in local storage
local:
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
rotate_count: 50
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
# are all valid.
rotate_size: 200M
# The directory on your host that store log
location: /var/log/harbor
# Uncomment following lines to enable external syslog endpoint.
# external_endpoint:
# # protocol used to transmit log to external endpoint, options is tcp or udp
# protocol: tcp
# # The host of external endpoint
# host: localhost
# # Port of external endpoint
# port: 5140
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 2.11.0
# Uncomment external_database if using external database.
# external_database:
# harbor:
# host: harbor_db_host
# port: harbor_db_port
# db_name: harbor_db_name
# username: harbor_db_username
# password: harbor_db_password
# ssl_mode: disable
# max_idle_conns: 2
# max_open_conns: 0
# Uncomment redis if need to customize redis db
# redis:
# # db_index 0 is for core, it's unchangeable
# # registry_db_index: 1
# # jobservice_db_index: 2
# # trivy_db_index: 5
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
# # harbor_db_index: 6
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
# # cache_layer_db_index: 7
# Uncomment external_redis if using external Redis server
# external_redis:
# # support redis, redis+sentinel
# # host for redis: <host_redis>:<port_redis>
# # host for redis+sentinel:
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
# host: redis:6379
# password:
# # Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
# # there's a known issue when using external redis username ref:https://github.com/goharbor/harbor/issues/18892
# # if you care about the image pull/push performance, please refer to this https://github.com/goharbor/harbor/wiki/Harbor-FAQs#external-redis-username-password-usage
# # username:
# # sentinel_master_set must be set to support redis+sentinel
# #sentinel_master_set:
# # db_index 0 is for core, it's unchangeable
# registry_db_index: 1
# jobservice_db_index: 2
# trivy_db_index: 5
# idle_timeout_seconds: 30
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
# # harbor_db_index: 6
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
# # cache_layer_db_index: 7
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
# uaa:
# ca_file: /path/to/ca
# Global proxy
# Config http proxy for components, e.g. http://my.proxy.com:3128
# Components don't need to connect to each other via the http proxy.
# Remove a component from the `components` array if you want to disable
# the proxy for it. If you want to use a proxy for replication, you MUST
# enable the proxy for core and jobservice, and set `http_proxy` and `https_proxy`.
# Add a domain to the `no_proxy` field when you want to disable the proxy
# for a particular registry.
proxy:
http_proxy:
https_proxy:
no_proxy:
components:
- core
- jobservice
- trivy
# metric:
# enabled: false
# port: 9090
# path: /metrics
# Trace related config
# Only one trace provider (jaeger or otel) can be enabled at the same time,
# and when using jaeger as the provider, it can only be enabled in agent mode or collector mode.
# If using jaeger collector mode, uncomment endpoint, and uncomment username and password if needed.
# If using jaeger agent mode, uncomment agent_host and agent_port.
# trace:
# enabled: true
# # set sample_rate to 1 to sample 100% of trace data; set 0.5 to sample 50% of trace data, and so forth
# sample_rate: 1
# # # namespace used to differentiate different harbor services
# # namespace:
# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
# # attributes:
# # application: harbor
# # # jaeger should be 1.26 or newer.
# # jaeger:
# # endpoint: http://hostname:14268/api/traces
# # username:
# # password:
# # agent_host: hostname
# # # export trace data by jaeger.thrift in compact mode
# # agent_port: 6831
# # otel:
# # endpoint: hostname:4318
# # url_path: /v1/traces
# # compression: false
# # insecure: true
# # # timeout is in seconds
# # timeout: 10
# Enable purge _upload directories
upload_purging:
enabled: true
# remove files in _upload directories which exist for a period of time, default is one week.
age: 168h
# the interval of the purge operations
interval: 24h
dryrun: false
# Cache layer configurations
# If this feature enabled, harbor will cache the resource
# `project/project_metadata/repository/artifact/manifest` in the redis
# which can especially help to improve the performance of high concurrent
# manifest pulling.
# NOTICE
# If you are deploying Harbor in HA mode, make sure that all the harbor
# instances have the same behaviour, all with caching enabled or disabled,
# otherwise it can lead to potential data inconsistency.
cache:
# not enabled by default
enabled: false
# keep cache for one day by default
expire_hours: 24
# Harbor core configurations
# Uncomment to enable the following harbor core related configuration items.
# core:
# # The provider for updating project quota (usage). There are 2 options, redis or db.
# # By default it is implemented by db, but you can switch the updates to redis, which
# # can improve the performance of highly concurrent pushes to the same project
# # and reduce spikes in database connections.
# # Using redis introduces some delay before the quota usage is updated for display, so only
# # switch the provider to redis if you run into database connection spikes caused by
# # highly concurrent pushes to the same project; it brings no improvement for other scenarios.
# quota_update_provider: redis # Or db

View File

@ -0,0 +1,80 @@
#!/bin/bash
set -e
DIR="$(cd "$(dirname "$0")" && pwd)"
source $DIR/common.sh
set +o noglob
usage=$'Please set hostname and other necessary attributes in harbor.yml first. DO NOT use localhost or 127.0.0.1 for hostname, because Harbor needs to be accessed by external clients.
Please set --with-trivy if needs enable Trivy in Harbor.
Please do NOT set --with-chartmuseum, as chartmuseum has been deprecated and removed.
Please do NOT set --with-notary, as notary has been deprecated and removed.'
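# Example invocations (assuming this script is saved as install.sh and is executable):
#   ./install.sh --with-trivy   # install Harbor with the Trivy scanner enabled
#   ./install.sh --help         # print the usage text above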
item=0
# clair is deprecated
with_clair=false
# trivy is not enabled by default
with_trivy=false
# flag selecting docker compose v1 or v2; the default is v1 (docker-compose)
DOCKER_COMPOSE=docker-compose
while [ $# -gt 0 ]; do
case $1 in
--help)
note "$usage"
exit 0;;
--with-trivy)
with_trivy=true;;
*)
note "$usage"
exit 1;;
esac
shift || true
done
workdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $workdir
h2 "[Step $item]: checking if docker is installed ..."; let item+=1
check_docker
h2 "[Step $item]: checking docker-compose is installed ..."; let item+=1
check_dockercompose
if [ -f harbor*.tar.gz ]
then
h2 "[Step $item]: loading Harbor images ..."; let item+=1
docker load -i ./harbor*.tar.gz
fi
echo ""
h2 "[Step $item]: preparing environment ..."; let item+=1
if [ -n "$host" ]
then
sed "s/^hostname: .*/hostname: $host/g" -i ./harbor.yml
fi
h2 "[Step $item]: preparing harbor configs ..."; let item+=1
prepare_para=
if [ "$with_trivy" = "true" ]
then
prepare_para="${prepare_para} --with-trivy"
fi
./prepare $prepare_para
echo ""
if [ -n "$DOCKER_COMPOSE ps -q" ]
then
note "stopping existing Harbor instance ..."
$DOCKER_COMPOSE down -v
fi
echo ""
h2 "[Step $item]: starting Harbor ..."
$DOCKER_COMPOSE up -d
success $"----Harbor has been installed and started successfully.----"

64
Repository/Harbor/prepare Normal file
View File

@ -0,0 +1,64 @@
#!/bin/bash
set -e
# If compiling source code this dir is harbor's make dir.
# If installing harbor via package, this dir is harbor's root dir.
if [[ -n "$HARBOR_BUNDLE_DIR" ]]; then
harbor_prepare_path=$HARBOR_BUNDLE_DIR
else
harbor_prepare_path="$( cd "$(dirname "$0")" ; pwd -P )"
fi
echo "prepare base dir is set to ${harbor_prepare_path}"
# Clean up input dir
rm -rf ${harbor_prepare_path}/input
# Create the input dir
mkdir -p ${harbor_prepare_path}/input
input_dir=${harbor_prepare_path}/input
# Copy harbor.yml to input dir
if [[ ! "$1" =~ ^\-\- ]] && [ -f "$1" ]
then
cp $1 $input_dir/harbor.yml
shift
else
if [ -f "${harbor_prepare_path}/harbor.yml" ];then
cp ${harbor_prepare_path}/harbor.yml $input_dir/harbor.yml
else
echo "no config file: ${harbor_prepare_path}/harbor.yml"
exit 1
fi
fi
data_path=$(grep '^[^#]*data_volume:' $input_dir/harbor.yml | awk '{print $NF}')
# If previous secret keys exist, move them to the new location
previous_secretkey_path=/data/secretkey
previous_defaultalias_path=/data/defaultalias
if [ -f $previous_secretkey_path ]; then
mkdir -p $data_path/secret/keys
mv $previous_secretkey_path $data_path/secret/keys
fi
if [ -f $previous_defaultalias_path ]; then
mkdir -p $data_path/secret/keys
mv $previous_defaultalias_path $data_path/secret/keys
fi
# Create secret dir
secret_dir=${data_path}/secret
config_dir=$harbor_prepare_path/common/config
# Run prepare script
docker run --rm -v $input_dir:/input \
-v $data_path:/data \
-v $harbor_prepare_path:/compose_location \
-v $config_dir:/config \
-v /:/hostfs \
--privileged \
goharbor/prepare:v2.11.1 prepare $@
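# At this point the prepare container has rendered the runtime configuration from harbor.yml
# (typically docker-compose.yml next to this script and the files under common/config).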
echo "Clean up the input dir"
# Clean up input dir
rm -rf ${harbor_prepare_path}/input

3
Reseaux/Openvpn/LICENSE Normal file
View File

@ -0,0 +1,3 @@
Additional permission under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or combining it with [name of library] (or a modified version of that library), containing parts covered by the terms of [name of library's license], the licensors of this Program grant you additional permission to convey the resulting work. Corresponding Source for a non-source form of such a combination shall include the source code for the parts of [name of library] used as well as that of the covered work.

66
Reseaux/Openvpn/README.md Normal file
View File

@ -0,0 +1,66 @@
![OpenSpeedTest](./img/banniere-OpenSpeedTest.png)
URL : https://www.uvdesk.com/en/
# OpenSpeedTest
OpenSpeedTest is a very accurate and powerful online bandwidth test. You can use it to test your local/office network speed, including your LAN/Wi-Fi. The tool is designed to reflect your real connection speed.
# Download, Configuration and Launch
## Downloading OpenSpeedTest
Run the following command to download the source:
```bash
git clone https://git.tips-of-mine.fr/Tips-Of-Mine/Docker.git
```
Run the following command to move into the folder:
```bash
cd Reseaux\OpenSpeedTest
```
## Editing the OpenSpeedTest configuration
Run the following command to move into the folder:
```bash
cd Reseaux\OpenSpeedTest
```
Edit the configuration file:
```bash
nano .env
```
Set the variables you need; a hypothetical example is shown below.
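For illustration only, such a `.env` file usually holds a few values such as the published port or the time zone. The variable names below are hypothetical; match them to the variables actually referenced by the compose files in this folder.
```bash
# Hypothetical example values - adapt them to the variables used in docker-compose*.yml
HTTP_PORT=3000
TZ=Europe/Paris
```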
## Launching OpenSpeedTest
To run OpenSpeedTest on its own:
```bash
docker compose up -d
```
To run OpenSpeedTest with Traefik:
```bash
docker compose -f docker-compose-traefik.yml up -d
```
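In both cases you can check that the containers started correctly with standard Docker Compose commands:
```bash
docker compose ps        # list the containers and their state
docker compose logs -f   # follow the logs
```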
# Usage
## Home page
Open a web page at one of the following URLs.
For standalone use:
http://10.0.4.29:3000
For use with Traefik:
https://OpenSpeedTest.10.0.4.29.traefik.me
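You can also check from the command line; the IP address and hostname below are the ones used above and may differ in your environment:
```bash
# Standalone
curl -I http://10.0.4.29:3000

# Behind Traefik (-k skips certificate verification in case the traefik.me certificate is not yet in place)
curl -kI https://OpenSpeedTest.10.0.4.29.traefik.me
```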
# More info
- More information on the website [Tips-Of-Mine](https://www.tips-of-mine.fr/)
# Buy me a coffee
<a href='https://ko-fi.com/R5R2KNI3N' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://storage.ko-fi.com/cdn/kofi4.png?v=3' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>

View File

@ -0,0 +1,72 @@
#### NETWORKS
networks:
docker-traefik_front_network:
external: true
back_network:
driver: bridge
attachable: true
external: false
#### SERVICES
services:
### openvpn-tcp
openvpn-tcp:
container_name: openvpn-tcp
hostname: openvpn-tcp
image: kylemanna/openvpn:2.4
command: ovpn_run --proto tcp
volumes:
- ./data/openvpn:/etc/openvpn
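# NOTE: the kylemanna/openvpn image expects this volume to be initialised beforehand
# (for example with its ovpn_genconfig and ovpn_initpki helpers); see that image's documentation.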
networks:
- docker-traefik_front_network
cap_add:
- NET_ADMIN
restart: always
labels:
- "traefik.enable=true"
- "traefik.docker.network=traefik_front_network"
- "traefik.tcp.services.openvpn-tcp.loadBalancer.server.port=1194"
# OpenVPN does not support SNI, so we route with a wildcard HostSNI rule
- "traefik.tcp.routers.openvpn-tcp.rule=HostSNI(`*`)"
- "traefik.tcp.routers.openvpn-tcp.entrypoints=websecure"
### openvpn-udp
openvpn-udp:
container_name: openvpn-udp
hostname: openvpn-udp
image: kylemanna/openvpn:2.4
command: ovpn_run --proto udp
volumes:
- ./data/openvpn:/etc/openvpn
networks:
- docker-traefik_front_network
cap_add:
- NET_ADMIN
restart: always
labels:
- "traefik.enable=true"
- "traefik.docker.network=traefik_front_network"
- "traefik.udp.services.openvpn-udp.loadBalancer.server.port=1194"
- "traefik.udp.routers.openvpn-udp.service=openvpn-udp"
- "traefik.udp.routers.openvpn-udp.entrypoints=openvpn"

View File

@ -1,16 +1,16 @@
http:
routers:
traefik:
rule: Host(`dashboard.10.0.4.29.traefik.me`)
rule: Host(`dashboard.traefik.me`)
entryPoints:
- https
service: api@internal
middlewares:
- dashboardauth
tls:
certResolver: letsencrypt
certResolver: production
traefik-http-redirect:
rule: Host(`dashboard.10.0.4.29.traefik.me`)
rule: Host(`dashboard.traefik.me`)
entryPoints:
- http
service: api@internal

View File

@ -48,3 +48,58 @@ http:
regex: "^https?://www\\.(.+)"
# How to modify the URL to have the new target URL
replacement: "https://${1}"
# default-headers:
# headers:
# frameDeny: true
# browserXssFilter: true
# contentTypeNosniff: true
# forceSTSHeader: true
# stsIncludeSubdomains: true
# stsPreload: true
# stsSeconds: 15552000
# customFrameOptionsValue: SAMEORIGIN
# customRequestHeaders:
# X-Forwarded-Proto: https
# crowdsec:
# plugin:
# bouncer:
# enabled: true
# logLevel: INFO
# updateIntervalSeconds: 15
# updateMaxFailure: 0
# defaultDecisionSeconds: 15
# httpTimeoutSeconds: 10
# crowdsecMode: stream
# crowdsecAppsecEnabled: true
# crowdsecAppsecHost: crowdsec:7422
# crowdsecAppsecFailureBlock: true
# crowdsecAppsecUnreachableBlock: true
# crowdsecLapiKey: #####REPLACE_API_KEY##### # Replace CrowdSec API key (docker exec crowdsec cscli bouncers add crowdsecBouncer)
# crowdsecLapiKeyFile: /etc/traefik/cs-privateKey-foo
# crowdsecLapiHost: crowdsec:8080
# crowdsecLapiScheme: http
# forwardedHeadersTrustedIPs:
# - 10.0.35.4/32 # Cloudflare tunnel IP address
# - 172.30.0.0/24 # Reverse Proxy IP address
# clientTrustedIPs:
# - 10.0.1.0/24 # Internal LAN IP addresses
# - 10.0.2.0/24 # Internal LAN IP addresses
# - 10.0.3.0/24 # Internal LAN IP addresses
# - 10.0.4.0/24 # Internal LAN IP addresses
# - 10.0.5.0/24 # Internal LAN IP addresses
# forwardedHeadersCustomName: CF-Connecting-IP # Cloudflare IP address header
# default-whitelist:
# ipWhiteList:
# sourceRange:
# - "10.0.4.0/24"
# - "192.168.0.0/16"
# - "172.16.0.0/12"
# secured:
# chain:
# middlewares:
# - default-whitelist
# - default-headers

View File

@ -0,0 +1,13 @@
http:
routers:
harbor:
service: harbor
rule: "Host(`registry.traefik.me`)"
tls:
certResolver: production
services:
harbor:
loadBalancer:
servers:
- url: "http://registry.traefik.me:8083"

View File

@ -6,24 +6,56 @@ global:
checkNewVersion: true
entryPoints:
ftp:
address: ":21"
ssh:
address: ":22"
smtp:
address: ":25"
dns:
address: ":53"
http:
address: ":80"
# forwardedHeaders:
# insecure: true
forwardedHeaders:
insecure: true
http:
# middlewares: # CHANGE MADE HERE (BOUNCER ENABLED) !!!
# - "crowdsec@file" # CHANGE MADE HERE (BOUNCER ENABLED) !!!
# - "cloudflarewarp@file" # CHANGE MADE HERE (BOUNCER ENABLED) !!!
redirections:
entryPoint:
to: https
scheme: https
pop3:
address: ":110"
imap:
address: ":143"
https:
address: ":443"
# forwardedHeaders:
# insecure: true
forwardedHeaders:
insecure: true
# http:
# middlewares: # CHANGE MADE HERE (BOUNCER ENABLED) !!!
# - crowdsec@file
# - "cloudflarewarp@file" # CHANGE MADE HERE (BOUNCER ENABLED) !!!
# middlewares:
# - secureHeaders@file
# tls:
# certResolver: letsencrypt
smtp-ssl:
address: ":465"
starttls:
address: ":587"
imap-ssl:
address: ":993"
pop3-ssl:
address: ":995"
openvpn:
address: ":1194/udp"
mysql:
address: ":3306"
elasticsearch:
address: ":9200"
metrics:
address: ":8181"
@ -38,12 +70,23 @@ providers:
providersThrottleDuration: 10
certificatesResolvers:
cloudflare:
staging:
acme:
email: admin@tips-of-mine.fr
storage: acme.json
caServer: "https://acme-staging-v02.api.letsencrypt.org/directory"
dnsChallenge:
provider: cloudflare
provider: staging
tlschallenge: true
httpChallenge:
entryPoint: http
production:
acme:
email: admin@tips-of-mine.fr
storage: acme.json
caServer: "https://acme-v02.api.letsencrypt.org/directory"
dnsChallenge:
provider: production
resolvers:
- "1.1.1.1:53"
- "1.0.0.1:53"
@ -83,3 +126,21 @@ metrics:
# Add the services
addServicesLabels: true
addRoutersLabels: true
experimental:
plugins:
crowdsec-bouncer-traefik-plugin:
moduleName: "github.com/maxlerebourg/crowdsec-bouncer-traefik-plugin"
version: "v1.3.3"
traefik-maintenance:
moduleName: github.com/TRIMM/traefik-maintenance
version: v1.0.1
fail2ban:
moduleName: "github.com/tomMoulard/fail2ban"
version: "v0.8.3"
sablier:
moduleName: "github.com/acouvreur/sablier"
version: "v1.8.0-beta.22"

View File

@ -0,0 +1,99 @@
### networks
networks:
back_network:
driver: bridge
attachable: true
front_network:
driver: bridge
attachable: true
### Volumes
#volumes:
# traefik-logs:
### services
services:
# traefik
traefik:
container_name: traefik-app
hostname: traefik-app
image: traefik:latest
restart: always
ports:
- "80:80"
- "443:443"
- "8181:8181"
volumes:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "/etc/localtime:/etc/localtime:ro"
- "./configs/traefik.yml:/etc/traefik/traefik.yml"
- "./configs/dynamic:/etc/traefik/dynamic"
- "./certificates/acme.json:/etc/traefik/acme/acme.json"
- "./certificates:/etc/traefik/ssl"
- "./log:/var/log/traefik"
# - traefik-logs:/var/log/traefik
# environment:
# - CF_DNS_API_TOKEN=3836286773f145fb8f7c0758f2ce8896hb9dusqpsm6b3scn
networks:
- back_network
- front_network
### crowdsec
# crowdsec:
# container_name: crowdsec
# hostname: crowdsec
# image: crowdsecurity/crowdsec
# environment:
# PGID: "1000"
# COLLECTIONS: "crowdsecurity/traefik crowdsecurity/http-cve"
# expose:
# - "8080"
# volumes:
# - ./log/crowdsec:/var/log/crowdsec:ro
# - ./crowdsec-db:/var/lib/crowdsec/data
# - ./log/auth.log:/var/log/auth.log:ro
# - ./crowdsec:/etc/crowdsec
# - ./log:/var/log/traefik:ro
# restart: unless-stopped
# labels:
# - traefik.enable=false
# networks:
# - front_network
# - back_network
### Certificates
certificat:
container_name: traefik-certificat
hostname: traefik-certificat
image: alpine:latest
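# traefik.me publishes a shared certificate valid for *.traefik.me;
# the command below downloads it into the mounted ssl directory when the container starts.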
command: sh -c "cd /etc/traefik/ssl
&& wget traefik.me/cert.pem -O cert.pem
&& wget traefik.me/privkey.pem -O privkey.pem"
volumes:
- "./certificates:/etc/traefik/ssl"
networks:
- front_network
# whoami
whoami:
container_name: traefik-whoami
hostname: traefik-whoami
image: traefik/whoami:latest
restart: unless-stopped
networks:
- front_network
labels:
- "traefik.enable=true"
- "traefik.docker.network=front_network"
# HTTP
- "traefik.http.routers.whoami-http.rule=Host(`whoami.traefik.me`)"
- "traefik.http.routers.whoami-http.entrypoints=http"
# HTTPS
- "traefik.http.routers.whoami-https.rule=Host(`whoami.traefik.me`)"
- "traefik.http.routers.whoami-https.entrypoints=https"
- "traefik.http.routers.whoami-https.tls=true"
# - "traefik.http.routers.whoami-https.middlewares=whoami-crowdsec"
# Middleware
# - "traefik.http.middlewares.whoami-crowdsec.plugin.crowdsec-bouncer-traefik-plugin.enabled=true"
# - "traefik.http.middlewares.whoami-crowdsec.plugin.crowdsec-bouncer-traefik-plugin.crowdseclapikey=3836286773f145fb8f7c0758f2ce8896hb9dusqpsm6b3scn"
# Service