From 1d2a75d8481384e0e72dc06b467969c7fe83c360 Mon Sep 17 00:00:00 2001
From: Lukas Zapletal
Date: Wed, 4 Oct 2023 10:19:55 +0200
Subject: [PATCH] chore: replace scripts with compose

---
 README.md                |  12 +++--
 docs/dev-environment.md  |  37 +++++----------
 scripts/README.md        |  66 ---------------------------
 scripts/kafka.clean.sh   |  10 ----
 scripts/kafka.conf       |  12 -----
 scripts/kafka.setup.sh   |  23 ----------
 scripts/kafka.start.sh   |  16 -------
 scripts/sources.clean.sh |  10 ----
 scripts/sources.conf     |  42 -----------------
 scripts/sources.seed.sh  |  99 ----------------------------------------
 scripts/sources.setup.sh |  16 -------
 scripts/sources.start.sh |   9 ----
 12 files changed, 18 insertions(+), 334 deletions(-)
 delete mode 100644 scripts/README.md
 delete mode 100755 scripts/kafka.clean.sh
 delete mode 100644 scripts/kafka.conf
 delete mode 100755 scripts/kafka.setup.sh
 delete mode 100755 scripts/kafka.start.sh
 delete mode 100755 scripts/sources.clean.sh
 delete mode 100644 scripts/sources.conf
 delete mode 100755 scripts/sources.seed.sh
 delete mode 100755 scripts/sources.setup.sh
 delete mode 100755 scripts/sources.start.sh

diff --git a/README.md b/README.md
index e576631e..8b9a3eee 100644
--- a/README.md
+++ b/README.md
@@ -7,9 +7,10 @@ Provisioning backend service for cloud.redhat.com.
 
 ## Components
 
-* pbapi - API backend service
-* pbworker - backend job processing worker
-* pbmigrate - database migration tool with embedded SQL scripts
+* pbackend api - API backend service
+* pbackend worker - backend job processing worker
+* pbackend statuser - backend sources processing worker (single instance)
+* pbackend migrate - database migration tool with embedded SQL scripts
 
 ## Building
 
@@ -25,8 +26,8 @@ make build
 
 Configuration is done via configuration files in `config/` directory, see [config/api.env.example](config/api.env.example) file for list of options with documentation. The application expects `config/app.env` file to be present, other programs from this git repo also look up additional file which will override values:
 
-* `pbworker` looks up `config/worker.env`
-* `pbmigrate` looks up `config/migrate.env`
+* `worker` looks up `config/worker.env`
+* `migrate` looks up `config/migrate.env`
 * `typesctl` looks up `config/typesctl.env`
 * integration (DAO) tests look up `config/test.env`
 
@@ -41,6 +42,7 @@ To run all the components from this repository, you will need:
 * Go compiler
 * PostgreSQL server with UUID module
 * GNU Makefile
+* [Backend services](https://github.com/RHEnVision/provisioning-compose)
 
 ```
 dnf install postgresql-server postgresql-contrib
diff --git a/docs/dev-environment.md b/docs/dev-environment.md
index d8dfaab1..18e513cd 100644
--- a/docs/dev-environment.md
+++ b/docs/dev-environment.md
@@ -66,18 +66,6 @@ A make utility is used, we test on GNU Make which is available on all supported
 
 There are few utilities that you will need like code linter, `goimports` or migration tool for creating new migrations. Install them with `make install-tools`.
 
-## Postgres
-
-Installation and configuration of Postgres is not covered neither in this article nor in the README. Full administrator access into an empty database is assumed as the application performs DDL commands during start. Or create a user with create table privileges.
-
-Tip: On MacOS, you can install Postgres on a remote Linux (or a small VM) and configure the application to connect there, instead of localhost.
-
-## Kafka
-
-In order to work on Kafka integrated services (statuser, sources), Kafka local deployment is needed. We do simply use the official Kafka binary that can be simply extracted and started.
-
-The [scripts](../scripts) directory contains README with further instructions and scripts which can download, extract, configure and start Kafka for local development.
-
 ## Compilation and startup
 
 Use `make` command to compile the main application, use `make run` or start it manually via `./pbapi`.
@@ -93,10 +81,6 @@ Notable records created via seed script:
 * Account number 13 with organization id 000013. This account is the first account (ID=1) and it is very often used on many examples (including this document). For [example](../scripts/rest_examples/http-client.env.json), RH-Identity-Header is an HTTP header that MUST be present in ALL requests, it is a base64-encoded JSON string which includes account number.
 * An example SSH public key.
 
-## Backend services
-
-The application integrates with multiple backend services:
-
 ## Worker
 
 Worker processes (`pbworker`) are responsible for running background jobs. There must be one or more processes running in order to pick up background jobs (e.g. launch reservations). There are multiple configuration options available via `WORKER_QUEUE`:
@@ -112,24 +96,25 @@ In stage/prod, we currently use `redis`.
 
 Statuser process (`pbstatuser`) is a custom executable that runs in a single instance responsible for performing sources availability checks. These are requested over HTTP from the Sources app (see below), messages are enqueued in Kafka where the statuser instance picks them up in batches, performs checking, and sends the results back to Kafka to Sources.
 
-## Sources
+## Backend services
 
-[Sources](https://github.com/RedHatInsights/sources-api-go) is an authentication inventory. Since it only requires Go, Redis and Postgres, we created a shell script that automatically checks out sources from git, compiles it, installs and creates postgres database, seeds data and starts the Sources application.
-
-Follow [instructions (section Sources service)](../scripts/README.md) to perform the setup. Note that configuration via `sources.local.conf` is **required** before the setup procedure. This has been written and tested for Fedora Linux, in other operating systems perform all the commands manually.
+The application integrates with multiple backend services:
 
-Tip: On MacOS, you can install Sources on a remote Fedora Linux (or a small VM) and configure the application to connect there, instead of localhost.
+* Postgres
+* Kafka
+* Redis
+* RBAC Service
+* Sources Service
+* Notifications Service
 
-Tip: Alternatively, the application supports connecting to the stage environment through a HTTP proxy. See [configuration example](../config/api.env.example) for more details. Make sure to use account number from stage environment instead of the pre-seeded account number 000013.
+All backend services can be started easily via [provisioning-compose](https://github.com/RHEnVision/provisioning-compose) on a local machine or remotely.
 
-## Image Builder
+### Image Builder on Stage
 
 Because Image Builder is more complex for installation, we do not recommend installing it on your local machine right now. Configure connection through HTTP proxy to the stage environment in `config/api.env`. See [configuration example](../config/api.env.example) for an example, you will need to ask someone from the company for real URLs for the service and the proxy.
-## Notifications
+### Notifications on Stage
 
-[Notifications](https://github.com/RedHatInsights/notifications-backend) service handles notifications across services and allows email templates, webhooks triggering and 3rd party apps integration (i.e slack)
-For local development, you can use [provisioning-compose](https://github.com/RHEnVision/provisioning-compose) to roll up notifications setup.
 When you just want to verify a notification kafka's messages, you can use `send-notification.http` to send a message directly to stage env, please notice that a cookie session is required, [click here](https://internal.console.stage.redhat.com/api/turnpike/session/) to generate one.
 
 ## Writing Go code
diff --git a/scripts/README.md b/scripts/README.md
deleted file mode 100644
index af8bcb28..00000000
--- a/scripts/README.md
+++ /dev/null
@@ -1,66 +0,0 @@
-# Development setup scripts
-
-This directory contains scripts which helps with initial setup for development setup which is container-less. Meaning, it will install software in your OS (PostgreSQL, Kafka) and checkout services from git (sources).
-
-These scripts are tested on Fedora Server or Workstation, latest stable release. You can use them as a step by step tutorial in case you are on a different OS.
-
-## Kafka service
-
-Kafka is needed for inter-service streaming and also for sources availability checks.
-
-### Configuration
-
-Review ./kafka.conf configuration. If you would like to do any changes, create ./kafka.local.conf which overrides the main configuration file and it is ignored by git. Currently, no changes are needed - you can even keep the unique cluster ID since for development purposes, only single-process instance is deployed.
-
-### Set up
-
-Run `./kafka.setup.sh` to download, extract and configure Kafka as a single-process deployment.
-
-### Start up
-
-Run `./kafka.start.sh` to start Kafka.
-
-### Clean up
-
-Run `./kafka.clean.sh` to start Kafka. This will also delete all data stored in `/tmp`!
-
-## Sources service
-
-Sources are needed to fetch authorization information for cloud providers.
-
-### Configuration
-
-Review ./sources.conf configuration. If you would like to do any changes, create ./sources.local.conf which overrides the main configuration file and it is ignored by git.
-
-The minimum configuration values required are:
-
-* ARN_ROLE: Amazon AWS role ARN string that will be used to seed the sources. A new Source, Application and Authentication records will be created with the credential. The Account/Tenant will be created with account_id/org_id 13/000013, therefore the same account number must be used in provisioning application. The resulting Source record will have ID 1 and name "Amazon provisioning". Example format: arn:aws:iam::123456789:role/redhat-provisioning-1
-* SUBSCRIPTION_ID: Azure subscription ID that will be used to seed the sources.
-* PROJECT_ID: GCP project ID that will be used to seed the sources.
-
-If you run the seed script for the first time, the authorization records will have database (primary key) IDs of 1, 2 and 3 for AWS EC2, Amazon and GCP respectively.
-
-### Set up
-
-Install database, redis, init database, create sources user and database. Can be executed multiple times. You will be asked for "sudo" password.
-
-    ./sources.setup.sh
-
-Checkout, compile and start sources backend. If you want to update the app,
-just do "git pull" and start again.
-
-### Start up
-
-    ./sources.start.sh
-
-### Seed
-
-Populate database with some data, with the sources app running do:
-
-    ./sources.seed.sh
-
-### Clean up
-
-When you want to start over, to delete the database and user and git checkouts:
-
-    ./sources.clean.sh
diff --git a/scripts/kafka.clean.sh b/scripts/kafka.clean.sh
deleted file mode 100755
index 92fb97d6..00000000
--- a/scripts/kafka.clean.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-BASEDIR=$(dirname $0)
-source $BASEDIR/kafka.conf
-[[ -f $BASEDIR/kafka.local.conf ]] && source $BASEDIR/kafka.local.conf
-
-if [[ $CLEAN_CHECKOUTS -eq 1 ]]; then
-  rm -rf $BASEDIR/kafka/
-fi
-
-rm -rf /tmp/kraft-combined-logs/
diff --git a/scripts/kafka.conf b/scripts/kafka.conf
deleted file mode 100644
index 03793542..00000000
--- a/scripts/kafka.conf
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Do not edit this file, create kafka.local.conf instead.
-#
-
-# Scala version
-SVERSION=2.13
-# Kafka version
-VERSION=3.4.0
-# Unique ID generated by "bin/kafka-storage.sh random-uuid"
-UUID=sW-oBVubQ72CW9Z-QSXeEA
-# Hostname when creating topics
-KAFKA_HOST=localhost:9092
diff --git a/scripts/kafka.setup.sh b/scripts/kafka.setup.sh
deleted file mode 100755
index 4be114e6..00000000
--- a/scripts/kafka.setup.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-BASEDIR=$(dirname $0)
-source $BASEDIR/kafka.conf
-[[ -f $BASEDIR/kafka.local.conf ]] && source $BASEDIR/kafka.local.conf
-
-test -d $BASEDIR/kafka || mkdir $BASEDIR/kafka
-
-pushd $BASEDIR/kafka
-  if [[ -f kafka.tgz ]]; then
-    echo "Kafka already downloaded, run ./kafka.start.sh"
-  else
-    echo "Downloading Kafka..."
-    curl -f -L -o kafka.tgz "https://www.apache.org/dist/kafka/$VERSION/kafka_$SVERSION-$VERSION.tgz"
-    echo "Extracting Kafka..."
-    if [[ -f kafka.tgz ]]; then
-      tar -xzf kafka.tgz --strip 1
-      echo "All done, run ./kafka.start.sh"
-    else
-      echo "Kafka curl failed"
-    fi
-  fi
-popd
-
diff --git a/scripts/kafka.start.sh b/scripts/kafka.start.sh
deleted file mode 100755
index c4fd3270..00000000
--- a/scripts/kafka.start.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-BASEDIR=$(dirname $0)
-source $BASEDIR/kafka.conf
-[[ -f $BASEDIR/kafka.local.conf ]] && source $BASEDIR/kafka.local.conf
-
-echo "Formatting journal..."
-$BASEDIR/kafka/bin/kafka-storage.sh format -t $UUID -c $BASEDIR/kafka/config/kraft/server.properties
-
-echo "Creating topics..."
-# Start as background shell jobs, they attempt to reconnect until it succeeds
-$BASEDIR/kafka/bin/kafka-topics.sh --create --topic platform.provisioning.internal.availability-check --bootstrap-server $KAFKA_HOST &
-$BASEDIR/kafka/bin/kafka-topics.sh --create --topic platform.sources.status --bootstrap-server $KAFKA_HOST &
-$BASEDIR/kafka/bin/kafka-topics.sh --create --topic platform.notifications.ingress --bootstrap-server $KAFKA_HOST &
-
-echo "Starting Kafka..."
-$BASEDIR/kafka/bin/kafka-server-start.sh $BASEDIR/kafka/config/kraft/server.properties
diff --git a/scripts/sources.clean.sh b/scripts/sources.clean.sh
deleted file mode 100755
index 90aa92c8..00000000
--- a/scripts/sources.clean.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-BASEDIR=$(dirname $0)
-source $BASEDIR/sources.conf
-[[ -f $BASEDIR/sources.local.conf ]] && source $BASEDIR/sources.local.conf
-
-if [[ $CLEAN_CHECKOUTS -eq 1 ]]; then
-  rm -rf $BASEDIR/sources-api-go/ $BASEDIR/sources-database-populator/
-fi
-sudo su - postgres -c "dropdb $DATABASE_NAME"
-sudo su - postgres -c "dropuser $DATABASE_USER"
diff --git a/scripts/sources.conf b/scripts/sources.conf
deleted file mode 100644
index 885a09b1..00000000
--- a/scripts/sources.conf
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Do not edit this file, create sources.local.conf instead.
-#
-
-# Some random port because 8000 is typically already used.
-export PORT=8131
-export METRICS_PORT=9131
-export DATABASE_HOST=localhost
-export DATABASE_PORT=5432
-export DATABASE_NAME=sources_devel
-export DATABASE_USER=sources_user
-# Local users are considered trusted via pg_hba.conf but
-# password must be present otherwise sources won'b boot.
-export DATABASE_PASSWORD=trusted
-export BYPASS_RBAC=true
-export REDIS_CACHE_HOST=localhost
-export REDIS_CACHE_PORT=6379
-export SOURCES_API_HOST=http://localhost
-export SOURCES_API_PORT=$PORT
-# Delete git working trees during sources.clean.sh
-export CLEAN_CHECKOUTS=1
-
-# Default account to create in sources app (use the same account to use the ARN).
-export ACCOUNT_ID=13
-export ORG_ID=000013
-
-# TENANT ACCOUNT INFORMATION - MUST BE OVERWRITTEN IN sources.local.conf!
-# See docs/ for more info.
-
-# Tenant AWS ARN string for EC2.
-# Example: aws:iam::123456789:role/redhat-provisioning-1
-export ARN_ROLE=
-
-# Azure Subscription ID. A subscription that was allowed access through Azure Lighthouse.
-# Can be found on tenant's account, or in the service account on Azure.
-# Example: 4d3df606-608d-4e5a-ab17-7b3d71d775a6
-export SUBSCRIPTION_ID=
-
-# Google Cloud Platform project ID. Can be found on tenant's account, or in the
-# credentials JSON from the service account.
-# Example: my-project-13554
-export PROJECT_ID=
diff --git a/scripts/sources.seed.sh b/scripts/sources.seed.sh
deleted file mode 100755
index 1d91e617..00000000
--- a/scripts/sources.seed.sh
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/bin/bash
-BASEDIR=$(dirname $0)
-source $BASEDIR/sources.conf
-[[ -f $BASEDIR/sources.local.conf ]] && source $BASEDIR/sources.local.conf
-
-if [[ -z "$ARN_ROLE" ]]; then
-  echo "ARN_ROLE must be defined in sources.local.conf!"
-  exit 1
-fi
-
-echo "Creating $ARN_ROLE with account_id $ACCOUNT_ID org_id $ORG_ID"
-IDENTITY=$($BASEDIR/identity_header.sh $ACCOUNT_ID $ORG_ID)
-
-curl --location -g --request POST "http://localhost:$PORT/api/sources/v3.1/bulk_create" \
---header "$IDENTITY" \
--d "$(cat <