# Workflow run: "build container image and push to gcloud" (#15)
# (header text pasted from the GitHub Actions web UI, kept here as a comment
#  so the file remains valid YAML)

# Builds the Hugo site, pushes a container image to Google Artifact Registry,
# and syncs the generated site to a Hetzner S3-compatible bucket.
#
# NOTE(review): PROJECT_ID, SERVICE_ACCOUNT, WORKLOAD_IDENTITY_PROVIDER,
# CONTAINER_REGISTRY_URL, AR_URL and IMAGE_NAME are read via `env.*` below but
# are not defined anywhere in this file — confirm they are provided at the
# repository/organization variable level, otherwise these expand to empty.
name: build container image and push to gcloud

on:
  push:
    branches:
      - '*'

# google-github-actions/auth@v2 with workload identity federation mints an
# OIDC token, which requires the `id-token: write` permission; without it the
# auth step fails. `contents: read` is needed for checkout once permissions
# are set explicitly (explicit permissions replace the default grant).
permissions:
  contents: read
  id-token: write

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repo
        # pinned to the current major release instead of the mutable,
        # deprecated `master` ref
        uses: actions/checkout@v4

      - name: Build Website
        # TODO: pick a version?
        uses: jakejarvis/hugo-build-action@master
        with:
          args: --minify

      # so we can inspect what is being produced
      - name: Upload Website as Artifact
        # `@master` resolved to a retired release on a deprecated runtime;
        # v4 is the supported major with the same inputs
        uses: actions/upload-artifact@v4
        with:
          name: website
          path: './public'

      - name: Google Auth
        id: auth
        uses: 'google-github-actions/auth@v2'
        with:
          token_format: 'access_token'
          project_id: ${{ env.PROJECT_ID }}
          service_account: ${{ env.SERVICE_ACCOUNT }}
          workload_identity_provider: ${{ env.WORKLOAD_IDENTITY_PROVIDER }}

      - name: Docker Auth
        id: docker-auth
        # v1 runs on the deprecated Node12 runtime; v3 is the current major
        # with identical inputs (registry/username/password)
        uses: 'docker/login-action@v3'
        with:
          username: 'oauth2accesstoken'
          password: '${{ steps.auth.outputs.access_token }}'
          registry: '${{ env.CONTAINER_REGISTRY_URL }}-docker.pkg.dev'

      - name: Build and Push Container
        run: |-
          docker build -t "${{ env.AR_URL }}/${{ env.IMAGE_NAME }}:${{ github.sha }}" ./
          docker push "${{ env.AR_URL }}/${{ env.IMAGE_NAME }}:${{ github.sha }}"

      # upload to s3
      - name: Install s3cmd
        run: |
          pip install s3cmd

      - name: Configure s3cmd
        # secrets are written to the runner-local ~/.s3cfg; the runner VM is
        # discarded after the job, so nothing persists
        run: |
          echo "[default]" > ~/.s3cfg
          echo "access_key = ${{ secrets.S3_KEY_ID }}" >> ~/.s3cfg
          echo "secret_key = ${{ secrets.S3_SECRET_KEY }}" >> ~/.s3cfg
          echo "host_base = fsn1.your-objectstorage.com" >> ~/.s3cfg
          echo "host_bucket = %(bucket)s.fsn1.your-objectstorage.com" >> ~/.s3cfg

      - name: Upload files to Hetzner S3
        # with this configuration, the bucket should be specified as e.g. "s3://bucket1"
        # NOTE(review): `sync ./public s3://bucket` uploads into a `public/`
        # prefix inside the bucket (no trailing slash on the source) — confirm
        # that is intended
        run: |
          s3cmd sync ./public ${{ secrets.S3_BUCKET }} --delete-removed