Laravel 2

#!/bin/bash

# DigitalOcean API Token:
DO_API_TOKEN="your_digitalocean_api_token"

# Set the parameters for the new droplet
DROPLET_NAME="example-droplet"
DROPLET_REGION="fra1"  # Frankfurt region code
DROPLET_SIZE="s-4vcpu-8gb"  # 4 vCPUs, 8GB Memory, adjust as needed
DROPLET_IMAGE="ubuntu-20-04-x64"  # Choose the desired OS image slug

# Volume parameters
VOLUME_NAME="docker-data"  # Updated volume name
VOLUME_SIZE=50  # Volume size in GB

# Additional volume parameters
ADDITIONAL_VOLUME_NAME="app-data"  # Additional volume name
ADDITIONAL_VOLUME_SIZE=50  # Additional volume size in GB

# Traefik Email
TRAEFIK_EMAIL="admin@example.com"  # Replace with the email used for Let's Encrypt registration

# DigitalOcean API endpoint
DO_API_ENDPOINT="https://api.digitalocean.com/v2"

# Webhook URLs:
WEBHOOK_URL="https://cloudmonitor.dk/webhook"
PRICE_WEBHOOK_URL="https://cloudmonitor.dk/webhook/price"

# Firewall parameters
FIREWALL_NAME="example-firewall"
TRUSTED_SOURCES=("your_allowed_ip" "another_allowed_ip")

# Timeout duration in seconds (5 minutes)
TIMEOUT_DURATION=300

# Enable backups for the droplet
BACKUPS_ENABLED=true

# Create the first volume (docker-data)
volume_response=$(curl -X POST -H "Content-Type: application/json" \
     -H "Authorization: Bearer $DO_API_TOKEN" \
     -d '{"size_gigabytes": '"$VOLUME_SIZE"', "name": "'"$VOLUME_NAME"'", "region": "'"$DROPLET_REGION"'"}' \
     "$DO_API_ENDPOINT/volumes")

# Check if the first volume creation request was successful
if [ "$(echo "$volume_response" | jq -r '.volume.id')" != "null" ]; then
    volume_id=$(echo "$volume_response" | jq -r '.volume.id')
    echo "Volume created successfully with ID: $volume_id"

    # Create the second volume (app-data)
    additional_volume_response=$(curl -X POST -H "Content-Type: application/json" \
         -H "Authorization: Bearer $DO_API_TOKEN" \
         -d '{"size_gigabytes": '"$ADDITIONAL_VOLUME_SIZE"', "name": "'"$ADDITIONAL_VOLUME_NAME"'", "region": "'"$DROPLET_REGION"'"}' \
         "$DO_API_ENDPOINT/volumes")

    # Check if the second volume creation request was successful
    if [ "$(echo "$additional_volume_response" | jq -r '.volume.id')" != "null" ]; then
        additional_volume_id=$(echo "$additional_volume_response" | jq -r '.volume.id')
        echo "Additional volume created successfully with ID: $additional_volume_id"

        # Create a new droplet with both volumes and enable backups
        droplet_response=$(curl -X POST -H "Content-Type: application/json" \
             -H "Authorization: Bearer $DO_API_TOKEN" \
             -d '{"name": "'"$DROPLET_NAME"'", "region": "'"$DROPLET_REGION"'", "size": "'"$DROPLET_SIZE"'", "image": "'"$DROPLET_IMAGE"'", "volumes": ["'$volume_id'", "'$additional_volume_id'"], "backups": '$BACKUPS_ENABLED'}' \
             "$DO_API_ENDPOINT/droplets")

        # Check if the droplet creation request was successful
        if [ "$(echo "$droplet_response" | jq -r '.droplet.id')" != "null" ]; then
            droplet_id=$(echo "$droplet_response" | jq -r '.droplet.id')
            echo "Droplet created successfully with ID: $droplet_id"

            # Get droplet IP address within the timeout
            start_time=$(date +%s)
            while true; do
                current_time=$(date +%s)
                elapsed_time=$((current_time - start_time))

                if [ $elapsed_time -ge $TIMEOUT_DURATION ]; then
                    echo "Timeout reached. Unable to get droplet IP within $TIMEOUT_DURATION seconds." | tee /dev/stderr | curl -X POST -H "Content-Type: text/plain" -d @- $WEBHOOK_URL
                    exit 1
                fi

                droplet_ip=$(curl -s -H "Authorization: Bearer $DO_API_TOKEN" \
                    "$DO_API_ENDPOINT/droplets/$droplet_id" | jq -r '.droplet.networks.v4[] | select(.type == "public") | .ip_address' | head -n 1)

                if [ -n "$droplet_ip" ] && [ "$droplet_ip" != "null" ]; then
                    echo "Droplet is reachable at IP: $droplet_ip"
                    break
                else
                    echo "Droplet IP not available yet. Retrying in 15 seconds..." | tee /dev/stderr | curl -X POST -H "Content-Type: text/plain" -d @- $WEBHOOK_URL
                    sleep 15
                fi
            done

            # Create a DigitalOcean firewall
            trusted_sources_json=$(printf '"%s",' "${TRUSTED_SOURCES[@]}")
            trusted_sources_json=${trusted_sources_json%,}
            firewall_rules='"inbound_rules": ['
            for port in 80 443 8080; do
                firewall_rules+='{"protocol": "tcp", "ports": "'$port'", "sources": {"addresses": ["0.0.0.0/0"]}},'
            done
            for port in 22 6085; do
                firewall_rules+='{"protocol": "tcp", "ports": "'$port'", "sources": {"addresses": ['$trusted_sources_json']}},'
            done
            firewall_rules=${firewall_rules%,}']'

            firewall_response=$(curl -X POST -H "Content-Type: application/json" \
                -H "Authorization: Bearer $DO_API_TOKEN" \
                -d '{"name": "'"$FIREWALL_NAME"'", "droplet_ids": ['"$droplet_id"'], '"$firewall_rules"'}' \
                "$DO_API_ENDPOINT/firewalls")

            # Check if the firewall creation request was successful
            if [ "$(echo "$firewall_response" | jq -r '.firewall.id')" != "null" ]; then
                firewall_id=$(echo "$firewall_response" | jq -r '.firewall.id')
                echo "Firewall created successfully with ID: $firewall_id"
            else
                error_message=$(echo "$firewall_response" | jq -r '.message')
                echo "Error creating firewall: $error_message" | tee /dev/stderr | curl -X POST -H "Content-Type: text/plain" -d @- $WEBHOOK_URL
            fi

            # Stream setup.sh to the droplet and run it there, passing the Traefik email
            # and the API token as positional arguments; capture output and send it to the webhook
            echo "Running script on the droplet..."
            curl -sSL https://path-to-your/setup.sh | \
                ssh -o StrictHostKeyChecking=accept-new "root@$droplet_ip" "bash -s -- '$TRAEFIK_EMAIL' '$DO_API_TOKEN'" 2>&1 | \
                tee /dev/stderr | curl -X POST -H "Content-Type: text/plain" -d @- $WEBHOOK_URL

            # Fetch droplet pricing information
            droplet_price=$(curl -s -H "Authorization: Bearer $DO_API_TOKEN" \
                "$DO_API_ENDPOINT/droplets/$droplet_id" | jq -r '.droplet.size.price_monthly')

            # Report droplet pricing to the price webhook
            echo "Droplet pricing information: $droplet_price" | curl -X POST -H "Content-Type: text/plain" -d @- $PRICE_WEBHOOK_URL

            echo "Script executed successfully."

        else
            error_message=$(echo "$droplet_response" | jq -r '.message')
            echo "Error creating droplet: $error_message" | tee /dev/stderr | curl -X POST -H "Content-Type: text/plain" -d @- $WEBHOOK_URL
        fi

    else
        error_message=$(echo "$additional_volume_response" | jq -r '.message')
        echo "Error creating additional volume: $error_message" | tee /dev/stderr | curl -X POST -H "Content-Type: text/plain" -d @- $WEBHOOK_URL
    fi

else
    error_message=$(echo "$volume_response" | jq -r '.message')
    echo "Error creating volume: $error_message" | tee /dev/stderr | curl -X POST -H "Content-Type: text/plain" -d @- $WEBHOOK_URL
fi
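
Assuming the provisioning script above is saved as, say, create_droplet.sh (the file name is illustrative), a typical run looks like this; it only needs curl and jq on the machine it runs from:

chmod +x create_droplet.sh
./create_droplet.sh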

Set up server

create_docker_user.sh

#!/bin/bash

# Customizable Variables
DOCKER_USER="docker"
DOCKER_PASSWORD_LENGTH=12

# Create Docker user
useradd -m -s /bin/bash "$DOCKER_USER" && \
docker_password=$(openssl rand -base64 "$DOCKER_PASSWORD_LENGTH") && \
echo "$DOCKER_USER:$docker_password" | chpasswd && \
groupadd -f docker && \
usermod -aG docker "$DOCKER_USER" && \
echo "Docker user created with username: $DOCKER_USER and password: $docker_password"
# Note: groupadd -f ensures the docker group exists even though Docker itself is installed later

# Restrict SSH access to only the "docker" user
echo "AllowUsers $DOCKER_USER" >> /etc/ssh/sshd_config
systemctl restart ssh

# Generate Ed25519 SSH key pair for Docker user
su - "$DOCKER_USER" -c "mkdir -p ~/.ssh && chmod 700 ~/.ssh && ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519 -N ''"

# Display information
echo "SSH key pair for Docker user created at: /home/$DOCKER_USER/.ssh/id_ed25519"
echo "Password for Docker user: $docker_password"

install_docker.sh

#!/bin/bash

# Customizable Variables
DOCKER_DATA_DIR="/mnt/docker-data"

# Install Docker and Docker Compose
apt update && apt install -y docker.io docker-compose && \
systemctl enable docker && systemctl start docker

# Configure Docker data directory
mkdir -p "$DOCKER_DATA_DIR" && \
echo '{"data-root": "'"$DOCKER_DATA_DIR"'"}' > /etc/docker/daemon.json && \
systemctl restart docker

# Display information
echo "Docker and Docker Compose installed and configured."

install_do_metrics_agent.sh

#!/bin/bash

# Digital Ocean Metrics Agent Variables
DO_METRICS_AGENT_REPO_FILE="/etc/apt/sources.list.d/digitalocean-agent.list"
DO_METRICS_AGENT_KEY_URL="https://repos.insights.digitalocean.com/sonar-agent.asc"

# Install Digital Ocean Metrics Agent
echo "deb https://repos.insights.digitalocean.com/apt/do-agent/ main main" | tee "$DO_METRICS_AGENT_REPO_FILE"
curl "$DO_METRICS_AGENT_KEY_URL" | apt-key add -
apt update
apt install -y do-agent

# Start and enable the metrics agent service
systemctl enable do-agent
systemctl start do-agent

# Add cronjob for regular updates
echo "*/5 * * * * root /usr/bin/do-agent --numeric" > /etc/cron.d/do-agent

# Display information
echo "Digital Ocean Metrics Agent installed and configured for monitoring."

create_traefik_service.sh

#!/bin/bash

# Customizable Variables
TRAEFIK_EMAIL="$1"

# Check if TRAEFIK_EMAIL is provided as an argument
if [ -z "$TRAEFIK_EMAIL" ]; then
  echo "Usage: $0 <TRAEFIK_EMAIL>"
  exit 1
fi

# Create Traefik service with Varnish, Varnish Dashboard, and Traefik Dashboard
traefik_dir="/home/docker/services/traefik"
mkdir -p "$traefik_dir" && \
cat <<EOL > "$traefik_dir/docker-compose.yml"
version: "3.8"
services:
  traefik:
    image: traefik:latest
    container_name: traefik
    command:
      - --api.insecure=true
      - --providers.docker=true
      - --providers.docker.exposedbydefault=false
      - --entrypoints.web.address=:80
      - --entrypoints.websecure.address=:443
      - --certificatesresolvers.myresolver.acme.httpchallenge=true
      - --certificatesresolvers.myresolver.acme.httpchallenge.entrypoint=web
      - --certificatesresolvers.myresolver.acme.email=$TRAEFIK_EMAIL
      - --certificatesresolvers.myresolver.acme.storage=/letsencrypt/acme.json
      - --log.level=INFO
      - --log.filePath=/letsencrypt/traefik.log
    ports:
      - "80:80"
      - "443:443"
      - "8080:8080"  # Expose Traefik Dashboard on port 8080
    networks:
      - traefik_proxy
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro  # Required for the Docker provider
      - "$traefik_dir:/letsencrypt"  # Persist ACME certificates and the log on the host
      - "/mnt/app-data:/app-data"

  varnish:
    image: varnish:latest
    container_name: varnish
    depends_on:
      - traefik
    networks:
      - traefik_proxy
    ports:
      - 6081:6081
    environment:
      - VARNISH_BACKEND_HOST=traefik
      - VARNISH_BACKEND_PORT=80

  varnish-dashboard:
    image: eeacms/varnish-dashboard:latest
    container_name: varnish-dashboard
    depends_on:
      - varnish
    ports:
      - 6085:80  # Expose Varnish Dashboard on port 6085

networks:
  traefik_proxy:
    external: true
EOL

# Create the external proxy network (if it does not exist yet), then start Traefik, Varnish,
# Varnish Dashboard, and the Traefik Dashboard
docker network create traefik_proxy 2>/dev/null || true
cd "$traefik_dir" && docker-compose up -d

# Create update script for Traefik
update_traefik_script="$traefik_dir/update_traefik.sh"
cat <<EOL > "$update_traefik_script"
#!/bin/bash

# Update Traefik container
docker-compose pull
docker-compose up -d
EOL

# Make update script executable
chmod +x "$update_traefik_script"

# Add cronjob for Traefik updates (run every day at 3 AM, for example)
# Using /etc/cron.d avoids overwriting any existing root crontab
echo "0 3 * * * docker $update_traefik_script" > /etc/cron.d/traefik-update

echo "Traefik service created and configured."

update_system.sh

#!/bin/bash

# Update the system
apt update && apt upgrade -y

# Restart Docker service (if installed)
systemctl restart docker

echo "System updated."

Install

# Run each script in the correct order
./create_docker_user.sh
./install_docker.sh
./install_do_metrics_agent.sh
./create_traefik_service.sh "$1"
./update_system.sh
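
Assuming the five scripts above are wrapped in a single entry script (for instance the setup.sh that the droplet script streams over SSH; the name is illustrative), it is invoked with the Traefik e-mail as the first argument:

chmod +x *.sh
./setup.sh admin@example.com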

---


# GitLab CI/CD Pipeline Script

# IMPORTANT: Define the following variables in your GitLab CI/CD project settings:
# - SERVER: SSH hostname or IP address of the remote server (e.g., example.com or example.com:2222 for a specific port)
#            You can set a specific server value for each branch by creating variables like:
#            SERVER_master, SERVER_main, SERVER_syst, SERVER_test
# - SERVER_BRANCH_VARIABLE: Used internally to differentiate SERVER per branch
# - SSH_PASSWORD: Password for SSH authentication (Optional, can be used instead of SSH key)
# - SSH_KEY_FILE: Path to the SSH key file for authentication (Optional, can be used instead of password)
# - CI_REGISTRY_USER: Docker registry username for authentication
# - CI_REGISTRY_PASSWORD: Docker registry password or access token for authentication
# - PROJECT_DOMAINS: Comma-separated list of project domains for Traefik (e.g., domain1.com,domain2.com)
# - ENV: Key-value pairs for additional environment variables (each pair on a separate line)
#
# To add these variables:
# 1. Navigate to your GitLab project on the GitLab website.
# 2. Go to "Settings" in the left sidebar.
# 3. Select "CI / CD" in the submenu.
# 4. Scroll down to the "Variables" section.
# 5. Add the required variables with their corresponding values.
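#
# As an alternative to the UI, variables can also be created through the GitLab API,
# for example (project ID and access token are placeholders):
#   curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" \
#        "https://gitlab.com/api/v4/projects/<project_id>/variables" \
#        --form "key=SERVER_main" --form "value=example.com"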

# Generate SSH Key:
# To use an SSH key for authentication, you need to generate one and add it to GitLab.
# Run the following command on your local machine to generate an SSH key pair:
# $ ssh-keygen -t rsa -b 4096 -C "your_email@example.com"
# This command will prompt you to enter a passphrase. You can press Enter for no passphrase.
# The SSH key pair will be generated in the default location (~/.ssh/id_rsa and ~/.ssh/id_rsa.pub).
# Add the contents of the public key (~/.ssh/id_rsa.pub) to the GitLab SSH keys section.
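#
# The private key is what the pipeline uses (e.g. via SSH_KEY_FILE), and the matching public key
# must also be authorized for the docker user on the deployment server, for example:
#   ssh-copy-id -i ~/.ssh/id_rsa.pub docker@your-server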

stages:
  - build
  - test
  - compile
  - deploy
  - release

before_script:
  - apt-get update -qy
  - apt-get install -y unzip

cache:
  paths:
    - vendor/

# Build Stage
build:
  stage: build
  script:
    - echo "Building the application"
    - composer install --no-ansi --no-interaction --no-progress --no-scripts --optimize-autoloader
    - php artisan key:generate

# Test Stage
test:
  stage: test
  script:
    - echo "Running tests"
    - vendor/bin/phpstan analyze
    - vendor/bin/phpunit --coverage-text --colors=never

# Compile Stage
compile:
  stage: compile
  script:
    - rm -f .env
    - npm install
    - npm run build
    - docker build -t $CI_REGISTRY/$CI_PROJECT_NAME:$CI_COMMIT_REF_NAME .
  only:
    - master
    - main
    - syst
    - test

# Deploy Stage
deploy:
  stage: deploy
  script:
    - echo "$CI_REGISTRY_PASSWORD" | docker login -u "$CI_REGISTRY_USER" --password-stdin $CI_REGISTRY
    - docker push $CI_REGISTRY/$CI_PROJECT_NAME:$CI_COMMIT_REF_NAME
  only:
    - master
    - main
    - syst
    - test

# Release Stage
release:
  stage: release
  script:
    - echo "Docker Compose Step"
    - |
      # Differentiate SERVER per branch (e.g., SERVER_master, SERVER_main, SERVER_syst, SERVER_test)
      SERVER_BRANCH_VARIABLE="SERVER_$CI_COMMIT_REF_NAME"
      SERVER="${!SERVER_BRANCH_VARIABLE:-$SERVER}"
      # Split SERVER into host and optional port (e.g. "example.com:2222"); default to port 22
      REMOTE_HOST="${SERVER%%:*}"
      REMOTE_PORT="${SERVER##*:}"
      [ "$REMOTE_PORT" = "$SERVER" ] && REMOTE_PORT=22
      SSH_OPTIONS=""
      if [ -n "$SSH_PASSWORD" ]; then
        SSH_OPTIONS="$SSH_OPTIONS -o PasswordAuthentication=yes"
      elif [ -n "$SSH_KEY_FILE" ]; then
        SSH_OPTIONS="$SSH_OPTIONS -i $SSH_KEY_FILE"
      fi
      # Values substituted into the template are computed locally; \$-escaped variables are evaluated on the server
      DOCKER_IMAGE="$CI_REGISTRY/$CI_PROJECT_NAME:$CI_COMMIT_REF_NAME"
      TRAEFIK_LABEL_NAME=$(echo "$DOCKER_IMAGE" | sed 's/[^a-zA-Z0-9]/_/g')
      PROJECT_DOMAINS_ESCAPED=$(echo "$PROJECT_DOMAINS" | sed 's/,/`,`/g')
      ssh $SSH_OPTIONS -p "$REMOTE_PORT" docker@"$REMOTE_HOST" "cd /home/docker/apps/$CI_PROJECT_NAME/$CI_COMMIT_REF_NAME && \
        if [ ! -f docker-compose.yml ]; then \
          curl -o docker-compose.yml https://cloudmonitor.dk/docker-compose.yml && \
          sed -i 's|{{DOCKER_IMAGE}}|$DOCKER_IMAGE|' docker-compose.yml && \
          sed -i 's|{{TRAEFIK_LABEL_NAME}}|$TRAEFIK_LABEL_NAME|' docker-compose.yml && \
          if [ -f .env ]; then \
            DB_PASSWORD=\$(grep '^DB_PASSWORD=' .env | cut -d '=' -f2); \
          else \
            DB_PASSWORD=\$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 16 | head -n 1); \
            echo \"DB_PASSWORD=\$DB_PASSWORD\" >> .env; \
          fi && \
          sed -i \"s|{{DB_PASSWORD}}|\$DB_PASSWORD|\" docker-compose.yml && \
          sed -i 's|{{DOMAINS}}|$PROJECT_DOMAINS_ESCAPED|' docker-compose.yml && \
          docker-compose up -d; \
        fi"

    - echo "Dotenv Step"
    - |
      # Differentiate SERVER per branch (e.g., SERVER_master, SERVER_main, SERVER_syst, SERVER_test)
      SERVER_BRANCH_VARIABLE="SERVER_$CI_COMMIT_REF_NAME"
      SERVER="${!SERVER_BRANCH_VARIABLE:-$SERVER}"
      # Split SERVER into host and optional port (e.g. "example.com:2222"); default to port 22
      REMOTE_HOST="${SERVER%%:*}"
      REMOTE_PORT="${SERVER##*:}"
      [ "$REMOTE_PORT" = "$SERVER" ] && REMOTE_PORT=22
      SSH_OPTIONS=""
      if [ -n "$SSH_PASSWORD" ]; then
        SSH_OPTIONS="$SSH_OPTIONS -o PasswordAuthentication=yes"
      elif [ -n "$SSH_KEY_FILE" ]; then
        SSH_OPTIONS="$SSH_OPTIONS -i $SSH_KEY_FILE"
      fi
      ssh $SSH_OPTIONS -p "$REMOTE_PORT" docker@"$REMOTE_HOST" "cd /home/docker/apps/$CI_PROJECT_NAME/$CI_COMMIT_REF_NAME && \
        if [ ! -f .env ]; then touch .env && echo 'DB_USERNAME=root' >> .env; fi && \
        if grep -q '^DB_PASSWORD=' .env; then \
          echo 'DB_PASSWORD already exists in .env'; \
        else \
          DB_PASSWORD=\$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 16 | head -n 1); \
          echo \"DB_PASSWORD=\$DB_PASSWORD\" >> .env; \
        fi && \
        echo \"$ENV\" | while IFS= read -r env_var; do \
          echo \"\$env_var\" >> .env; \
        done"

    - echo "Docker Compose Pull Step"
    - |
      # Differentiate SERVER per branch (e.g., SERVER_master, SERVER_main, SERVER_syst, SERVER_test)
      SERVER_BRANCH_VARIABLE="SERVER_$CI_COMMIT_REF_NAME"
      SERVER="${!SERVER_BRANCH_VARIABLE:-$SERVER}"
      # Split SERVER into host and optional port (e.g. "example.com:2222"); default to port 22
      REMOTE_HOST="${SERVER%%:*}"
      REMOTE_PORT="${SERVER##*:}"
      [ "$REMOTE_PORT" = "$SERVER" ] && REMOTE_PORT=22
      SSH_OPTIONS=""
      if [ -n "$SSH_PASSWORD" ]; then
        SSH_OPTIONS="$SSH_OPTIONS -o PasswordAuthentication=yes"
      elif [ -n "$SSH_KEY_FILE" ]; then
        SSH_OPTIONS="$SSH_OPTIONS -i $SSH_KEY_FILE"
      fi
      ssh $SSH_OPTIONS -p "$REMOTE_PORT" docker@"$REMOTE_HOST" "cd /home/docker/apps/$CI_PROJECT_NAME/$CI_COMMIT_REF_NAME && \
        docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY && \
        docker-compose pull && \
        docker-compose up -d"
  only:
    - master
    - main
    - syst
    - test

# Add additional stages and jobs as needed

version: '3'
services:
  snapshotter:
    image: alpine:latest
    env_file:
      - .env  # Must define DIGITALOCEAN_API_TOKEN and VOLUME_NAMES (e.g. VOLUME_NAMES="volume1 volume2 volume3")
    entrypoint: /bin/sh
    # "$$" keeps Docker Compose from interpolating variables meant for the container's shell
    command:
      - -c
      - |
        apk --no-cache add doctl coreutils
        export DIGITALOCEAN_ACCESS_TOKEN="$$DIGITALOCEAN_API_TOKEN"  # doctl reads DIGITALOCEAN_ACCESS_TOKEN
        CURRENT_DATE=$$(date +"%Y%m%d")
        CUTOFF_DATE=$$(date --date="30 days ago" +"%Y%m%d")

        # Create snapshots
        for VOLUME_NAME in $$VOLUME_NAMES; do
          VOLUME_ID=$$(doctl compute volume list --format ID,Name --no-header | awk -v name="$$VOLUME_NAME" '$$2 == name {print $$1}')
          SNAPSHOT_NAME="$$VOLUME_NAME-$$CURRENT_DATE"
          echo "Creating snapshot for volume: $$VOLUME_NAME with name: $$SNAPSHOT_NAME"
          doctl compute volume snapshot "$$VOLUME_ID" --snapshot-name "$$SNAPSHOT_NAME"
        done

        # Delete snapshots older than 30 days
        for VOLUME_NAME in $$VOLUME_NAMES; do
          doctl compute snapshot list --format ID,Name,CreatedAt --no-header | \
            grep " $$VOLUME_NAME-" | \
            while read -r SNAPSHOT_ID SNAPSHOT_NAME SNAPSHOT_CREATED; do
              SNAPSHOT_DATE=$$(echo "$$SNAPSHOT_CREATED" | cut -dT -f1 | tr -d '-')
              if [ "$$SNAPSHOT_DATE" -lt "$$CUTOFF_DATE" ]; then
                echo "Deleting old snapshot: $$SNAPSHOT_NAME"
                doctl compute snapshot delete "$$SNAPSHOT_ID" --force
              fi
            done
        done

        echo "Snapshots creation and deletion complete!"
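
A minimal .env for the snapshotter, using the volume names created earlier (values are placeholders):

DIGITALOCEAN_API_TOKEN=your_digitalocean_api_token
VOLUME_NAMES="docker-data app-data"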

#!/bin/bash

# Cloudflare API details

# 1. Obtain your Cloudflare API Key:
#    - Log in to your Cloudflare account.
#    - Navigate to the "My Profile" page.
#    - Under the "API Tokens" section, generate a new API key with the necessary permissions.
CF_API_KEY="your_api_key"        # Replace with your Cloudflare API key

# 2. Use the email associated with your Cloudflare account.
CF_API_EMAIL="you@example.com"    # Replace with your Cloudflare account email

# 3. Obtain your Cloudflare Zone ID:
#    - Make a request to the Cloudflare API to get the Zone ID.
#    - Replace 'your_api_key' and '[email protected]' with your actual API key and email.
#    - Look for the relevant Zone ID in the API response.
CF_ZONE_ID="your_zone_id"         # Replace with your Cloudflare Zone ID
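
# For example, the Zone ID can be looked up with a call like this (requires jq; values are placeholders):
#   curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=example.com" \
#        -H "X-Auth-Key: your_api_key" \
#        -H "X-Auth-Email: you@example.com" \
#        -H "Content-Type: application/json" | jq -r '.result[0].id'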

# DNS record details
FULL_DOMAIN="example.com"         # Replace with your root or subdomain (e.g., subdomain.example.com or example.com)
RECORD_TYPE="A"
RECORD_CONTENT="192.168.1.1"      # Replace with the IP address to which the domain or subdomain should point
RECORD_TTL=120
PROXIED=true                      # Set to 'true' to enable Cloudflare proxying

# Cloudflare API endpoint
CF_API_ENDPOINT="https://api.cloudflare.com/client/v4/zones/$CF_ZONE_ID/dns_records"

# Create DNS record with proxying enabled
response=$(curl -s -X POST "$CF_API_ENDPOINT" \
     -H "X-Auth-Key: $CF_API_KEY" \
     -H "X-Auth-Email: $CF_API_EMAIL" \
     -H "Content-Type: application/json" \
     --data "{\"type\":\"$RECORD_TYPE\",\"name\":\"$FULL_DOMAIN\",\"content\":\"$RECORD_CONTENT\",\"ttl\":$RECORD_TTL,\"proxied\":$PROXIED}")

# Only report success if the API confirms it
if [ "$(echo "$response" | jq -r '.success')" = "true" ]; then
    echo "DNS record created successfully for $FULL_DOMAIN with proxying enabled"
else
    echo "Error creating DNS record: $(echo "$response" | jq -r '.errors[0].message')" >&2
    exit 1
fi
#!/bin/bash

# DigitalOcean API token
DO_TOKEN="YOUR_DIGITALOCEAN_API_TOKEN"

# Droplet name and firewall details
DROPLET_NAME="your_droplet_name"
PORT=$1
IPS=("${@:2}")

# Create a new firewall
create_firewall() {
    FIREWALL_NAME="temp_firewall_$(date +'%s')"
    
    # Create firewall and get its ID
    IPS_JSON=$(printf '"%s",' "${IPS[@]}")  # Build a JSON array from the allowed IPs
    IPS_JSON=${IPS_JSON%,}
    FIREWALL_ID=$(curl -s -X POST -H "Content-Type: application/json" -H "Authorization: Bearer $DO_TOKEN" \
        -d '{"name": "'"$FIREWALL_NAME"'", "inbound_rules": [{"protocol": "tcp", "ports": "'"$PORT"'", "sources": {"addresses": ['$IPS_JSON']}}]}' \
        "https://api.digitalocean.com/v2/firewalls" | jq -r '.firewall.id')

    echo "Firewall created with ID: $FIREWALL_ID"

    # Look up the droplet by name and associate the firewall with it
    DROPLET_ID=$(curl -s -H "Authorization: Bearer $DO_TOKEN" \
        "https://api.digitalocean.com/v2/droplets" | jq -r '.droplets[] | select(.name == "'"$DROPLET_NAME"'") | .id')

    curl -s -X POST -H "Content-Type: application/json" -H "Authorization: Bearer $DO_TOKEN" \
        -d '{"droplet_ids": ['"$DROPLET_ID"']}' \
        "https://api.digitalocean.com/v2/firewalls/$FIREWALL_ID/droplets" > /dev/null

    echo "Firewall associated with droplet: $DROPLET_NAME"
}

# Delete the firewall after 60 minutes
delete_firewall() {
    sleep 3600  # Sleep for 60 minutes

    # Delete the firewall
    curl -s -X DELETE -H "Authorization: Bearer $DO_TOKEN" \
        "https://api.digitalocean.com/v2/firewalls/$FIREWALL_ID" > /dev/null

    echo "Firewall deleted"
}

# Main script execution
if [ $# -lt 2 ]; then
    echo "Usage: $0 <port> <ip1> <ip2> ..."
    exit 1
fi

create_firewall    # Create the firewall and attach it to the droplet (FIREWALL_ID must be set before deletion)
delete_firewall &  # Schedule deletion in the background after 60 minutes

wait  # Wait for background tasks to finish
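
# Example usage (script name and IPs are placeholders): allow port 22 from two addresses for 60 minutes
#   ./temp_firewall.sh 22 203.0.113.10 198.51.100.20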
#!/bin/bash

# Set your variables

# User's email address for AutoDiscover to determine Exchange Online server
read -p "Enter user's email address: " UserPrincipalName

# Guidance for obtaining API token and Azure AD tenant ID
echo "To obtain the API token and Azure AD tenant ID:"
echo "1. Register an app in the Azure portal: https://portal.azure.com/"
echo "2. Obtain the client ID, client secret, and tenant ID during the app registration process."
echo "3. Use the obtained values to authenticate and obtain the API token."
echo "4. Enter the API token and tenant ID when prompted below."

# Prompt user for API token
echo "You can use the 'az ad sp create-for-rbac' command from the Azure CLI to obtain the API token."
echo "Example command: az ad sp create-for-rbac --name YourAppName --password YourAppPassword --output json"
read -s -p "Enter API token for Microsoft 365: " ApiToken
echo  # Move to the next line after entering the token

# Prompt user for Azure AD tenant ID
read -p "Enter Azure AD tenant ID: " TenantId

# API details for Microsoft 365 authentication
# For Microsoft 365, you typically need to register an app in the Azure portal and obtain client ID, secret, and tenant ID.
# The token endpoint is usually a standard Microsoft identity platform endpoint.
ApiUrl="https://login.microsoftonline.com/$TenantId/oauth2/v2.0/token"

# Discover Exchange Online server using AutoDiscover
ExchangeServer=$(curl -LsS "https://autodiscover-s.outlook.com/autodiscover/autodiscover.xml" -H "Host: autodiscover-s.outlook.com" --data "<Autodiscover xmlns='http://schemas.microsoft.com/exchange/autodiscover/outlook/requestschema/2006'>
<Request>
  <EMailAddress>$UserPrincipalName</EMailAddress>
  <AcceptableResponseSchema>http://schemas.microsoft.com/exchange/autodiscover/outlook/responseschema/2006a</AcceptableResponseSchema>
</Request>
</Autodiscover>")
# Note: AutoDiscover is used to determine the Exchange Online server dynamically based on the user's email address.

# Extract the Exchange server from the AutoDiscover response
ExchangeServer=$(echo "$ExchangeServer" | grep -oPm1 "(?<=<Server>).*(?=<\/Server>)")
# Note: This extracts the Exchange server from the AutoDiscover response for further use.

# Authenticate with Microsoft 365 API
# The following is just a placeholder. Replace it with the actual OAuth 2.0 authentication process.
# Example: curl -X POST -d "grant_type=password&client_id=your-client-id&client_secret=your-client-secret&resource=https://outlook.office365.com&username=user@example.com&password=user-password" "$ApiUrl"
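#
# A sketch of a client-credentials request against the v2.0 token endpoint defined above
# (client ID and secret are placeholders from your app registration):
#   curl -s -X POST "$ApiUrl" \
#        -d "grant_type=client_credentials&client_id=<client-id>&client_secret=<client-secret>&scope=https://graph.microsoft.com/.default"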

# Connect to Exchange Online PowerShell. Remoting cmdlets such as New-PSSession and
# Import-PSSession only exist in PowerShell, so this step shells out to pwsh.
pwsh -Command "
  \$UserCredential = Get-Credential -UserName '$UserPrincipalName' -Message 'Exchange Online credentials';
  \$Session = New-PSSession -ConfigurationName Microsoft.Exchange -ConnectionUri 'https://$ExchangeServer/powershell-liveid/' -Credential \$UserCredential -Authentication Basic -AllowRedirection;
  Import-PSSession \$Session -DisableNameChecking | Out-Null;
  # Run PowerShell commands or any other necessary tasks here
  Remove-PSSession \$Session
"

# Log API interaction if needed
# curl -X POST -d "Script executed successfully on $ExchangeServer" -H "Authorization: Bearer $ApiToken" "$ApiUrl/Log"
# Note: If necessary, log API interactions. Adjust this command based on your specific logging requirements.
Emil Moe

Software and Data Engineer

I created this website to help you empower your infrastructure, so you don't have to spend as many hours on research as I did. I chose to make the site ad-free, so if you like what I do, please consider supporting me on Patreon.
