feat: add deployment workflows for web, Android, Docker, macOS, and Linux
Build Images and Deploy / Update-PROD-Stack (push) Successful in 34s

This commit is contained in:
2026-03-30 22:00:36 -04:00
parent f5e69fa9cd
commit ab17324f85
13 changed files with 971 additions and 0 deletions
+81
View File
@@ -0,0 +1,81 @@
# Builds a debug Android APK with Capacitor + Gradle, uploads it as a CI
# artifact, and (optionally) attaches it to an existing GitHub release.
name: Build Android APK

on:
  workflow_dispatch:
    inputs:
      tag:
        description: 'Version tag (e.g. 2.0.0)'
        required: false
        default: '2.0.0'

# contents: write is required by softprops/action-gh-release to attach assets.
permissions:
  contents: write

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      # Java 17 is the toolchain Gradle/AGP builds against here.
      - name: Set up Java
        uses: actions/setup-java@v4
        with:
          distribution: 'temurin'
          java-version: '17'

      - name: Set up Android SDK
        uses: android-actions/setup-android@v3

      - name: Install Android platform tools
        run: |
          sdkmanager "platforms;android-34" "build-tools;34.0.0"

      - name: Install Capacitor dependencies
        working-directory: apps/android
        run: npm install

      - name: Add Android platform
        working-directory: apps/android
        # `cap add` fails when the platform directory already exists in the
        # repo; the `|| echo` treats that case as success.
        run: npx cap add android || echo "Android platform already exists"

      - name: Sync Capacitor
        working-directory: apps/android
        run: npx cap sync android

      - name: Make gradlew executable
        working-directory: apps/android/android
        run: chmod +x gradlew

      # NOTE(review): this builds the *debug* variant; the renamed APK below
      # is what gets published — confirm a debug build is intended for release.
      - name: Build debug APK
        working-directory: apps/android/android
        run: ./gradlew assembleDebug

      - name: Rename APK
        run: |
          VERSION="${{ github.event.inputs.tag }}"
          cp apps/android/android/app/build/outputs/apk/debug/app-debug.apk \
            "dibby-wemo-$VERSION-android.apk"

      - name: Upload APK artifact
        uses: actions/upload-artifact@v4
        with:
          name: dibby-wemo-android-apk
          path: dibby-wemo-*.apk
          retention-days: 30

      - name: Upload to GitHub Release
        # NOTE(review): because the `tag` input has a non-empty default, this
        # condition is always true for workflow_dispatch — confirm intended.
        if: github.event.inputs.tag != ''
        uses: softprops/action-gh-release@v2
        with:
          tag_name: v${{ github.event.inputs.tag }}
          files: dibby-wemo-*.apk
          # Don't fail the job if the glob matches nothing.
          fail_on_unmatched_files: false
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+43
View File
@@ -0,0 +1,43 @@
# Builds the multi-arch (amd64 + arm64) Docker image and pushes it to GHCR.
name: Build & Push Docker Image

on:
  workflow_dispatch:
    inputs:
      tag:
        description: 'Docker image tag (e.g. 2.0.0)'
        required: true
        default: '2.0.0'

jobs:
  build-docker:
    runs-on: ubuntu-latest
    # packages: write lets the built-in token push to ghcr.io.
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # Buildx is required for multi-platform builds and the GHA layer cache.
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          context: .
          file: Dockerfile
          push: true
          platforms: linux/amd64,linux/arm64
          # Each build is tagged with both the requested version and `latest`.
          tags: |
            ghcr.io/k0rb3nd4ll4s/dibby-wemo-manager:${{ github.event.inputs.tag }}
            ghcr.io/k0rb3nd4ll4s/dibby-wemo-manager:latest
          # Reuse layers across runs via the GitHub Actions cache backend.
          cache-from: type=gha
          cache-to: type=gha,mode=max
+78
View File
@@ -0,0 +1,78 @@
# Builds Electron desktop packages for Linux (AppImage/deb/rpm, x64 + arm64)
# and uploads them as assets to an existing GitHub release.
name: Build & Upload Linux Packages

on:
  workflow_dispatch:
    inputs:
      tag:
        description: 'Release tag to upload assets to (e.g. v2.0.0)'
        required: true
        default: 'v2.0.0'

jobs:
  build-linux:
    # Pinned to 22.04 — presumably for glibc compatibility of the produced
    # binaries; confirm before bumping the runner image.
    runs-on: ubuntu-22.04
    permissions:
      contents: write
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      # rpm/fakeroot/dpkg/libarchive-tools are needed by electron-builder's
      # Linux packaging targets.
      - name: Install Linux build tools
        run: |
          sudo apt-get update -qq
          sudo apt-get install -y -qq rpm fakeroot dpkg libarchive-tools

      - name: Install workspace dependencies
        run: npm install --legacy-peer-deps

      - name: Install app-builder to system PATH
        run: |
          # The binary is bundled in the npm package for the current platform.
          # Copy it to /usr/local/bin so USE_SYSTEM_APP_BUILDER=true can find it.
          BINARY=$(node -e "console.log(require('./node_modules/app-builder-bin').appBuilderPath)")
          echo "app-builder binary: $BINARY"
          chmod +x "$BINARY"
          sudo cp "$BINARY" /usr/local/bin/app-builder
          app-builder --version

      - name: Vite build
        run: npx electron-vite build
        working-directory: apps/desktop

      - name: Bundle standalone scheduler
        run: node scripts/bundle-standalone.js
        working-directory: apps/desktop

      # CSC_IDENTITY_AUTO_DISCOVERY=false disables code-signing lookups
      # (the packages are produced unsigned).
      - name: Build Linux x64 packages
        run: npx electron-builder --linux --x64 --publish never
        working-directory: apps/desktop
        env:
          CSC_IDENTITY_AUTO_DISCOVERY: "false"
          USE_SYSTEM_APP_BUILDER: "true"

      # arm64 packages are cross-built on the x64 runner.
      - name: Build Linux arm64 packages
        run: npx electron-builder --linux --arm64 --publish never
        working-directory: apps/desktop
        env:
          CSC_IDENTITY_AUTO_DISCOVERY: "false"
          USE_SYSTEM_APP_BUILDER: "true"

      - name: List build output
        run: ls -lh apps/desktop/dist/

      - name: Upload Linux packages to release
        uses: softprops/action-gh-release@v2
        with:
          tag_name: ${{ github.event.inputs.tag }}
          files: |
            apps/desktop/dist/*.AppImage
            apps/desktop/dist/*.deb
            apps/desktop/dist/*.rpm
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+52
View File
@@ -0,0 +1,52 @@
# Builds the Electron desktop package for macOS (.dmg) and uploads it as an
# asset to an existing GitHub release.
name: Build & Upload macOS Package

on:
  workflow_dispatch:
    inputs:
      tag:
        description: 'Release tag to upload assets to (e.g. v2.0.0)'
        required: true
        default: 'v2.0.0'

jobs:
  build-mac:
    runs-on: macos-latest
    permissions:
      contents: write
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      - name: Install workspace dependencies
        run: npm install --legacy-peer-deps

      - name: Vite build
        run: npx electron-vite build
        working-directory: apps/desktop

      - name: Bundle standalone scheduler
        run: node scripts/bundle-standalone.js
        working-directory: apps/desktop

      # CSC_IDENTITY_AUTO_DISCOVERY=false skips signing-identity lookup, so
      # the resulting .dmg is unsigned/un-notarized.
      - name: Build macOS packages
        run: npx electron-builder --mac --publish never
        working-directory: apps/desktop
        env:
          CSC_IDENTITY_AUTO_DISCOVERY: "false"

      - name: List build output
        run: ls -lh apps/desktop/dist/

      # Only *.dmg is uploaded; any other mac targets in dist/ are ignored.
      - name: Upload macOS packages to release
        uses: softprops/action-gh-release@v2
        with:
          tag_name: ${{ github.event.inputs.tag }}
          files: apps/desktop/dist/*.dmg
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+89
View File
@@ -0,0 +1,89 @@
# https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions
# Gitea Actions workflow: on every push to main, rebuild and push the PROD
# image, then ask Portainer (via its REST API) to redeploy the stack with the
# fresh image while preserving the stack's existing environment variables.
name: Build Images and Deploy
run-name: ${{ gitea.actor }} is building new PROD images and redeploying the existing stack 🚀

on:
  push:
    # not working right now https://github.com/actions/runner/issues/2324
    # paths-ignore:
    #   - '**.yml'
    branches:
      - main

env:
  STACK_NAME: dibbly
  # base64-encoded .env file; decoded into the build context below
  DOT_ENV: ${{ secrets.PROD_ENV }}
  PORTAINER_TOKEN: ${{ vars.PORTAINER_TOKEN }}
  PORTAINER_API_URL: https://portainer.dev.nervesocket.com/api
  ENDPOINT_NAME: "mini" # sometimes "primary"
  IMAGE_TAG: "reg.dev.nervesocket.com/dibbly:latest"

jobs:
  Update-PROD-Stack:
    runs-on: ubuntu-latest
    steps:
      # if: contains(github.event.pull_request.head.ref, 'init-stack')
      - name: Checkout
        uses: actions/checkout@v4
        with:
          ref: main

      - name: Set up Docker Buildx
        # v3 for consistency with the other workflows in this repo (was @v1,
        # which is long superseded).
        uses: docker/setup-buildx-action@v3

      - name: Build and push PROD Docker image
        # NOTE(review): no registry login step is visible — presumably the
        # runner is pre-authenticated against reg.dev.nervesocket.com; verify.
        run: |
          # Quote the secret so the base64 payload survives word splitting.
          echo "$DOT_ENV" | base64 -d > .env
          docker buildx build --push -f Dockerfile -t "$IMAGE_TAG" .

      - name: Get the endpoint ID
        # Usually ID is 1, but you can get it from the API. Only skip this if you are VERY sure.
        run: |
          ENDPOINT_ID=$(curl -s -H "X-API-Key: $PORTAINER_TOKEN" "$PORTAINER_API_URL/endpoints" | jq -r ".[] | select(.Name==\"$ENDPOINT_NAME\") | .Id")
          echo "ENDPOINT_ID=$ENDPOINT_ID" >> "$GITHUB_ENV"
          echo "Got stack Endpoint ID: $ENDPOINT_ID"

      - name: Fetch stack ID from Portainer
        run: |
          STACK_ID=$(curl -s -H "X-API-Key: $PORTAINER_TOKEN" "$PORTAINER_API_URL/stacks" | jq -r ".[] | select(.Name==\"$STACK_NAME\" and .EndpointId==$ENDPOINT_ID) | .Id")
          echo "STACK_ID=$STACK_ID" >> "$GITHUB_ENV"
          echo "Got stack ID: $STACK_ID matched with Endpoint ID: $ENDPOINT_ID"

      - name: Fetch Stack
        run: |
          # Get the stack details (including env vars)
          STACK_DETAILS=$(curl -s -H "X-API-Key: $PORTAINER_TOKEN" "$PORTAINER_API_URL/stacks/$STACK_ID")
          # Extract environment variables from the stack
          echo "$STACK_DETAILS" | jq -r '.Env' > stack_env.json
          echo "Existing stack environment variables:"
          cat stack_env.json

      - name: Redeploy stack in Portainer
        run: |
          # Read stack file content ($(<file) directly; the previous
          # $(echo "$(<file)") round-trip was redundant)
          STACK_FILE_CONTENT=$(<web-compose.yml)
          # Read existing environment variables from the fetched stack
          ENV_VARS=$(cat stack_env.json)
          # Prepare JSON payload with environment variables
          JSON_PAYLOAD=$(jq -n --arg stackFileContent "$STACK_FILE_CONTENT" --argjson pullImage true --argjson env "$ENV_VARS" \
            '{stackFileContent: $stackFileContent, pullImage: $pullImage, env: $env}')
          echo "About to push the following JSON payload:"
          # Quoted so the payload is printed verbatim (no word splitting).
          echo "$JSON_PAYLOAD"
          # Update stack in Portainer (this redeploys it)
          DEPLOY_RESPONSE=$(curl -X PUT "$PORTAINER_API_URL/stacks/$STACK_ID?endpointId=$ENDPOINT_ID" \
            -H "X-API-Key: $PORTAINER_TOKEN" \
            -H "Content-Type: application/json" \
            --data "$JSON_PAYLOAD")
          echo "Redeployed stack in Portainer. Response:"
          echo "$DEPLOY_RESPONSE"

      - name: Status check
        run: |
          echo "📋 This job's status is ${{ job.status }}. Make sure you delete the init file to avoid issues."
+89
View File
@@ -0,0 +1,89 @@
# https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions
name: Build Images and Deploy
run-name: ${{ gitea.actor }} is building new PROD images and redeploying the existing stack 🚀
on:
push:
# not working right now https://github.com/actions/runner/issues/2324
# paths-ignore:
# - **.yml
branches:
- main
env:
STACK_NAME: hashex
DOT_ENV: ${{ secrets.PROD_ENV }}
PORTAINER_TOKEN: ${{ vars.PORTAINER_TOKEN }}
PORTAINER_API_URL: https://portainer.dev.nervesocket.com/api
ENDPOINT_NAME: "mini" #sometimes "primary"
IMAGE_TAG: "reg.dev.nervesocket.com/hashex:latest"
jobs:
Update-PROD-Stack:
runs-on: ubuntu-latest
steps:
# if: contains(github.event.pull_request.head.ref, 'init-stack')
- name: Checkout
uses: actions/checkout@v4
with:
ref: main
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Build and push PROD Docker image
run: |
echo $DOT_ENV | base64 -d > .env
docker buildx build --push -f Dockerfile -t $IMAGE_TAG .
- name: Get the endpoint ID
# Usually ID is 1, but you can get it from the API. Only skip this if you are VERY sure.
run: |
ENDPOINT_ID=$(curl -s -H "X-API-Key: $PORTAINER_TOKEN" "$PORTAINER_API_URL/endpoints" | jq -r ".[] | select(.Name==\"$ENDPOINT_NAME\") | .Id")
echo "ENDPOINT_ID=$ENDPOINT_ID" >> $GITHUB_ENV
echo "Got stack Endpoint ID: $ENDPOINT_ID"
- name: Fetch stack ID from Portainer
run: |
STACK_ID=$(curl -s -H "X-API-Key: $PORTAINER_TOKEN" "$PORTAINER_API_URL/stacks" | jq -r ".[] | select(.Name==\"$STACK_NAME\" and .EndpointId==$ENDPOINT_ID) | .Id")
echo "STACK_ID=$STACK_ID" >> $GITHUB_ENV
echo "Got stack ID: $STACK_ID matched with Endpoint ID: $ENDPOINT_ID"
- name: Fetch Stack
run: |
# Get the stack details (including env vars)
STACK_DETAILS=$(curl -s -H "X-API-Key: $PORTAINER_TOKEN" "$PORTAINER_API_URL/stacks/$STACK_ID")
# Extract environment variables from the stack
echo "$STACK_DETAILS" | jq -r '.Env' > stack_env.json
echo "Existing stack environment variables:"
cat stack_env.json
- name: Redeploy stack in Portainer
run: |
# Read stack file content
STACK_FILE_CONTENT=$(echo "$(<prod-compose.yml )")
# Read existing environment variables from the fetched stack
ENV_VARS=$(cat stack_env.json)
# Prepare JSON payload with environment variables
JSON_PAYLOAD=$(jq -n --arg stackFileContent "$STACK_FILE_CONTENT" --argjson pullImage true --argjson env "$ENV_VARS" \
'{stackFileContent: $stackFileContent, pullImage: $pullImage, env: $env}')
echo "About to push the following JSON payload:"
echo $JSON_PAYLOAD
# Update stack in Portainer (this redeploys it)
DEPLOY_RESPONSE=$(curl -X PUT "$PORTAINER_API_URL/stacks/$STACK_ID?endpointId=$ENDPOINT_ID" \
-H "X-API-Key: $PORTAINER_TOKEN" \
-H "Content-Type: application/json" \
--data "$JSON_PAYLOAD")
echo "Redeployed stack in Portainer. Response:"
echo $DEPLOY_RESPONSE
- name: Status check
run: |
echo "📋 This job's status is ${{ job.status }}. Make sure you delete the init file to avoid issues."
+89
View File
@@ -0,0 +1,89 @@
# https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions
name: Build Images and Deploy
run-name: ${{ gitea.actor }} is building new PROD images and redeploying the existing stack 🚀
on:
push:
# not working right now https://github.com/actions/runner/issues/2324
# paths-ignore:
# - **.yml
branches:
- main
env:
STACK_NAME: dibbly
DOT_ENV: ${{ secrets.PROD_ENV }}
PORTAINER_TOKEN: ${{ vars.PORTAINER_TOKEN }}
PORTAINER_API_URL: https://portainer.dev.nervesocket.com/api
ENDPOINT_NAME: "mini" #sometimes "primary"
IMAGE_TAG: "reg.dev.nervesocket.com/dibbly:latest"
jobs:
Update-PROD-Stack:
runs-on: ubuntu-latest
steps:
# if: contains(github.event.pull_request.head.ref, 'init-stack')
- name: Checkout
uses: actions/checkout@v4
with:
ref: main
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Build and push PROD Docker image
run: |
echo $DOT_ENV | base64 -d > .env
docker buildx build --push -f Dockerfile -t $IMAGE_TAG .
- name: Get the endpoint ID
# Usually ID is 1, but you can get it from the API. Only skip this if you are VERY sure.
run: |
ENDPOINT_ID=$(curl -s -H "X-API-Key: $PORTAINER_TOKEN" "$PORTAINER_API_URL/endpoints" | jq -r ".[] | select(.Name==\"$ENDPOINT_NAME\") | .Id")
echo "ENDPOINT_ID=$ENDPOINT_ID" >> $GITHUB_ENV
echo "Got stack Endpoint ID: $ENDPOINT_ID"
- name: Fetch stack ID from Portainer
run: |
STACK_ID=$(curl -s -H "X-API-Key: $PORTAINER_TOKEN" "$PORTAINER_API_URL/stacks" | jq -r ".[] | select(.Name==\"$STACK_NAME\" and .EndpointId==$ENDPOINT_ID) | .Id")
echo "STACK_ID=$STACK_ID" >> $GITHUB_ENV
echo "Got stack ID: $STACK_ID matched with Endpoint ID: $ENDPOINT_ID"
- name: Fetch Stack
run: |
# Get the stack details (including env vars)
STACK_DETAILS=$(curl -s -H "X-API-Key: $PORTAINER_TOKEN" "$PORTAINER_API_URL/stacks/$STACK_ID")
# Extract environment variables from the stack
echo "$STACK_DETAILS" | jq -r '.Env' > stack_env.json
echo "Existing stack environment variables:"
cat stack_env.json
- name: Redeploy stack in Portainer
run: |
# Read stack file content
STACK_FILE_CONTENT=$(echo "$(<web-compose.yml )")
# Read existing environment variables from the fetched stack
ENV_VARS=$(cat stack_env.json)
# Prepare JSON payload with environment variables
JSON_PAYLOAD=$(jq -n --arg stackFileContent "$STACK_FILE_CONTENT" --argjson pullImage true --argjson env "$ENV_VARS" \
'{stackFileContent: $stackFileContent, pullImage: $pullImage, env: $env}')
echo "About to push the following JSON payload:"
echo $JSON_PAYLOAD
# Update stack in Portainer (this redeploys it)
DEPLOY_RESPONSE=$(curl -X PUT "$PORTAINER_API_URL/stacks/$STACK_ID?endpointId=$ENDPOINT_ID" \
-H "X-API-Key: $PORTAINER_TOKEN" \
-H "Content-Type: application/json" \
--data "$JSON_PAYLOAD")
echo "Redeployed stack in Portainer. Response:"
echo $DEPLOY_RESPONSE
- name: Status check
run: |
echo "📋 This job's status is ${{ job.status }}. Make sure you delete the init file to avoid issues."
+66
View File
@@ -0,0 +1,66 @@
# Production Compose stack: the app container plus an optional nginx reverse
# proxy (enabled via the `with-nginx` profile).
# NOTE: the top-level `version` key is obsolete in Compose V2 (ignored with a
# warning) but harmless to keep.
version: '3.8'

services:
  dibbly-web:
    image: reg.dev.nervesocket.com/dibbly:latest
    container_name: dibbly-wemo-manager-prod
    restart: always
    ports:
      - "3456:3456"
    volumes:
      - dibbly-data:/data
      - dibbly-logs:/app/logs
    environment:
      - DATA_DIR=/data
      - PORT=3456
      - NODE_ENV=production
    networks:
      - dibbly-network
    # Use host networking on Linux for Wemo SSDP discovery
    # Uncomment the line below if running on Linux
    # (host mode is mutually exclusive with the `ports`/`networks` keys above)
    # network_mode: host
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3456/"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    # Cap log disk usage at ~30 MB per container.
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
    # NOTE(review): `deploy.resources` is only honored by Swarm or by
    # `docker compose --compatibility`; a plain `docker compose up` ignores it.
    deploy:
      resources:
        limits:
          memory: 256M
        reservations:
          memory: 128M

  # Optional: Reverse proxy for SSL termination
  nginx:
    image: nginx:alpine
    container_name: dibbly-nginx
    restart: always
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/ssl:/etc/nginx/ssl:ro
    networks:
      - dibbly-network
    depends_on:
      - dibbly-web
    # Only started when the `with-nginx` profile is explicitly enabled.
    profiles:
      - with-nginx

volumes:
  dibbly-data:
    driver: local
  dibbly-logs:
    driver: local

networks:
  dibbly-network:
    driver: bridge
+189
View File
@@ -0,0 +1,189 @@
# Web Deployment Guide
This guide covers deploying Dibby Wemo Manager as a web service using Docker containers.
## Quick Start
### Using Docker Compose (Recommended)
1. **Build and deploy with one command:**
```bash
# Linux/macOS
./scripts/web-deploy.sh
# Windows
powershell -ExecutionPolicy Bypass -File scripts/web-deploy.ps1
```
2. **Or manually:**
```bash
# Build the Docker image
docker build -t reg.dev.nervesocket.com/dibbly:latest .
# Start with Docker Compose
docker-compose -f web-compose.yml up -d
```
3. **Access the web interface:**
- Local: http://localhost:3456
- Mobile: http://YOUR_IP:3456
## Configuration
### Environment Variables
| Variable | Default | Description |
|----------|---------|-------------|
| `DATA_DIR` | `/data` | Persistent data directory for device configs and rules |
| `PORT` | `3456` | HTTP port for the web interface |
### Docker Compose Options
The `web-compose.yml` includes several production-ready features:
- **Persistent Data**: Uses Docker volumes to store device configurations and rules
- **Health Checks**: Automatic monitoring of service health
- **Restart Policy**: Automatically restarts if the service crashes
- **Network Isolation**: Runs in a dedicated Docker network
### Networking Considerations
#### Wemo Device Discovery
Wemo devices use SSDP (Simple Service Discovery Protocol) which requires special networking:
- **Linux**: Use `network_mode: host` for automatic device discovery
- **macOS/Windows**: Host networking isn't supported - add devices manually via the web UI
To enable host networking on Linux, uncomment this line in `web-compose.yml` (note that `network_mode: host` cannot be combined with the `ports` and `networks` settings, so comment those out as well):
```yaml
network_mode: host
```
#### Port Configuration
The service runs on port 3456 by default. To change it:
1. Update the port mapping in `web-compose.yml`
2. Set the `PORT` environment variable
3. Restart the service
## Management
### View Logs
```bash
docker-compose -f web-compose.yml logs -f
```
### Stop Service
```bash
docker-compose -f web-compose.yml down
```
### Update Service
```bash
# Pull latest image and restart
docker-compose -f web-compose.yml pull
docker-compose -f web-compose.yml up -d
```
### Backup Data
```bash
# Backup persistent data
docker run --rm -v dibbly-data:/data -v $(pwd):/backup alpine tar czf /backup/dibbly-backup.tar.gz -C /data .
# Restore data
docker run --rm -v dibbly-data:/data -v $(pwd):/backup alpine tar xzf /backup/dibbly-backup.tar.gz -C /data
```
## Web Interface Features
The web interface provides full parity with the desktop application:
- **Device Management**: Discover, add, and control Wemo devices
- **Scheduling**: Create and manage device schedules
- **Real-time Updates**: WebSocket-based live status updates
- **Mobile Optimized**: Responsive design for phones and tablets
- **Dark Mode**: Automatic theme detection
### API Endpoints
The service exposes a REST API for integration:
- `GET /api/devices` - List all devices
- `POST /api/devices/discover` - Discover new devices
- `GET/POST /api/devices/{host}/{port}/state` - Control device state
- `GET/POST/PUT/DELETE /api/dwm-rules` - Manage scheduling rules
- `GET /api/scheduler/status` - Get scheduler status
## Troubleshooting
### Common Issues
1. **Devices not discovered automatically**
- On macOS/Windows, add devices manually via the web UI
- On Linux, ensure host networking is enabled
2. **Service not accessible**
- Check if port 3456 is available
- Verify Docker is running
- Check firewall settings
3. **Data persistence issues**
- Ensure the Docker volume `dibbly-data` exists
- Check permissions on the data directory
### Health Checks
The service includes built-in health checks. Monitor status:
```bash
docker-compose -f web-compose.yml ps
```
### Performance
For optimal performance:
- Use host networking on Linux for faster device discovery
- Ensure adequate disk space for data persistence
- Monitor memory usage with multiple devices
## Security
### Network Security
- The service binds to `0.0.0.0` by default
- Consider using a reverse proxy (nginx/traefik) for production
- Implement authentication if exposing to the internet
### Data Protection
- Device configurations are stored in `/data`
- Regular backups are recommended
- Consider encrypting the data volume in production
## Production Deployment
For production environments, consider:
1. **Reverse Proxy**: Use nginx or Traefik for SSL termination
2. **Authentication**: Add authentication layer
3. **Monitoring**: Implement proper logging and monitoring
4. **Backups**: Automated backup strategy
5. **High Availability**: Multiple instances with load balancing
Example nginx configuration:
```nginx
server {
listen 443 ssl;
server_name your-domain.com;
ssl_certificate /path/to/cert.pem;
ssl_certificate_key /path/to/key.pem;
location / {
proxy_pass http://localhost:3456;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
```
+60
View File
@@ -0,0 +1,60 @@
events {
    worker_connections 1024;
}

http {
    # Map the client's Upgrade header to the Connection header sent upstream:
    # "upgrade" for WebSocket handshakes, empty otherwise so that ordinary
    # HTTP requests can keep using upstream keep-alive connections. This is
    # the pattern recommended by the nginx WebSocket-proxying docs; the
    # previous hard-coded `Connection "upgrade"` applied upgrade semantics to
    # every proxied request.
    map $http_upgrade $connection_upgrade {
        default upgrade;
        ''      '';
    }

    upstream dibbly {
        server dibbly-web:3456;
    }

    # HTTP redirect to HTTPS
    server {
        listen 80;
        server_name _;
        return 301 https://$host$request_uri;
    }

    # HTTPS server
    server {
        listen 443 ssl http2;
        server_name _;

        # SSL configuration
        ssl_certificate /etc/nginx/ssl/cert.pem;
        ssl_certificate_key /etc/nginx/ssl/key.pem;
        ssl_protocols TLSv1.2 TLSv1.3;
        ssl_ciphers HIGH:!aNULL:!MD5;

        # Security headers
        add_header X-Frame-Options DENY;
        add_header X-Content-Type-Options nosniff;
        add_header X-XSS-Protection "1; mode=block";
        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";

        # Proxy to Dibby service
        location / {
            proxy_pass http://dibbly;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;

            # WebSocket support (falls back to normal keep-alive when the
            # request is not an upgrade; see the map above)
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;

            # Timeout settings
            proxy_connect_timeout 60s;
            proxy_send_timeout 60s;
            proxy_read_timeout 60s;
        }

        # Health check endpoint
        location /health {
            access_log off;
            return 200 "healthy\n";
            add_header Content-Type text/plain;
        }
    }
}
+51
View File
@@ -0,0 +1,51 @@
# Dibby Wemo Manager - Web Deployment Script (PowerShell)
# This script builds and deploys the web version using Docker Compose

Write-Host "🚀 Deploying Dibby Wemo Manager (Web Version)..." -ForegroundColor Green

# Check if Docker is running.
# NOTE: native commands such as `docker` signal failure via a non-zero exit
# code, not a PowerShell exception — the previous try/catch never fired, so
# the script continued even when Docker was down. Check $LASTEXITCODE instead.
docker info > $null 2>&1
if ($LASTEXITCODE -ne 0) {
    Write-Host "❌ Docker is not running. Please start Docker Desktop first." -ForegroundColor Red
    exit 1
}

# Build the Docker image
Write-Host "📦 Building Docker image..." -ForegroundColor Blue
docker build -t reg.dev.nervesocket.com/dibbly:latest .
if ($LASTEXITCODE -ne 0) {
    # Don't deploy a stale image if the build failed.
    Write-Host "❌ Docker build failed." -ForegroundColor Red
    exit 1
}

# Stop existing container if running (ignore errors when nothing is up yet)
Write-Host "🛑 Stopping existing container..." -ForegroundColor Yellow
docker-compose -f web-compose.yml down 2>$null

# Start the service
Write-Host "🔄 Starting web service..." -ForegroundColor Blue
docker-compose -f web-compose.yml up -d

# Wait for service to be ready (fixed delay; the container healthcheck has a
# 40s start period, so this is only a convenience pause)
Write-Host "⏳ Waiting for service to be ready..." -ForegroundColor Yellow
Start-Sleep -Seconds 10

# Check if service is running
$serviceStatus = docker-compose -f web-compose.yml ps
if ($serviceStatus -match "Up") {
    Write-Host "✅ Dibby Wemo Manager is now running!" -ForegroundColor Green
    Write-Host ""
    Write-Host "🌐 Access the web interface at:" -ForegroundColor Cyan
    Write-Host "   http://localhost:3456"
    Write-Host ""
    Write-Host "📱 Mobile-friendly URL:" -ForegroundColor Cyan
    # First non-loopback, non-link-local IPv4 on an Ethernet/Wi-Fi adapter.
    $localIP = (Get-NetIPAddress -AddressFamily IPv4 -InterfaceAlias "Ethernet*","Wi-Fi*" | Where-Object { $_.IPAddress -notlike "169.*" -and $_.IPAddress -notlike "127.*" } | Select-Object -First 1).IPAddress
    Write-Host "   http://$($localIP):3456"
    Write-Host ""
    Write-Host "📊 View logs:" -ForegroundColor Cyan
    Write-Host "   docker-compose -f web-compose.yml logs -f"
    Write-Host ""
    Write-Host "🛑 Stop service:" -ForegroundColor Cyan
    Write-Host "   docker-compose -f web-compose.yml down"
} else {
    Write-Host "❌ Failed to start the service. Check logs:" -ForegroundColor Red
    docker-compose -f web-compose.yml logs
    exit 1
}
+51
View File
@@ -0,0 +1,51 @@
#!/bin/bash
# Dibby Wemo Manager - Web Deployment Script
# This script builds and deploys the web version using Docker Compose

# Abort on the first failing command (build, compose up, etc.).
set -e

echo "🚀 Deploying Dibby Wemo Manager (Web Version)..."

# Check if Docker is running
if ! docker info > /dev/null 2>&1; then
    echo "❌ Docker is not running. Please start Docker first."
    exit 1
fi

# Build the Docker image (if needed)
echo "📦 Building Docker image..."
docker build -t reg.dev.nervesocket.com/dibbly:latest .

# Stop existing container if running
# (`|| true` keeps `set -e` from aborting when nothing is running yet)
echo "🛑 Stopping existing container..."
docker-compose -f web-compose.yml down || true

# Start the service
echo "🔄 Starting web service..."
docker-compose -f web-compose.yml up -d

# Wait for service to be ready
# NOTE(review): a fixed sleep is racy (the healthcheck has a 40s start
# period); polling container health would be more reliable.
echo "⏳ Waiting for service to be ready..."
sleep 10

# Check if service is running
if docker-compose -f web-compose.yml ps | grep -q "Up"; then
    echo "✅ Dibby Wemo Manager is now running!"
    echo ""
    echo "🌐 Access the web interface at:"
    echo "   http://localhost:3456"
    echo ""
    echo "📱 Mobile-friendly URL:"
    # NOTE(review): `hostname -I` is Linux-only; on macOS it errors out —
    # confirm this script is only advertised for Linux hosts.
    echo "   http://$(hostname -I | awk '{print $1}'):3456"
    echo ""
    echo "📊 View logs:"
    echo "   docker-compose -f web-compose.yml logs -f"
    echo ""
    echo "🛑 Stop service:"
    echo "   docker-compose -f web-compose.yml down"
else
    echo "❌ Failed to start the service. Check logs:"
    docker-compose -f web-compose.yml logs
    exit 1
fi
+33
View File
@@ -0,0 +1,33 @@
# Development/simple Compose file for the web service (no nginx, no prod
# logging/resource settings).
# NOTE: the top-level `version` key is obsolete in Compose V2 (ignored with a
# warning) but harmless to keep.
version: '3.8'

services:
  dibbly-web:
    image: reg.dev.nervesocket.com/dibbly:latest
    container_name: dibbly-wemo-manager
    restart: unless-stopped
    ports:
      - "3456:3456"
    volumes:
      - dibbly-data:/data
    environment:
      - DATA_DIR=/data
      - PORT=3456
    networks:
      - dibbly-network
    # Use host networking on Linux for Wemo SSDP discovery
    # Uncomment the network_mode line below if running on Linux
    # (host mode is mutually exclusive with the `ports`/`networks` keys above)
    # network_mode: host
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3456/"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

volumes:
  dibbly-data:
    driver: local

networks:
  dibbly-network:
    driver: bridge