migrated core_nest to mainroom

This commit is contained in:
buenosairesam
2025-12-24 06:23:31 -03:00
parent 329c401ff5
commit d62337e7ba
50 changed files with 5503 additions and 73 deletions

19
.gitignore vendored
View File

@@ -1,4 +1,15 @@
fails # Previous attempts and drafts
gen fails/
def def/
__pycache__
# Python
__pycache__/
*.pyc
*.pyo
.venv/
venv/
# Generated runnable instance (symlinks + generated)
gen/
# But track the generated models (one-time per client)
!gen/models/

156
CLAUDE.md
View File

@@ -18,18 +18,20 @@ spr/
├── config/ # Framework configurations ├── config/ # Framework configurations
│ └── soleprint.config.json │ └── soleprint.config.json
├── artery/ # ACTUAL source - Vital connections ├── ctrl/ # Soleprint room's own ctrl
├── artery/ # VERSIONED - Vital connections
│ ├── veins/ # Single-responsibility connectors │ ├── veins/ # Single-responsibility connectors
│ ├── pulses/ # Composed: Vein + Room + Depot │ ├── pulses/ # Composed: Vein + Room + Depot
│ ├── rooms/ # Environment configs │ ├── rooms/ # Environment configs
│ └── depots/ # Data storage │ └── depots/ # Data storage
├── atlas/ # ACTUAL source - Documentation system ├── atlas/ # VERSIONED - Documentation system
│ ├── templates/ # Gherkin, BDD patterns │ ├── templates/ # Gherkin, BDD patterns
│ ├── books/ # Composed: Template + Depot │ ├── books/ # Composed: Template + Depot
│ └── depots/ # Data storage │ └── depots/ # Data storage
├── station/ # ACTUAL source - Tools & execution ├── station/ # VERSIONED - Tools & execution
│ ├── tools/ # Utilities, generators, runners │ ├── tools/ # Utilities, generators, runners
│ │ ├── generator/ # Model/framework generator │ │ ├── generator/ # Model/framework generator
│ │ ├── datagen/ # Test data generation │ │ ├── datagen/ # Test data generation
@@ -39,17 +41,34 @@ spr/
│ ├── rooms/ # Environment configs │ ├── rooms/ # Environment configs
│ └── depots/ # Data storage │ └── depots/ # Data storage
├── data/ # Site content as JSON files ├── data/ # JSON content files (versioned)
── gen/ # RUNNABLE instance (run from here) ── hub/ # VERSIONED base files
├── main.py # Hub entry point ├── main.py # Hub entry point
├── index.html # Landing page ├── index.html # Landing page
├── requirements.txt ├── requirements.txt # Dependencies
── models/ # Generated Pydantic models ── dataloader/ # Data loading module
├── data/ # Symlink → ../data/
├── artery/ # Symlink → ../artery/ ├── gen/ # RUNNABLE instance (gitignored, symlinks)
├── atlas/ # Symlink → ../atlas/ ├── main.py # → ../hub/main.py
── station/ # Symlink → ../station/ ── index.html # → ../hub/index.html
│ ├── requirements.txt # → ../hub/requirements.txt
│ ├── dataloader/ # → ../hub/dataloader/
│ ├── artery/ # → ../artery/
│ ├── atlas/ # → ../atlas/
│ ├── station/ # → ../station/
│ ├── data/ # → ../data/
│ └── models/ # GENERATED (one-time per client)
│ └── pydantic/
└── mainroom/ # Orchestration: soleprint ↔ managed room
├── ctrl/ # Orchestration commands
├── sbwrapper/ # Sidebar wrapper UI
├── link/ # Adapters (connect without modifying either side)
│ └── adapters/ # Framework-specific adapters (django, etc.)
└── soleprint/ # Docker configs for soleprint services
├── docker-compose.yml
└── Dockerfile.fastapi
``` ```
## The Three Systems ## The Three Systems
@@ -79,42 +98,54 @@ Composed: Pulse (artery), Book (atlas), Desk (station)
A **Room** is an environment with soleprint context, features, and conventions: A **Room** is an environment with soleprint context, features, and conventions:
- Every room has a `ctrl/` folder with commands that act only on that room - Every room has a `ctrl/` folder with commands that act only on that room
- Tools are pluggable into any room - Tools are pluggable into any room
- **core_room** is special: orchestrates soleprint + managed sites (Docker lives outside soleprint) - Managed projects work with their own defaults (env vars set by mainroom ctrl for orchestration)
### Mainroom
The **mainroom** orchestrates interaction between soleprint and managed rooms:
- `sbwrapper/` - Sidebar UI overlay for any managed app (quick login, Jira info, etc.)
- `link/` - Adapters to connect soleprint to managed app data WITHOUT modifying either
- `soleprint/` - Docker configs for running soleprint services
- `ctrl/` - Commands for orchestration (sets env vars, starts services)
### Hub vs Gen
- `hub/` = Versioned base files (main.py, dataloader, index.html)
- `gen/` = Gitignored runnable instance with symlinks to hub/ + systems
- `gen/models/` = Generated models (one-time per client, like an install)
**Development:** Edit in hub/, artery/, atlas/, station/, data/ → run from gen/
**Production:** Copy everything (resolve symlinks)
### The Generator ### The Generator
Lives in `station/tools/generator/`. It: Lives in `station/tools/generator/`. It:
1. Reads `schema.json` (source of truth) 1. Reads `schema.json` (source of truth)
2. Generates Pydantic models to `gen/models/` 2. Generates Pydantic models to `gen/models/`
3. Model generation is **infrequent** - only when schema changes 3. Generation is **one-time per client** (like install)
4. Runs standalone (no model dependencies) for bootstrap
**Bootstrap:** Generator runs standalone (no model dependencies), generates models, then station can use them.
### Development Symlinks
For development, `gen/` contains symlinks back to source:
- `gen/artery/``../artery/`
- `gen/atlas/``../atlas/`
- `gen/station/``../station/`
- `gen/data/``../data/`
This means: edit in `spr/artery/`, run from `spr/gen/`, no regeneration needed.
**Production:** Copy everything (resolve symlinks).
### Naming Flexibility ### Naming Flexibility
Code inside soleprint components should NOT have imports too tied to "artery", "atlas", "station" names. At some point these could be swapped for different naming schemes (for teams with different domain language). Code inside soleprint should NOT have imports too tied to system names. Display names are configurable. Future: swap entire naming domains without breaking functionality.
## Development Workflow ## Development Workflow
### Running Locally ### Running Locally
```bash ```bash
cd spr/gen cd spr/gen
pip install -r requirements.txt
python main.py # Hub on :12000 python main.py # Hub on :12000
``` ```
### Regenerating Models (infrequent) ### Regenerating Models (one-time / rare)
```bash ```bash
cd spr/station/tools/generator cd spr/station/tools/generator
python -m generators.orchestrator --config ../../../config/soleprint.config.json --output ../../../gen python -m generators.orchestrator \
--config ../../../config/soleprint.config.json \
--output ../../../gen
```
### Orchestrating with Managed Room
```bash
cd spr/mainroom/ctrl
./start.sh # Sets env vars, starts soleprint + link services
``` ```
### Worktrees ### Worktrees
@@ -122,28 +153,27 @@ Feature development in: `/home/mariano/wdir/wts/spr/<branch>`
Planned: Planned:
- `databrowse` - Data browser tool (separate CLAUDE.md) - `databrowse` - Data browser tool (separate CLAUDE.md)
- `sbwrapper` - Sidebar wrapper UI for core_room (separate CLAUDE.md) - `sbwrapper` - Sidebar wrapper development
## External Dependencies ## External References
| What | Location | Notes | | What | Location | Notes |
|------|----------|-------| |------|----------|-------|
| Core Room | `core_nest/` | Orchestration + Docker (outside spr) | | Core Nest (legacy) | `core_nest/` | Original orchestration, being replaced by mainroom |
| Amar Backend | `ama/amar_django_back` | Test subject | | Amar Backend | `ama/amar_django_back` | Example managed room |
| Amar Frontend | `ama/amar_frontend` | Test subject | | Amar Frontend | `ama/amar_frontend` | Example managed room |
| Pawprint | `ama/pawprint` | Legacy - migrate tools then deprecate | | Pawprint (legacy) | `ama/pawprint` | Original pet-themed naming, deprecated |
## Tools Status ## Tools Status
| Tool | Source | Status | Notes | | Tool | Location | Status | Notes |
|------|--------|--------|-------| |------|----------|--------|-------|
| generator | fails/02/generators | Move to station/tools/ | Refactor file IO | | generator | station/tools/generator | Working | Refactor file IO pending |
| datagen | pawprint/ward/tools | Consolidate | Merge with tester/generate_test_data | | datagen | station/tools/datagen | Working | Test data generation |
| tester | pawprint/ward/tools | Advanced | Full BDD/playwright | | tester | station/tools/tester | Advanced | Full BDD/playwright |
| databrowse | - | WIP | Separate worktree | | hub | station/tools/hub | Idea | Port management |
| hub | pawprint/ward/tools | Idea | Port management | | infra | station/tools/infra | Idea | Cloud deploy scripts |
| infra | pawprint/ward/tools | Idea | Cloud deploy scripts | | graphgen | station/tools/graphgen | Idea | Graph generation |
| graphgen | pawprint/ward/tools | Idea | Graph generation |
## Ports ## Ports
@@ -157,25 +187,28 @@ Planned:
## Current State ## Current State
**Done:** **Done:**
- [x] Model schema defined (pawprint/models/schema.json) - [x] Project structure finalized
- [x] Generator working (fails/02/generators/) - [x] Schema.json in place
- [x] Generated instance in gen/ - [x] Generator moved to station/tools/
- [x] Hub/gen separation with symlinks
- [x] Mainroom structure from core_nest
- [x] Docker configs updated to soleprint naming
- [x] Tools consolidated from pawprint
**Next (in order):** **Next:**
1. [ ] Create folder structure (artery/, atlas/, station/, config/) 1. [ ] Test gen/ runs correctly
2. [ ] Move schema.json to spr/ 2. [ ] Create spr/ctrl/ scripts
3. [ ] Move generator to station/tools/generator/ 3. [ ] Complete mainroom/ctrl/ orchestration scripts
4. [ ] Move config to spr/config/ 4. [ ] Worktree for databrowse
5. [ ] Set up symlinks in gen/ 5. [ ] Worktree for sbwrapper
6. [ ] Consolidate tools from pawprint/ward/tools/
7. [ ] Integrate core_room (sbwrapper)
8. [ ] Worktrees for databrowse, sbwrapper
## Files Ignored (gitignore) ## Files Ignored (gitignore)
- `fails/` - Previous attempts, reference only - `fails/` - Previous attempts, reference only
- `gen/` - Generated/runnable, not source (except models/)
- `def/` - Definition drafts - `def/` - Definition drafts
- `gen/` - Runnable instance (except gen/models/)
- `__pycache__/`, `*.pyc`
- `venv/`, `.venv/`
## Quick Reference ## Quick Reference
@@ -186,6 +219,9 @@ cd gen && python main.py
# Health check # Health check
curl localhost:12000/health curl localhost:12000/health
# View systems # View landing
open http://localhost:12000 open http://localhost:12000
# Docker (via mainroom)
cd mainroom/soleprint && docker compose up -d
``` ```

View File

@@ -27,15 +27,25 @@ python main.py
spr/ spr/
├── schema.json # Model definitions (source of truth) ├── schema.json # Model definitions (source of truth)
├── config/ # Framework configuration ├── config/ # Framework configuration
├── artery/ # Connectors (source) ├── ctrl/ # Soleprint room ctrl
├── atlas/ # Documentation (source)
├── station/ # Tools (source) ├── artery/ # Connectors (versioned)
├── atlas/ # Documentation (versioned)
├── station/ # Tools (versioned)
│ └── tools/ │ └── tools/
│ ├── generator/ # Generates models & structure │ ├── generator/ # Generates models from schema
│ ├── datagen/ # Test data generation │ ├── datagen/ # Test data generation
│ └── tester/ # Test runner │ └── tester/ # BDD/contract test runner
├── data/ # Content (JSON)
── gen/ # Runnable instance (run from here) ── data/ # JSON content
├── hub/ # Base files (main.py, dataloader, etc.)
├── gen/ # Runnable instance (symlinks + generated models)
└── mainroom/ # Orchestration: soleprint ↔ managed room
├── ctrl/ # Orchestration commands
├── sbwrapper/ # Sidebar wrapper UI
├── link/ # Adapters for managed apps
└── soleprint/ # Docker configs
``` ```
## Components ## Components
@@ -62,21 +72,40 @@ cd gen
python main.py python main.py
``` ```
### Regenerate models (when schema.json changes) ### Regenerate models (one-time / rare)
```bash ```bash
cd station/tools/generator cd station/tools/generator
python -m generators.orchestrator --output ../../../gen python -m generators.orchestrator --output ../../../gen
``` ```
### Run with Docker (via mainroom)
```bash
cd mainroom/soleprint
docker compose up -d
```
## Ports ## Ports
| Service | Port | | Service | Port |
|---------|------| |---------|------|
| Hub | 12000 | | Hub (soleprint) | 12000 |
| Artery | 12001 | | Artery | 12001 |
| Atlas | 12002 | | Atlas | 12002 |
| Station | 12003 | | Station | 12003 |
## Architecture
```
hub/ → Versioned base files (main.py, dataloader)
gen/ → Runnable instance (symlinks to hub/ + systems)
gen/models/ → Generated once per client (like install)
mainroom/ → Orchestration layer
├── sbwrapper → UI overlay for managed apps
├── link → Data adapters (no modification to either side)
└── soleprint → Docker for soleprint services
```
## Background ## Background
Born from the friction of: Born from the friction of:

186
hub/dataloader/__init__.py Normal file
View File

@@ -0,0 +1,186 @@
"""
Soleprint Data Loader
Loads JSON data files and provides typed access via Pydantic models.
JSON files live in data/ directory (content only, no code).
"""
import json
import os
import sys
from pathlib import Path
from typing import List, Optional
# When symlinked, __file__ resolves to actual location (hub/dataloader)
# but we need to find models/ in the runtime directory (gen/)
# Use cwd as the base since we always run from gen/
_runtime_dir = Path.cwd()
_file_parent = Path(__file__).resolve().parent.parent
# Try runtime dir first (gen/), then fall back to file's parent (hub/)
if (_runtime_dir / "models").exists():
sys.path.insert(0, str(_runtime_dir))
else:
sys.path.insert(0, str(_file_parent))
from models.pydantic import (
Book,
BookCollection,
Depot,
DepotCollection,
Desk,
DeskCollection,
Pulse,
PulseCollection,
Room,
RoomCollection,
Status,
Template,
TemplateCollection,
Tool,
ToolCollection,
Vein,
VeinCollection,
)
# Data directory - try runtime dir first, then file's parent
_default_data = (
_runtime_dir / "data" if (_runtime_dir / "data").exists() else _file_parent / "data"
)
DATA_DIR = Path(os.getenv("SOLEPRINT_DATA_DIR", _default_data)).resolve()
def _load_json(filename: str) -> dict:
    """Read a JSON file from DATA_DIR; return an empty collection if absent.

    The ``{"items": []}`` fallback matches the shape the *Collection
    Pydantic models expect, so callers never need to special-case a
    missing data file.
    """
    path = DATA_DIR / filename
    if not path.exists():
        return {"items": []}
    with open(path) as fh:
        return json.load(fh)
def _save_json(filename: str, data: dict):
    """Write `data` as pretty-printed (indent=2) JSON into DATA_DIR."""
    target = DATA_DIR / filename
    with open(target, "w") as fh:
        json.dump(data, fh, indent=2)
# === Collection Loaders ===


def get_veins() -> List[Vein]:
    """All veins from veins.json, validated through VeinCollection."""
    return VeinCollection(**_load_json("veins.json")).items


def get_rooms() -> List[Room]:
    """All rooms from rooms.json, validated through RoomCollection."""
    return RoomCollection(**_load_json("rooms.json")).items


def get_depots() -> List[Depot]:
    """All depots from depots.json, validated through DepotCollection."""
    return DepotCollection(**_load_json("depots.json")).items


def get_templates() -> List[Template]:
    """All templates from templates.json, validated through TemplateCollection."""
    return TemplateCollection(**_load_json("templates.json")).items


def get_tools() -> List[Tool]:
    """All tools from tools.json, validated through ToolCollection."""
    return ToolCollection(**_load_json("tools.json")).items
def get_cabinets() -> list:
    """Cabinets as raw dicts — no Pydantic model for these yet."""
    return _load_json("cabinets.json").get("items", [])


def get_monitors() -> list:
    """Monitors as raw dicts — no Pydantic model for these yet."""
    return _load_json("monitors.json").get("items", [])
def get_pulses() -> List[Pulse]:
    """All pulses from pulses.json, validated through PulseCollection."""
    return PulseCollection(**_load_json("pulses.json")).items


def get_books() -> List[Book]:
    """All books from books.json, validated through BookCollection."""
    return BookCollection(**_load_json("books.json")).items


def get_desks() -> List[Desk]:
    """All desks from desks.json, validated through DeskCollection."""
    return DeskCollection(**_load_json("desks.json")).items
# === Single Item Helpers ===


def get_vein(name: str) -> Optional[Vein]:
    """First vein whose name matches, or None if absent."""
    return next((v for v in get_veins() if v.name == name), None)


def get_room(name: str) -> Optional[Room]:
    """First room whose name matches, or None if absent."""
    return next((r for r in get_rooms() if r.name == name), None)


def get_depot(name: str) -> Optional[Depot]:
    """First depot whose name matches, or None if absent."""
    return next((d for d in get_depots() if d.name == name), None)


def get_tool(name: str) -> Optional[Tool]:
    """First tool whose name matches, or None if absent."""
    return next((t for t in get_tools() if t.name == name), None)
# === System Data (for frontend rendering) ===


def get_artery_data() -> dict:
    """Aggregate payload consumed by the artery frontend."""
    return {
        "veins": [item.model_dump() for item in get_veins()],
        "rooms": [item.model_dump() for item in get_rooms()],
        "depots": [item.model_dump() for item in get_depots()],
        "pulses": [item.model_dump() for item in get_pulses()],
    }


def get_atlas_data() -> dict:
    """Aggregate payload consumed by the atlas frontend."""
    return {
        "templates": [item.model_dump() for item in get_templates()],
        "depots": [item.model_dump() for item in get_depots()],
        "books": [item.model_dump() for item in get_books()],
    }


def get_station_data() -> dict:
    """Aggregate payload consumed by the station frontend.

    Cabinets and monitors are plain dicts (no models yet), so they are
    passed through as-is rather than via model_dump().
    """
    return {
        "tools": [item.model_dump() for item in get_tools()],
        "monitors": get_monitors(),
        "cabinets": get_cabinets(),
        "rooms": [item.model_dump() for item in get_rooms()],
        "depots": [item.model_dump() for item in get_depots()],
        "desks": [item.model_dump() for item in get_desks()],
    }

174
hub/index.html Normal file
View File

@@ -0,0 +1,174 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>soleprint</title>
<link rel="icon" type="image/svg+xml" href="data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 64 64' fill='%23e5e5e5'%3E%3Cg transform='rotate(-15 18 38)'%3E%3Cellipse cx='18' cy='32' rx='7' ry='13'/%3E%3Cellipse cx='18' cy='48' rx='6' ry='7'/%3E%3C/g%3E%3Cg transform='rotate(15 46 28)'%3E%3Cellipse cx='46' cy='22' rx='7' ry='13'/%3E%3Cellipse cx='46' cy='38' rx='6' ry='7'/%3E%3C/g%3E%3C/svg%3E">
<style>
* { box-sizing: border-box; }
html { background: #0a0a0a; }
body {
font-family: system-ui, -apple-system, sans-serif;
max-width: 960px;
margin: 0 auto;
padding: 2rem 1rem;
line-height: 1.6;
color: #e5e5e5;
background: #0a0a0a;
}
header {
display: flex;
align-items: center;
gap: 1rem;
margin-bottom: 1rem;
}
.logo { width: 64px; height: 64px; }
h1 { font-size: 2.5rem; margin: 0; color: white; }
.tagline {
color: #a3a3a3;
margin-bottom: 2rem;
border-bottom: 1px solid #333;
padding-bottom: 2rem;
}
.mission {
background: #1a1a1a;
border-left: 3px solid #d4a574;
padding: 1rem 1.5rem;
margin: 2rem 0;
border-radius: 0 8px 8px 0;
color: #d4a574;
}
.systems {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
gap: 1.5rem;
margin: 2rem 0;
}
.system {
display: flex;
align-items: flex-start;
gap: 1rem;
text-decoration: none;
padding: 1.5rem;
border-radius: 12px;
transition: transform 0.15s, box-shadow 0.15s;
}
.system:hover {
transform: translateY(-2px);
box-shadow: 0 4px 12px rgba(0,0,0,0.1);
}
.system.disabled {
opacity: 0.5;
pointer-events: none;
}
.system svg { width: 48px; height: 48px; flex-shrink: 0; }
.system-info h2 { margin: 0 0 0.25rem 0; font-size: 1.2rem; }
.system-info p { margin: 0; font-size: 0.9rem; color: #a3a3a3; }
.artery { background: #1a1a1a; border: 1px solid #b91c1c; }
.artery h2 { color: #fca5a5; }
.artery svg { color: #b91c1c; }
.atlas { background: #1a1a1a; border: 1px solid #15803d; }
.atlas h2 { color: #86efac; }
.atlas svg { color: #15803d; }
.station { background: #1a1a1a; border: 1px solid #1d4ed8; }
.station h2 { color: #93c5fd; }
.station svg { color: #1d4ed8; }
footer {
margin-top: 3rem;
padding-top: 1.5rem;
border-top: 1px solid #333;
font-size: 0.85rem;
color: #666;
}
</style>
</head>
<body>
<header>
<!-- Two shoe prints walking -->
<svg class="logo" viewBox="0 0 64 64" fill="currentColor">
<!-- Left shoe print (back, lower) -->
<g transform="rotate(-15 18 38)">
<!-- Sole -->
<ellipse cx="18" cy="32" rx="7" ry="13"/>
<!-- Heel -->
<ellipse cx="18" cy="48" rx="6" ry="7"/>
</g>
<!-- Right shoe print (front, higher) -->
<g transform="rotate(15 46 28)">
<!-- Sole -->
<ellipse cx="46" cy="22" rx="7" ry="13"/>
<!-- Heel -->
<ellipse cx="46" cy="38" rx="6" ry="7"/>
</g>
</svg>
<h1>soleprint</h1>
</header>
<p class="tagline">Cada paso deja huella</p>
<p class="mission" style="display:none;"><!-- placeholder for session alerts --></p>
<div class="systems">
<a {% if artery %}href="{{ artery }}"{% endif %} class="system artery{% if not artery %} disabled{% endif %}">
<!-- Flux capacitor style -->
<svg viewBox="0 0 48 48" fill="none" stroke="currentColor" stroke-width="2.5">
<path d="M24 4 L24 20 M24 20 L8 40 M24 20 L40 40"/>
<circle cx="24" cy="4" r="3" fill="currentColor"/>
<circle cx="8" cy="40" r="3" fill="currentColor"/>
<circle cx="40" cy="40" r="3" fill="currentColor"/>
<circle cx="24" cy="20" r="5" fill="none"/>
<circle cx="24" cy="20" r="2" fill="currentColor"/>
</svg>
<div class="system-info">
<h2>Artery</h2>
<p>Todo lo vital</p>
</div>
</a>
<a {% if atlas %}href="{{ atlas }}"{% endif %} class="system atlas{% if not atlas %} disabled{% endif %}">
<!-- Map/Atlas with compass rose -->
<svg viewBox="0 0 48 48" fill="currentColor">
<!-- Map fold lines -->
<path d="M4 8 L44 8 M4 16 L44 16 M4 24 L44 24 M4 32 L44 32 M4 40 L44 40" stroke="currentColor" stroke-width="1.5" opacity="0.3" fill="none"/>
<path d="M16 4 L16 44 M32 4 L32 44" stroke="currentColor" stroke-width="1.5" opacity="0.3" fill="none"/>
<!-- Compass rose in center -->
<circle cx="24" cy="24" r="8" fill="none" stroke="currentColor" stroke-width="2"/>
<path d="M24 16 L24 32 M16 24 L32 24" stroke="currentColor" stroke-width="2"/>
<path d="M24 16 L26 20 L24 24 L22 20 Z" fill="currentColor"/><!-- North arrow -->
</svg>
<div class="system-info">
<h2>Atlas</h2>
<p>Documentación accionable</p>
</div>
</a>
<a {% if station %}href="{{ station }}"{% endif %} class="system station{% if not station %} disabled{% endif %}">
<!-- Control panel with knobs and meters -->
<svg viewBox="0 0 48 48" fill="currentColor">
<!-- Panel frame -->
<rect x="4" y="8" width="40" height="32" rx="2" fill="none" stroke="currentColor" stroke-width="2"/>
<!-- Knobs -->
<circle cx="14" cy="18" r="5"/>
<circle cx="14" cy="18" r="2" fill="white"/>
<circle cx="34" cy="18" r="5"/>
<circle cx="34" cy="18" r="2" fill="white"/>
<!-- Meter displays -->
<rect x="10" y="28" width="8" height="6" rx="1" fill="white" opacity="0.6"/>
<rect x="30" y="28" width="8" height="6" rx="1" fill="white" opacity="0.6"/>
<!-- Indicator lights -->
<circle cx="24" cy="14" r="2" fill="white" opacity="0.8"/>
</svg>
<div class="system-info">
<h2>Station</h2>
<p>Monitores, Entornos y Herramientas</p>
</div>
</a>
</div>
<footer>soleprint</footer>
</body>
</html>

136
hub/main.py Normal file
View File

@@ -0,0 +1,136 @@
"""
Soleprint - Overview and routing hub.
Development workflow and documentation system
👣 Mapping development footprints
Systems:
💉 Artery (artery) - Todo lo vital
🗺️ Atlas (atlas) - Documentación accionable
🎛️ Station (station) - Monitores, Entornos y Herramientas
Routes:
/ → index
/health → health check
/api/data/artery → artery data
/api/data/atlas → atlas data
/api/data/station → station data
/artery/* → proxy to artery service
/atlas/* → proxy to atlas service
/station/* → proxy to station service
"""
import os
from pathlib import Path
# Import data functions
from dataloader import get_artery_data, get_atlas_data, get_station_data
from fastapi import FastAPI, Request
from fastapi.responses import RedirectResponse
from fastapi.templating import Jinja2Templates
app = FastAPI(title="Soleprint", version="0.1.0")
templates = Jinja2Templates(directory=Path(__file__).parent)
# Service URLs (internal for API calls)
ARTERY_URL = os.getenv("ARTERY_URL", "http://localhost:12001")
ATLAS_URL = os.getenv("ATLAS_URL", "http://localhost:12002")
STATION_URL = os.getenv("STATION_URL", "http://localhost:12003")
# External URLs (for frontend links, falls back to internal)
ARTERY_EXTERNAL_URL = os.getenv("ARTERY_EXTERNAL_URL", ARTERY_URL)
ATLAS_EXTERNAL_URL = os.getenv("ATLAS_EXTERNAL_URL", ATLAS_URL)
STATION_EXTERNAL_URL = os.getenv("STATION_EXTERNAL_URL", STATION_URL)
@app.get("/health")
def health():
return {
"status": "ok",
"service": "soleprint",
"subsystems": {
"artery": ARTERY_URL,
"atlas": ATLAS_URL,
"station": STATION_URL,
},
}
# === Data API ===


@app.get("/api/data/artery")
def api_artery_data():
    """JSON payload backing the artery frontend (delegates to dataloader)."""
    return get_artery_data()


@app.get("/api/data/atlas")
def api_atlas_data():
    """JSON payload backing the atlas frontend (delegates to dataloader)."""
    return get_atlas_data()


@app.get("/api/data/station")
def api_station_data():
    """JSON payload backing the station frontend (delegates to dataloader)."""
    return get_station_data()
@app.get("/")
def index(request: Request):
return templates.TemplateResponse(
"index.html",
{
"request": request,
"artery": ARTERY_EXTERNAL_URL,
"atlas": ATLAS_EXTERNAL_URL,
"station": STATION_EXTERNAL_URL,
},
)
# === Cross-system redirects ===
# These allow soleprint to act as a hub, redirecting to subsystem routes.
#
# NOTE(review): the original handlers re-read the raw env vars
# (os.getenv("ARTERY_URL") with no default), so without env vars set the
# redirects returned error payloads even though /health and / use the
# module-level constants with localhost defaults. Using the constants
# here makes all routes consistent and keeps local dev working out of
# the box.


def _redirect_to(service: str, base_url: str, path: str):
    """Redirect to `base_url`/`path`, or an error payload if `base_url` is falsy.

    `service` is the env-var prefix used in the error message (e.g. "ARTERY").
    """
    if base_url:
        return RedirectResponse(url=f"{base_url}/{path}")
    return {"error": f"{service}_URL not configured"}


@app.get("/artery")
@app.get("/artery/{path:path}")
def artery_redirect(path: str = ""):
    """Redirect to the artery service."""
    return _redirect_to("ARTERY", ARTERY_URL, path)


@app.get("/atlas")
@app.get("/atlas/{path:path}")
def atlas_redirect(path: str = ""):
    """Redirect to the atlas service."""
    return _redirect_to("ATLAS", ATLAS_URL, path)


@app.get("/station")
@app.get("/station/{path:path}")
def station_redirect(path: str = ""):
    """Redirect to the station service."""
    return _redirect_to("STATION", STATION_URL, path)
if __name__ == "__main__":
import uvicorn
uvicorn.run(
"main:app",
host="0.0.0.0",
port=int(os.getenv("PORT", "12000")),
reload=os.getenv("DEV", "").lower() in ("1", "true"),
)

4
hub/requirements.txt Normal file
View File

@@ -0,0 +1,4 @@
fastapi>=0.104.0
uvicorn[standard]>=0.24.0
pydantic>=2.5.0
httpx>=0.25.0

139
mainroom/CLAUDE.md Normal file
View File

@@ -0,0 +1,139 @@
# Mainroom - Orchestration Layer
## Purpose
Mainroom orchestrates the interaction between **soleprint** and **managed rooms** (external projects like amar).
Key principle: Connect soleprint to managed apps **without modifying either side**.
## Structure
```
mainroom/
├── CLAUDE.md # You are here
├── ctrl/ # Orchestration commands
│ ├── start.sh # Start services (sets env vars)
│ ├── stop.sh # Stop services
│ ├── build.sh # Build images
│ ├── logs.sh # View logs
│ ├── status.sh # Show status
│ ├── deploy.sh # Deploy to server
│ └── server/ # Server setup scripts
├── sbwrapper/ # Sidebar wrapper UI
│ ├── index.html # Wrapper shell
│ ├── sidebar.css # Styling
│ ├── sidebar.js # Logic
│ └── config.json # Per-room configuration
├── link/ # Adapter layer
│ ├── main.py # FastAPI service
│ ├── Dockerfile
│ ├── docker-compose.yml
│ └── adapters/ # Framework-specific adapters
│ ├── __init__.py # BaseAdapter interface
│ └── django.py # Django adapter (for amar)
└── soleprint/ # Docker configs for soleprint services
├── docker-compose.yml
├── docker-compose.nginx.yml
└── Dockerfile.fastapi
```
## Components
### ctrl/ - Orchestration Commands
Scripts that set env vars and start/stop services. The managed project works with its own defaults; ctrl sets overrides for orchestration.
```bash
./ctrl/start.sh # Start soleprint + link
./ctrl/start.sh --with-nginx # Start with nginx proxy
./ctrl/stop.sh # Stop all
./ctrl/logs.sh # View logs
./ctrl/status.sh # Show status
```
### sbwrapper/ - Sidebar Wrapper UI
Collapsible sidebar overlay for ANY managed app. Provides dev tools without interfering with the managed application.
**Features:**
- Quick login panel (switch test users)
- Jira ticket info panel
- Environment info
- Collapsible, resizable
- Keyboard shortcut: `Ctrl+Shift+P`
**Implementation:** HTML injection via nginx reverse proxy or iframe approach.
### link/ - Adapter Layer
Framework-agnostic data navigation between soleprint and managed apps.
**Pattern:**
```
Managed App (DB) ←── link adapters ──→ Soleprint (Station tools)
```
**Endpoints:**
- `GET /health` - Health check
- `GET /api/queries` - List available queries
- `GET /api/navigate?query=<name>` - Execute predefined query
- `GET /api/navigate?entity=<type>&id=<id>` - Entity navigation
**JSON Contract:**
```json
{
"nodes": [{"id": "User_123", "type": "User", "label": "john", "data": {...}}],
"edges": [{"from": "User_123", "to": "Pet_456", "label": "owns"}],
"summary": {"title": "User #123", "fields": {...}}
}
```
### soleprint/ - Docker Configs
Docker compose files for running soleprint services (hub, artery, atlas, station).
**Environment Variables:**
- `SOLEPRINT_BARE_PATH` - Path to soleprint source (gen/)
- `DEPLOYMENT_NAME` - Container prefix
- `NETWORK_NAME` - Docker network name
- `SOLEPRINT_PORT`, `ARTERY_PORT`, `ATLAS_PORT`, `STATION_PORT`
## How It Works
1. **ctrl/** sets environment variables for orchestration
2. **soleprint/** docker configs use those vars to mount code and expose ports
3. **link/** connects to managed app's database via adapters
4. **sbwrapper/** overlays UI on managed app via nginx injection
The managed project is never modified - it runs with its own defaults, mainroom just provides the orchestration layer on top.
## Ports
| Service | Port |
|---------|------|
| Soleprint Hub | 12000 |
| Artery | 12001 |
| Atlas | 12002 |
| Station | 12003 |
| Link | 8100 |
## Adding a New Managed Room
1. Create adapter in `link/adapters/` (implement BaseAdapter)
2. Configure `sbwrapper/config.json` with room-specific users, Jira ticket, etc.
3. Set env vars in ctrl scripts pointing to managed app
4. Run `./ctrl/start.sh`
## Worktrees
Feature development:
- `/home/mariano/wdir/wts/spr/sbwrapper` - Sidebar wrapper development
- `/home/mariano/wdir/wts/spr/databrowse` - Data browser tool
## External References
| What | Location |
|------|----------|
| Soleprint source | `../` (parent directory) |
| Amar backend | `/home/mariano/wdir/ama/amar_django_back` |
| Amar frontend | `/home/mariano/wdir/ama/amar_frontend` |
| Core nest (legacy) | `/home/mariano/wdir/ama/core_nest` |

25
mainroom/ctrl/.env.sync Normal file
View File

@@ -0,0 +1,25 @@
# Configuration for core_nest deployment
# Server configuration
DEPLOY_SERVER=mariano@mcrn.ar
# Docker deployment (default)
DEPLOY_REMOTE_PATH=~/core_nest
# Bare metal deployment (--bare-metal flag)
DEPLOY_BARE_METAL_PATH=~/pawprint
# Local source code paths
# (Defaults are set in deploy.sh if not specified here)
LOCAL_AMAR_BACKEND=/home/mariano/wdir/ama/amar_django_back
LOCAL_AMAR_FRONTEND=/home/mariano/wdir/ama/amar_frontend
LOCAL_PAWPRINT=/home/mariano/wdir/ama/pawprint
# =============================================================================
# TEST SYNC PATHS (decoupled, standalone)
# =============================================================================
# Source: Where tests come from (any repo with contract tests)
TEST_SOURCE_PATH=/home/mariano/wdir/ama/amar_django_back/tests/contracts
# Target: Where ward tester expects tests (volume mount, no restart needed)
WARD_TESTS_PATH=/home/mariano/wdir/ama/pawprint/ward/tools/tester/tests

102
mainroom/ctrl/.exclude Normal file
View File

@@ -0,0 +1,102 @@
# Exclude patterns for rsync deployment
# Used by deploy.sh for all sync operations
# =============================================================================
# VERSION CONTROL
# =============================================================================
.git
.gitignore
# =============================================================================
# PYTHON
# =============================================================================
*.pyc
*.pyo
*.pyd
__pycache__
.pytest_cache
.mypy_cache
.coverage
htmlcov
*.egg-info
dist
build
.venv
venv
env
ENV
# Django build artifacts
staticfiles
*.sqlite3
*.db
# =============================================================================
# NODE/JAVASCRIPT
# =============================================================================
node_modules
.next
.nuxt
dist
out
.cache
.parcel-cache
coverage
.nyc_output
.npm
.pnp
.pnp.js
.eslintcache
.turbo
# =============================================================================
# IDE / EDITOR
# =============================================================================
.vscode
.idea
*.swp
*.swo
*~
# =============================================================================
# OS
# =============================================================================
.DS_Store
Thumbs.db
# =============================================================================
# LOGS
# =============================================================================
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# =============================================================================
# ENVIRONMENT FILES
# =============================================================================
.env
.env.*
!.env.example
# =============================================================================
# CORE_NEST SPECIFIC
# =============================================================================
# Large dev seed data (use test/prod on server)
init-db/seed-dev
# Dumps directory (source for seed files)
dumps
# Build artifacts (Django collectstatic output - not album/static which has Prism.js)
amar/src/back/static
amar/src/back/staticfiles
media
# =============================================================================
# PAWPRINT SPECIFIC
# =============================================================================
# Local workspace/definition folder (not for production)
def/

92
mainroom/ctrl/README.md Normal file
View File

@@ -0,0 +1,92 @@
# Core Nest Control Scripts
Control scripts for managing the core_nest deployment (amar + pawprint).
## Structure
```
ctrl/
├── .env.sync # Configuration for deploy
├── .exclude # Rsync exclusion patterns
├── build.sh # Build Docker images (auto-detects services)
├── deploy.sh # Deploy to server (sync all files)
├── logs.sh # View container logs
├── setup.sh # Initial setup (nginx, certs, .env)
├── start.sh # Start Docker services
├── status.sh # Show container status
├── stop.sh # Stop Docker services
└── manual_sync/ # Source code sync scripts
├── sync_ama.sh # Sync amar source code
└── sync_pawprint.sh # Sync pawprint source code
```
## Configuration
Edit `.env.sync` to configure deployment:
```bash
# Server
DEPLOY_SERVER=mariano@mcrn.ar
DEPLOY_REMOTE_PATH=~/core_nest
# Local paths
LOCAL_PAWPRINT_PATH=/home/mariano/wdir/ama/pawprint
LOCAL_AMAR_BASE=/home/mariano/wdir/ama
# Remote paths
REMOTE_PAWPRINT_PATH=/home/mariano/pawprint
REMOTE_AMAR_PATH=/home/mariano/core_nest/amar/src
```
## Usage
### Full Deployment
```bash
cd ctrl
./deploy.sh # Deploy everything to server
./deploy.sh --dry-run # Preview what would be synced
# Then on server:
ssh server 'cd ~/core_nest/ctrl && ./build.sh && ./start.sh -d'
```
### Local Development
```bash
./start.sh # Start all services (foreground, see logs)
./start.sh -d # Start all services (detached)
./start.sh --build # Start with rebuild
./start.sh -d --build # Detached with rebuild
./logs.sh # View logs
./stop.sh # Stop all services
```
### Service Management
```bash
# All scripts auto-detect services (any dir with docker-compose.yml)
./build.sh # Build all images
./build.sh amar # Build only amar images
./build.sh --no-cache # Force rebuild without cache
./start.sh # Start all (foreground)
./start.sh -d # Start all (detached)
./start.sh amar # Start specific service
./start.sh --build # Start with rebuild
./stop.sh # Stop all services
./stop.sh amar # Stop specific service
./logs.sh # View all logs
./logs.sh amar # View amar compose logs
./logs.sh backend # View specific container logs
./status.sh # Show container status
```
## Nest vs Pawprint Control
- **core_nest/ctrl/** - Manages the full nest (amar + pawprint) via Docker
- **pawprint/ctrl/** - Manages pawprint services via systemd (alternative deployment)
Use core_nest/ctrl for orchestrating the full nest with Docker Compose.
Use pawprint/ctrl for direct systemd deployment of pawprint services only.

65
mainroom/ctrl/build.sh Executable file
View File

@@ -0,0 +1,65 @@
#!/bin/bash
# Build core_nest Docker images
#
# Usage:
#   ./build.sh              # Build all
#   ./build.sh <service>    # Build specific service
#   ./build.sh --no-cache   # Force rebuild without cache
set -e

# Change to parent directory (services are in ../service_name)
cd "$(dirname "$0")/.."

# Export core_nest/.env vars so child docker-compose files can use them.
# FIX: the previous `export $(grep -v '^#' .env | xargs)` form corrupts
# quoted and multi-line values (e.g. the nginx location blocks stored in
# server/.env); `set -a` + source handles quoting correctly.
if [ -f ".env" ]; then
    set -a
    # shellcheck disable=SC1091
    source ./.env
    set +a
fi

TARGET="all"
NO_CACHE=""
SERVICE_DIRS=()

# Find all service directories (have docker-compose.yml, exclude ctrl/nginx)
for dir in */; do
    dirname="${dir%/}"
    if [ -f "$dir/docker-compose.yml" ] && [ "$dirname" != "ctrl" ] && [ "$dirname" != "nginx" ]; then
        SERVICE_DIRS+=("$dirname")
    fi
done

for arg in "$@"; do
    case $arg in
        --no-cache) NO_CACHE="--no-cache" ;;
        all) TARGET="all" ;;
        *)
            # Check if it's a valid service directory
            if [[ " ${SERVICE_DIRS[*]} " =~ " ${arg} " ]]; then
                TARGET="$arg"
            else
                # FIX: unknown args used to be silently ignored, causing a
                # surprise build of ALL services on a typo. Fail fast instead.
                echo "Unknown service or option: $arg"
                echo "Usage: ./build.sh [${SERVICE_DIRS[*]}|all] [--no-cache]"
                exit 1
            fi
            ;;
    esac
done

# Build the images of one service directory via its docker-compose.yml.
build_service() {
    local service=$1
    echo "Building $service images..."
    cd "$service"
    # Legacy (non-BuildKit) builder kept from original — presumably required
    # by the existing compose files; TODO confirm before removing.
    DOCKER_BUILDKIT=0 COMPOSE_DOCKER_CLI_BUILD=0 docker compose build $NO_CACHE
    cd ..
    echo "  $service images built"
}

if [ "$TARGET" = "all" ]; then
    for service in "${SERVICE_DIRS[@]}"; do
        build_service "$service"
        echo ""
    done
else
    # TARGET was already validated against SERVICE_DIRS during arg parsing.
    build_service "$TARGET"
fi

echo "=== Build Complete ==="

161
mainroom/ctrl/deploy.sh Executable file
View File

@@ -0,0 +1,161 @@
#!/bin/bash
# Deploy core_nest to server
#
# Two deployment modes:
#   1. Docker (default): Full core_nest structure + source code
#   2. Bare metal (--bare-metal): Only pawprint source to systemd services
#
# Usage:
#   ./deploy.sh                         # Deploy Docker setup (default)
#   ./deploy.sh --bare-metal            # Deploy bare metal pawprint only
#   ./deploy.sh --dry-run               # Preview what would be synced
#   ./deploy.sh --bare-metal --dry-run  # Preview bare metal sync
set -e

# Load configuration
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# .env.sync is optional: `|| true` means a missing/unreadable file silently
# falls back to the ${VAR:-default} values below.
source "$SCRIPT_DIR/.env.sync" 2>/dev/null || true

SERVER="${DEPLOY_SERVER:-mariano@mcrn.ar}"
REMOTE_PATH="${DEPLOY_REMOTE_PATH:-~/core_nest}"
BARE_METAL_PATH="${DEPLOY_BARE_METAL_PATH:-~/pawprint}"

# Source code paths (defaults if not in .env.sync)
LOCAL_AMAR_BACKEND="${LOCAL_AMAR_BACKEND:-$HOME/wdir/ama/amar_django_back}"
LOCAL_AMAR_FRONTEND="${LOCAL_AMAR_FRONTEND:-$HOME/wdir/ama/amar_frontend}"
LOCAL_PAWPRINT="${LOCAL_PAWPRINT:-$HOME/wdir/ama/pawprint}"

DRY_RUN=""
BARE_METAL=""
for arg in "$@"; do
    case $arg in
        --dry-run) DRY_RUN="--dry-run" ;;
        --bare-metal) BARE_METAL="true" ;;
    esac
done

cd "$SCRIPT_DIR/.."  # Go to root (parent of ctrl/)

# Common rsync options (using single .exclude file for everything)
# NOTE: deliberately used unquoted ($RSYNC_CMD) so word-splitting expands the
# options. --delete removes remote files that no longer exist locally, and
# --mkpath creates missing destination path components.
RSYNC_CMD="rsync -avz --mkpath --delete --progress --exclude-from=ctrl/.exclude"

# =============================================================================
# BARE METAL DEPLOYMENT
# =============================================================================
if [ -n "$BARE_METAL" ]; then
    echo "=== Deploying to Bare Metal (Systemd Services) ==="
    echo ""
    # Only sync pawprint source
    # main.py check guards against syncing from an empty/wrong directory.
    if [ -d "$LOCAL_PAWPRINT" ] && [ -f "$LOCAL_PAWPRINT/main.py" ]; then
        echo "Syncing pawprint source to bare metal..."
        $RSYNC_CMD $DRY_RUN \
            "$LOCAL_PAWPRINT/" \
            "$SERVER:$BARE_METAL_PATH/"
        echo "  ✓ Pawprint synced to $BARE_METAL_PATH"
    else
        echo "⚠ Pawprint not found at: $LOCAL_PAWPRINT"
        exit 1
    fi
    echo ""
    if [ -n "$DRY_RUN" ]; then
        echo "=== Dry run complete ==="
        exit 0
    fi
    echo "=== Bare Metal Sync Complete ==="
    echo ""
    echo "Next steps on server (as mariano user):"
    echo "  Restart systemd services:"
    echo "    sudo systemctl restart pawprint artery album ward"
    echo ""
    echo "  Check status:"
    echo "    sudo systemctl status pawprint artery album ward"
    echo ""
    exit 0
fi

# =============================================================================
# DOCKER DEPLOYMENT (DEFAULT)
# =============================================================================
echo "=== Deploying to Docker ==="
echo ""

# 1. Sync core_nest structure (excluding src directories - they're synced separately)
echo "1. Syncing core_nest structure..."
$RSYNC_CMD $DRY_RUN \
    --exclude='*/src/' \
    ./ \
    "$SERVER:$REMOTE_PATH/"
echo "  [OK] Core nest structure synced"
echo ""

# 2. Sync amar backend source
echo ""
if [ -d "$LOCAL_AMAR_BACKEND" ]; then
    echo "2. Syncing amar backend source..."
    $RSYNC_CMD $DRY_RUN \
        "$LOCAL_AMAR_BACKEND/" \
        "$SERVER:$REMOTE_PATH/amar/src/back/"
    echo "  [OK] Backend synced"
else
    echo "2. [WARN] Backend source not found at: $LOCAL_AMAR_BACKEND"
fi
echo ""

# 3. Sync amar frontend source
if [ -d "$LOCAL_AMAR_FRONTEND" ]; then
    echo "3. Syncing amar frontend source..."
    $RSYNC_CMD $DRY_RUN \
        "$LOCAL_AMAR_FRONTEND/" \
        "$SERVER:$REMOTE_PATH/amar/src/front/"
    echo "  [OK] Frontend synced"
else
    echo "3. [WARN] Frontend source not found at: $LOCAL_AMAR_FRONTEND"
fi
echo ""

# 4. Sync pawprint source
if [ -d "$LOCAL_PAWPRINT" ] && [ -f "$LOCAL_PAWPRINT/main.py" ]; then
    echo "4. Syncing pawprint source..."
    $RSYNC_CMD $DRY_RUN \
        "$LOCAL_PAWPRINT/" \
        "$SERVER:$REMOTE_PATH/pawprint/src/"
    echo "  [OK] Pawprint synced"
else
    echo "4. [INFO] Pawprint not found at: $LOCAL_PAWPRINT"
fi
echo ""

# 5. Sync tests to ward (silent fail if not available)
# NOTE(review): sync-tests.sh is an external sibling script not shown here;
# SILENT_FAIL is presumably consumed by it — confirm against that script.
if [ -z "$DRY_RUN" ]; then
    echo "5. Syncing tests to ward..."
    if SILENT_FAIL=true "$SCRIPT_DIR/sync-tests.sh" >/dev/null 2>&1; then
        echo "  [OK] Tests synced"
    else
        echo "  [SKIP] Tests sync not configured or not available"
    fi
    echo ""
fi

if [ -n "$DRY_RUN" ]; then
    echo "=== Dry run complete ==="
    exit 0
fi

echo "=== Docker Sync Complete ==="
echo ""
echo "Next steps on server (as mariano user):"
echo "  1. Setup (first time only):"
echo "     ssh $SERVER 'cd $REMOTE_PATH/ctrl/server && ./setup.sh'"
echo ""
echo "  2. Setup test symlinks (optional, enables test sharing):"
echo "     ssh $SERVER 'cd $REMOTE_PATH/ctrl/server && ./setup-symlinks.sh'"
echo "     Or sync tests without symlinks: ./ctrl/sync-tests.sh"
echo ""
echo "  3. Build and start:"
echo "     ssh $SERVER 'cd $REMOTE_PATH/ctrl && ./build.sh && ./start.sh -d'"
echo ""
echo "Note: Bare metal services remain running as fallback (*.bare.mcrn.ar)"

45
mainroom/ctrl/logs.sh Executable file
View File

@@ -0,0 +1,45 @@
#!/bin/bash
# View core_nest logs
#
# Usage:
#   ./logs.sh               # All logs
#   ./logs.sh <service>     # Service compose logs (e.g., amar, pawprint)
#   ./logs.sh <container>   # Specific container name (e.g., backend, db)
set -e

# Change to parent directory (services are in ../service_name)
cd "$(dirname "$0")/.."

# Export core_nest/.env vars.
# FIX: the previous `export $(grep -v '^#' .env | xargs)` form corrupts
# quoted and multi-line values; `set -a` + source handles them correctly.
if [ -f ".env" ]; then
    set -a
    # shellcheck disable=SC1091
    source ./.env
    set +a
fi

TARGET=${1:-all}
SERVICE_DIRS=()

# Find all service directories (have docker-compose.yml, exclude ctrl/nginx)
for dir in */; do
    dirname="${dir%/}"
    if [ -f "$dir/docker-compose.yml" ] && [ "$dirname" != "ctrl" ] && [ "$dirname" != "nginx" ]; then
        SERVICE_DIRS+=("$dirname")
    fi
done

# NEST_NAME comes from core_nest/.env
NEST_NAME=${NEST_NAME:-core_nest}

if [[ " ${SERVICE_DIRS[*]} " =~ " ${TARGET} " ]]; then
    # Service directory logs
    cd "$TARGET" && docker compose logs -f
elif [ "$TARGET" = "all" ]; then
    # FIX: `docker logs` accepts exactly ONE container, so the old
    # `docker logs -f $(docker ps -q ...)` call errored whenever more than
    # one nest container was running. Follow each match in the background
    # and wait on all of them instead.
    CONTAINERS=$(docker ps -q --filter "name=${NEST_NAME}")
    if [ -n "$CONTAINERS" ]; then
        for id in $CONTAINERS; do
            docker logs -f "$id" &
        done
        wait
    else
        echo "No ${NEST_NAME} containers running"
    fi
else
    # Specific container name: try the prefixed name first, then as given.
    docker logs -f "${NEST_NAME}_$TARGET" 2>/dev/null || \
        docker logs -f "$TARGET" 2>/dev/null || \
        echo "Container not found: $TARGET"
fi

110
mainroom/ctrl/server/.env Normal file
View File

@@ -0,0 +1,110 @@
# Core Nest - Environment Configuration
# This configuration is shared across all services in the nest
# NOTE(review): this .env is committed to the repo ("Normal file") while the
# docs say never to commit production .env files — confirm it holds no secrets.
# =============================================================================
# DEPLOYMENT CONFIG
# =============================================================================
# Unique identifier for this deployment (used for container/network names)
DEPLOYMENT_NAME=core_nest
# Nest identifier (logical grouping of services)
NEST_NAME=core_nest
# Network name for Docker services
NETWORK_NAME=core_nest_network
# =============================================================================
# DOMAINS (Local Development)
# =============================================================================
# Domain for the managed application (e.g., amar)
MANAGED_DOMAIN=amar.local.com
# Domain for pawprint management interface
PAWPRINT_DOMAIN=pawprint.local.com
# =============================================================================
# PORTS (Local Development)
# =============================================================================
# Managed app ports
BACKEND_PORT=8000
FRONTEND_PORT=3000
# Pawprint ecosystem ports
PAWPRINT_PORT=13000
ARTERY_PORT=13001
ALBUM_PORT=13002
WARD_PORT=13003
# =============================================================================
# Ports
# NOTE(review): MANAGED_FRONTEND_PORT/MANAGED_BACKEND_PORT duplicate the
# FRONTEND_PORT/BACKEND_PORT values above — confirm which names the compose
# files actually read, then drop the unused pair.
MANAGED_FRONTEND_PORT=3000
MANAGED_BACKEND_PORT=8000
# Backend location blocks (Django-specific)
# NOTE(review): these blocks are a subset of MANAGED_LOCATIONS further down;
# confirm which variable the nginx template consumes. Single quotes mean
# ${DEPLOYMENT_NAME} and the nginx $vars below are NOT expanded here —
# presumably substituted later by the config generator (TODO confirm).
MANAGED_BACKEND_LOCATIONS='
location /api/ {
proxy_pass http://${DEPLOYMENT_NAME}_backend:8000/api/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 300;
}
location /admin/ {
proxy_pass http://${DEPLOYMENT_NAME}_backend:8000/admin/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
location /static/ {
proxy_pass http://${DEPLOYMENT_NAME}_backend:8000/static/;
}
'
# =============================================================================
# MANAGED DOMAIN CONFIG (AMAR-specific - core_nest context)
# =============================================================================
# Complete nginx location blocks for amar
MANAGED_LOCATIONS='
location /api/ {
proxy_pass http://${DEPLOYMENT_NAME}_backend:8000/api/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 300;
}
location /admin/ {
proxy_pass http://${DEPLOYMENT_NAME}_backend:8000/admin/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
location /static/ {
proxy_pass http://${DEPLOYMENT_NAME}_backend:8000/static/;
}
location / {
proxy_pass http://${DEPLOYMENT_NAME}_frontend:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 300;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
'
# =============================================================================
# AMAR PATHS (core_nest specific - managed app)
# =============================================================================
# NOTE(review): the DOCKERFILE_* paths point into ../def/, which the repo
# .gitignore excludes ("Local workspace/definition folder") — confirm these
# files are present wherever the build actually runs.
BACKEND_PATH=../../amar_django_back
FRONTEND_PATH=../../amar_frontend
DOCKERFILE_BACKEND=../def/core_nest/amar/Dockerfile.backend
DOCKERFILE_FRONTEND=../def/core_nest/amar/Dockerfile.frontend
# Database seed data
INIT_DB_SEED=test

6
mainroom/ctrl/server/.gitignore vendored Normal file
View File

@@ -0,0 +1,6 @@
# Generated configuration files
.generated/
# Old backups
*.old
*.bak

View File

@@ -0,0 +1,234 @@
# Server Configuration
Everything that runs **on the server** (not locally).
## Purpose
This directory contains **server-side** scripts and configs that get deployed to AWS.
Separate from `ctrl/` which contains **local** orchestration scripts.
## Structure
```
server/
├── setup.sh # Idempotent server setup (run on AWS)
├── nginx/
│ └── core_nest.conf # Single nginx config for all services
└── scripts/ # Any other server-side scripts
```
## Expected Server Structure
When deployed, the AWS instance should look like:
```
~/core_nest/ # This repo (deployed via deploy.sh)
├── server/ # Server-side scripts
│ ├── setup.sh # Run this first
│ └── nginx/
├── ctrl/ # Local scripts (work remotely too)
│ ├── build.sh, start.sh, stop.sh, logs.sh, status.sh
│ └── manual_sync/
├── amar/
│ ├── docker-compose.yml
│ ├── .env # Production values
│ ├── Dockerfile.*
│ ├── init-db/
│ └── src/ # Synced from local via manual_sync/
│ ├── back/ # Django source
│ └── front/ # Next.js source
└── pawprint/
├── docker-compose.yml
├── .env # Production values
└── (bare metal or src/ depending on deployment)
```
## Usage
### First-Time Server Setup
```bash
# 1. From local machine: Deploy files
cd ~/wdir/ama/core_nest/ctrl
./deploy.sh
# 2. SSH to server
ssh mariano@mcrn.ar
# 3. Run server setup (idempotent - safe to re-run)
cd ~/core_nest/ctrl/server
./setup.sh
```
This will:
- Ensure directory structure exists
- Install Docker, Docker Compose, Nginx, Certbot
- Check SSL certificates (prompts if missing)
- Install nginx config
- Create .env files from examples
### Updates/Changes
```bash
# From local: edit server/nginx/core_nest.conf or server/setup.sh
# Then deploy:
./deploy.sh
# On server: re-run setup to apply changes
ssh mariano@mcrn.ar 'cd ~/core_nest/ctrl/server && ./setup.sh'
```
### Build and Start Services
```bash
# On server (or via SSH):
cd ~/core_nest/ctrl
./build.sh # Build all images
./start.sh -d # Start detached
./status.sh # Check status
```
## Key Files
### server/setup.sh
Idempotent setup script that runs on AWS:
- Checks/installs: Docker, Nginx, Certbot
- Verifies SSL certs exist
- Installs nginx config
- Creates .env files from examples
**Safe to run multiple times** - won't break existing setup.
### server/nginx/core_nest.conf
Single nginx config file for all services:
- amar.nest.mcrn.ar (frontend + backend)
- pawprint.mcrn.ar
- artery.mcrn.ar
- album.mcrn.ar
- ward.mcrn.ar
Edit this file locally, deploy, re-run setup.sh to apply.
## Environment Variables
Create production `.env` files:
```bash
# On server:
nano ~/core_nest/amar/.env # Set INIT_DB_SEED=test or prod
nano ~/core_nest/pawprint/.env # Set NEST_NAME, ports, etc.
```
## SSL Certificates
Certificates are managed via Let's Encrypt:
```bash
# Wildcard for *.nest.mcrn.ar (for amar)
sudo certbot certonly --manual --preferred-challenges dns -d '*.nest.mcrn.ar'
# Wildcard for *.mcrn.ar (for pawprint services)
sudo certbot certonly --manual --preferred-challenges dns -d '*.mcrn.ar'
```
Auto-renewal is handled by certbot systemd timer.
## Troubleshooting
### Nginx config test fails
```bash
sudo nginx -t
# Fix errors in server/nginx/core_nest.conf
```
### Services won't start
```bash
cd ~/core_nest/ctrl
./logs.sh # Check all logs
./logs.sh amar # Check specific service
docker ps -a # See all containers
```
### Database issues
```bash
# Check which seed data is configured
grep INIT_DB_SEED ~/core_nest/amar/.env
# Rebuild database (WARNING: deletes data)
cd ~/core_nest
docker compose -f amar/docker-compose.yml down -v
./ctrl/start.sh amar -d
```
## Test Directory Symlinking
### setup-symlinks.sh
**Purpose:** Create symlinks to share test directories across services on the same filesystem.
This allows ward/tester to access tests from amar_django_back_contracts without duplication.
```bash
# Preview changes
ssh mariano@mcrn.ar 'cd ~/core_nest/ctrl/server && ./setup-symlinks.sh --dry-run'
# Apply changes
ssh mariano@mcrn.ar 'cd ~/core_nest/ctrl/server && ./setup-symlinks.sh'
```
**What it does:**
- Creates symlinks from `pawprint/src/ward/tools/tester/tests/` to `amar/src/back/tests/contracts/`
- Symlinks each domain directory (mascotas, productos, solicitudes, workflows)
- Symlinks shared utilities (endpoints.py, helpers.py, base.py, conftest.py)
**Benefits:**
- Single source of truth for tests
- No duplication
- Tests automatically sync when backend is deployed
- Works across Docker containers sharing the same filesystem
**Alternative:** If symlinks don't work (different filesystems, Windows hosts), use `../ctrl/sync-tests.sh` to copy test files.
### sync-tests.sh (in ctrl/ directory)
**Purpose:** Sync test files as an alternative to symlinks.
```bash
# From local machine - sync to Docker
./ctrl/sync-tests.sh
# From local machine - sync to bare metal
./ctrl/sync-tests.sh --to-bare-metal
```
Use this when:
- Symlinks are not supported
- Services are on different filesystems
- You need independent test copies
### Verification
After setup, verify symlinks are working:
```bash
# Check symlinks exist
ssh mariano@mcrn.ar 'ls -lah ~/core_nest/pawprint/src/ward/tools/tester/tests'
# Verify they point to correct location
ssh mariano@mcrn.ar 'readlink ~/core_nest/pawprint/src/ward/tools/tester/tests/mascotas'
# Test in browser
open https://ward.mcrn.ar/tools/tester/
```
## Security Notes
- Never commit production `.env` files
- SSL certs in `/etc/letsencrypt/` (not in repo)
- Database volumes persist in Docker volumes
- Backup database regularly:
```bash
docker exec core_nest_db pg_dump -U postgres amarback > backup.sql
```

186
mainroom/ctrl/server/audit.sh Executable file
View File

@@ -0,0 +1,186 @@
#!/bin/bash
# Server Audit - Run on AWS to see current state
# Usage: ssh server 'bash -s' < ctrl/server/audit.sh
#
# Read-only: only inspects state (the sudo calls read protected paths).
# Deliberately no `set -e` — every probe has an `|| echo` fallback so one
# missing tool never aborts the whole audit.
echo "=== SERVER AUDIT ==="
echo "Date: $(date)"
echo "Host: $(hostname)"
echo "User: $USER"
echo ""

# =============================================================================
# Directory Structure
# =============================================================================
echo "=== DIRECTORY STRUCTURE ==="
echo ""
echo "Home directory contents:"
ls -lah ~/
echo ""
echo "core_nest structure (if exists):"
if [ -d ~/core_nest ]; then
    # tree may not be installed; fall back to a depth-limited find.
    tree ~/core_nest -L 2 -I ".git" 2>/dev/null || find ~/core_nest -maxdepth 2 -type d | sort
else
    echo "  ~/core_nest does NOT exist"
fi
echo ""
echo "pawprint location:"
if [ -d ~/pawprint ]; then
    ls -lah ~/pawprint/ | head -10
    echo "  ..."
else
    echo "  ~/pawprint does NOT exist"
fi
echo ""

# =============================================================================
# Docker
# =============================================================================
echo "=== DOCKER ==="
echo ""
echo "Docker version:"
docker --version 2>/dev/null || echo "  Docker NOT installed"
echo ""
echo "Docker Compose version:"
docker compose version 2>/dev/null || echo "  Docker Compose NOT installed"
echo ""
echo "Running containers:"
docker ps --format "table {{.Names}}\t{{.Image}}\t{{.Status}}\t{{.Ports}}" 2>/dev/null || echo "  None or Docker not running"
echo ""
echo "All containers (including stopped):"
docker ps -a --format "table {{.Names}}\t{{.Image}}\t{{.Status}}" 2>/dev/null | head -20
echo ""
echo "Docker networks:"
docker network ls 2>/dev/null || echo "  None"
echo ""
echo "Docker volumes:"
# Keep the DRIVER header row so grep output still reads as a table.
docker volume ls 2>/dev/null | grep -E "core_nest|amar|pawprint|DRIVER" || echo "  No core_nest/amar/pawprint volumes"
echo ""

# =============================================================================
# Nginx
# =============================================================================
echo "=== NGINX ==="
echo ""
echo "Nginx version:"
# nginx -v writes to stderr, hence 2>&1 rather than 2>/dev/null.
nginx -v 2>&1 || echo "  Nginx NOT installed"
echo ""
echo "Nginx status:"
systemctl status nginx --no-pager -l 2>/dev/null | head -5 || echo "  Cannot check status"
echo ""
echo "Sites enabled:"
ls -lah /etc/nginx/sites-enabled/ 2>/dev/null || echo "  Directory does not exist"
echo ""
echo "Sites available (core_nest related):"
ls -lah /etc/nginx/sites-available/ 2>/dev/null | grep -E "nest|amar|pawprint|artery|album|ward" || echo "  None found"
echo ""

# =============================================================================
# SSL Certificates
# =============================================================================
echo "=== SSL CERTIFICATES ==="
echo ""
echo "Certbot version:"
certbot --version 2>/dev/null || echo "  Certbot NOT installed"
echo ""
echo "Certificates:"
if [ -d /etc/letsencrypt/live ]; then
    sudo ls -lah /etc/letsencrypt/live/ 2>/dev/null || echo "  Permission denied"
else
    echo "  /etc/letsencrypt/live does NOT exist"
fi
echo ""

# =============================================================================
# Environment Files
# =============================================================================
echo "=== ENVIRONMENT FILES ==="
echo ""
for location in ~/core_nest/amar ~/core_nest/pawprint ~/pawprint; do
    if [ -d "$location" ]; then
        echo "$location/.env:"
        if [ -f "$location/.env" ]; then
            echo "  EXISTS"
            # stat -c is GNU/Linux; stat -f is the BSD/macOS fallback.
            echo "  Size: $(stat -c%s "$location/.env" 2>/dev/null || stat -f%z "$location/.env" 2>/dev/null) bytes"
            echo "  NEST_NAME: $(grep "^NEST_NAME=" "$location/.env" 2>/dev/null || echo "not set")"
            echo "  NETWORK_NAME: $(grep "^NETWORK_NAME=" "$location/.env" 2>/dev/null || echo "not set")"
        else
            echo "  does NOT exist"
        fi
        echo "$location/.env.example:"
        [ -f "$location/.env.example" ] && echo "  EXISTS" || echo "  does NOT exist"
        echo ""
    fi
done

# =============================================================================
# Ports in Use
# =============================================================================
echo "=== PORTS IN USE ==="
echo ""
echo "Listening on ports (3000, 8000, 13000-13003):"
# netstat may be absent on newer distros; ss is the fallback.
sudo netstat -tlnp 2>/dev/null | grep -E ":3000|:8000|:1300[0-3]" || sudo ss -tlnp | grep -E ":3000|:8000|:1300[0-3]" || echo "  Cannot check (need sudo)"
echo ""

# =============================================================================
# Systemd Services
# =============================================================================
echo "=== SYSTEMD SERVICES ==="
echo ""
echo "Pawprint-related services:"
systemctl list-units --type=service --all 2>/dev/null | grep -E "pawprint|artery|album|ward" || echo "  None found"
echo ""

# =============================================================================
# Disk Usage
# =============================================================================
echo "=== DISK USAGE ==="
echo ""
echo "Overall:"
df -h / 2>/dev/null
echo ""
echo "Docker space:"
docker system df 2>/dev/null || echo "  Docker not available"
echo ""

# =============================================================================
# Summary
# =============================================================================
echo "=== SUMMARY ==="
echo ""
echo "Key Questions:"
echo ""
echo "1. Is there an existing core_nest deployment?"
[ -d ~/core_nest ] && echo "  YES - ~/core_nest exists" || echo "  NO"
echo ""
echo "2. Are Docker containers running?"
# xargs -I {} inlines the wc -l count into the message.
docker ps -q 2>/dev/null | wc -l | xargs -I {} echo "  {} containers running"
echo ""
echo "3. Is nginx configured for core_nest?"
[ -f /etc/nginx/sites-enabled/core_nest.conf ] && echo "  YES - core_nest.conf installed" || echo "  NO"
echo ""
echo "4. Are there old individual nginx configs?"
ls /etc/nginx/sites-enabled/ 2>/dev/null | grep -E "amar|pawprint|artery|album|ward" | wc -l | xargs -I {} echo "  {} old configs found"
echo ""
echo "5. SSL certificates present?"
[ -d /etc/letsencrypt/live/nest.mcrn.ar ] && echo "  *.nest.mcrn.ar: YES" || echo "  *.nest.mcrn.ar: NO"
[ -d /etc/letsencrypt/live/mcrn.ar ] && echo "  *.mcrn.ar: YES" || echo "  *.mcrn.ar: NO"
echo ""
echo "=== END AUDIT ==="

156
mainroom/ctrl/server/cleanup.sh Executable file
View File

@@ -0,0 +1,156 @@
#!/bin/bash
# Server Cleanup - Run on AWS to prepare for fresh deployment
# This script safely cleans up old deployments
#
# Usage: ssh server 'cd ~/core_nest/ctrl/server && ./cleanup.sh'
#
# Destructive steps are ordered: containers -> systemd -> nginx -> verify.
# `set -e` aborts on unexpected errors; each individual destructive command
# carries `|| true` so an already-clean item doesn't kill the run.
set -e
echo "=== SERVER CLEANUP ==="
echo ""
echo "⚠️  This will:"
echo "  - Stop all Docker containers"
echo "  - Remove old nginx configs"
echo "  - Keep data volumes and SSL certs"
echo "  - Keep .env files"
echo ""
# Interactive confirmation — requires a TTY, so this can't run unattended.
read -p "Continue? (y/N) " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
    echo "Aborted."
    exit 1
fi

# =============================================================================
# 1. Stop Docker Containers
# =============================================================================
echo ""
echo "Step 1: Stopping Docker containers..."
# Stop containers if Docker is available
if command -v docker &> /dev/null; then
    # Stop all core_nest/amar/pawprint containers
    # Multiple --filter name= flags are OR'd by docker ps.
    CONTAINERS=$(docker ps -q --filter "name=core_nest" --filter "name=amar" --filter "name=pawprint" 2>/dev/null || true)
    if [ -n "$CONTAINERS" ]; then
        echo "  Stopping containers..."
        docker stop $CONTAINERS 2>/dev/null || true
        echo "  ✓ Containers stopped"
    else
        echo "  ✓ No running containers to stop"
    fi
else
    echo "  ⓘ Docker not installed, skipping..."
fi

# =============================================================================
# 2. Stop Systemd Services
# =============================================================================
echo ""
echo "Step 2: Stopping systemd services..."
SERVICES=$(systemctl list-units --type=service --all --no-pager 2>/dev/null | grep -E "pawprint|artery|album|ward" | awk '{print $1}' || true)
if [ -n "$SERVICES" ]; then
    echo "  Found services: $SERVICES"
    for service in $SERVICES; do
        echo "  Stopping $service..."
        sudo systemctl stop "$service" 2>/dev/null || true
        # disable too, so the service doesn't come back on reboot.
        sudo systemctl disable "$service" 2>/dev/null || true
    done
    echo "  ✓ Services stopped and disabled"
else
    echo "  ✓ No systemd services found"
fi

# =============================================================================
# 3. Clean Up Nginx Configs
# =============================================================================
echo ""
echo "Step 3: Cleaning up old nginx configs..."
if [ -d /etc/nginx/sites-enabled ]; then
    # Remove old individual configs
    # Explicit allow-list: only these known legacy configs are ever removed.
    OLD_CONFIGS=(
        "amar.nest.mcrn.ar"
        "amar.nest.mcrn.ar.conf"
        "api.amar.nest.mcrn.ar"
        "api.amar.nest.mcrn.ar.conf"
        "pawprint.mcrn.ar"
        "pawprint.mcrn.ar.conf"
        "artery.mcrn.ar"
        "artery.mcrn.ar.conf"
        "album.mcrn.ar"
        "album.mcrn.ar.conf"
        "ward.mcrn.ar"
        "ward.mcrn.ar.conf"
    )
    for config in "${OLD_CONFIGS[@]}"; do
        # sites-enabled entries are usually symlinks (-L) but handle plain files too.
        if [ -L "/etc/nginx/sites-enabled/$config" ] || [ -f "/etc/nginx/sites-enabled/$config" ]; then
            echo "  Removing /etc/nginx/sites-enabled/$config"
            sudo rm -f "/etc/nginx/sites-enabled/$config"
        fi
        if [ -f "/etc/nginx/sites-available/$config" ]; then
            echo "  Removing /etc/nginx/sites-available/$config"
            sudo rm -f "/etc/nginx/sites-available/$config"
        fi
    done
    echo "  ✓ Old nginx configs removed"
    # Test nginx config
    # Only reload if the remaining config still validates.
    if command -v nginx &> /dev/null; then
        if sudo nginx -t 2>/dev/null; then
            echo "  Reloading nginx..."
            sudo systemctl reload nginx 2>/dev/null || true
        fi
    fi
else
    echo "  ⓘ Nginx not configured, skipping..."
fi

# =============================================================================
# 4. Verify What's Kept
# =============================================================================
echo ""
echo "Step 4: Verifying preserved data..."
# Check Docker volumes
if command -v docker &> /dev/null; then
    VOLUMES=$(docker volume ls -q | grep -E "core_nest|amar|pawprint" 2>/dev/null || true)
    if [ -n "$VOLUMES" ]; then
        echo "  ✓ Docker volumes preserved:"
        docker volume ls | grep -E "core_nest|amar|pawprint|DRIVER" || true
    fi
fi
# Check .env files
echo ""
echo "  .env files preserved:"
for envfile in ~/core_nest/amar/.env ~/core_nest/pawprint/.env ~/pawprint/.env; do
    [ -f "$envfile" ] && echo "    ✓ $envfile" || true
done
# Check SSL certs
echo ""
echo "  SSL certificates preserved:"
[ -d /etc/letsencrypt/live/nest.mcrn.ar ] && echo "    ✓ *.nest.mcrn.ar" || echo "    ✗ *.nest.mcrn.ar (missing)"
[ -d /etc/letsencrypt/live/mcrn.ar ] && echo "    ✓ *.mcrn.ar" || echo "    ✗ *.mcrn.ar (missing)"

# =============================================================================
# Done
# =============================================================================
echo ""
echo "=== Cleanup Complete ==="
echo ""
echo "Next steps:"
echo "  1. Deploy from local:"
echo "     ./ctrl/deploy.sh"
echo ""
echo "  2. Run server setup:"
echo "     cd ~/core_nest/ctrl/server && ./setup.sh"
echo ""
echo "  3. Build and start:"
echo "     cd ~/core_nest/ctrl && ./build.sh && ./start.sh -d"
echo ""

185
mainroom/ctrl/server/configure.sh Executable file
View File

@@ -0,0 +1,185 @@
#!/bin/bash
# Configure - Generate configuration files
# Run as appuser (mariano), no sudo required
#
# Usage:
#   ./configure.sh
#
# Generates:
#   - Nginx configs for core_nest
#   - Validates .env files
#   - Outputs to .generated/ directory
#
# After running this, admin runs: sudo ./setup.sh

set -e

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
GEN_DIR="$SCRIPT_DIR/.generated"
CORE_NEST_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

echo "=== Core Nest Configure ==="
echo ""
echo "This script generates configuration files for deployment."
echo "Run as appuser (no sudo required)."
echo ""

# Ensure we're NOT running as root: generated files must stay owned by the
# appuser; system changes happen later in sudo ./setup.sh.
if [ "$EUID" -eq 0 ]; then
    echo "ERROR: Do not run this script with sudo"
    echo "Run as appuser instead: ./configure.sh"
    exit 1
fi

# =============================================================================
# 1. Create .generated directory
# =============================================================================
echo "Step 1: Preparing output directory..."
mkdir -p "$GEN_DIR"
echo "  Output directory: $GEN_DIR"

# =============================================================================
# 2. Load and validate environment
# =============================================================================
echo ""
echo "Step 2: Loading environment..."

# Load core_nest/.env; set -a auto-exports everything sourced so the
# variables are visible to envsubst below.
if [ -f "$CORE_NEST_ROOT/.env" ]; then
    set -a
    source "$CORE_NEST_ROOT/.env"
    set +a
    echo "  Loaded: core_nest/.env"
else
    echo "  ERROR: core_nest/.env not found"
    exit 1
fi

# Validate required vars (bash indirect expansion ${!var}).
REQUIRED_VARS="NEST_NAME DEPLOYMENT_NAME NETWORK_NAME MANAGED_DOMAIN PAWPRINT_DOMAIN"
MISSING=""
for var in $REQUIRED_VARS; do
    if [ -z "${!var}" ]; then
        MISSING="$MISSING $var"
    fi
done
if [ -n "$MISSING" ]; then
    echo "  ERROR: Missing required vars in core_nest/.env:$MISSING"
    exit 1
fi

echo "  NEST_NAME: $NEST_NAME"
echo "  DEPLOYMENT_NAME: $DEPLOYMENT_NAME"
echo "  MANAGED_DOMAIN: $MANAGED_DOMAIN"
echo "  PAWPRINT_DOMAIN: $PAWPRINT_DOMAIN"

# =============================================================================
# 3. Check .env files for services
# =============================================================================
echo ""
echo "Step 3: Checking service .env files..."
for service in amar pawprint; do
    SERVICE_DIR="$CORE_NEST_ROOT/$service"
    if [ ! -f "$SERVICE_DIR/.env" ]; then
        if [ -f "$SERVICE_DIR/.env.example" ]; then
            echo "  Creating $service/.env from example..."
            cp "$SERVICE_DIR/.env.example" "$SERVICE_DIR/.env"
            echo "  ⚠️  Edit $service/.env with production values before deployment"
        else
            echo "  ERROR: $service/.env.example not found"
            exit 1
        fi
    else
        echo "  ✓ $service/.env exists"
    fi
done

# =============================================================================
# 4. Generate Nginx configuration
# =============================================================================
echo ""
echo "Step 4: Generating nginx configuration..."

TEMPLATE="$SCRIPT_DIR/nginx/core_nest.conf.template"
OUTPUT="$GEN_DIR/core_nest.nginx.conf"

if [ ! -f "$TEMPLATE" ]; then
    echo "  ERROR: Template not found: $TEMPLATE"
    exit 1
fi

# Check for SSL certificates (just warn, don't fail — on a fresh host the
# admin still needs to run certbot).
SSL_CERT_AMAR="/etc/letsencrypt/live/nest.mcrn.ar/fullchain.pem"
SSL_KEY_AMAR="/etc/letsencrypt/live/nest.mcrn.ar/privkey.pem"
SSL_CERT_PAWPRINT="/etc/letsencrypt/live/mcrn.ar/fullchain.pem"
SSL_KEY_PAWPRINT="/etc/letsencrypt/live/mcrn.ar/privkey.pem"

echo "  Checking SSL certificates..."
for cert in "$SSL_CERT_AMAR" "$SSL_KEY_AMAR" "$SSL_CERT_PAWPRINT" "$SSL_KEY_PAWPRINT"; do
    if [ -f "$cert" ]; then
        echo "  ✓ $(basename "$cert")"
    else
        echo "  ⚠️  Missing: $cert"
        echo "     Admin will need to generate SSL certificates"
    fi
done

# Generate nginx config from template.
# IMPORTANT: restrict envsubst to OUR variables via an explicit SHELL-FORMAT
# argument.  A bare `envsubst` replaces every $NAME it sees, which would blank
# out nginx runtime variables such as $host / $remote_addr / $scheme in the
# template and emit a broken config.
export NEST_NAME DEPLOYMENT_NAME MANAGED_DOMAIN PAWPRINT_DOMAIN
export SSL_CERT_AMAR SSL_KEY_AMAR SSL_CERT_PAWPRINT SSL_KEY_PAWPRINT
envsubst '${NEST_NAME} ${DEPLOYMENT_NAME} ${MANAGED_DOMAIN} ${PAWPRINT_DOMAIN} ${SSL_CERT_AMAR} ${SSL_KEY_AMAR} ${SSL_CERT_PAWPRINT} ${SSL_KEY_PAWPRINT}' \
    < "$TEMPLATE" > "$OUTPUT"
echo "  ✓ Generated: $OUTPUT"

# =============================================================================
# 5. Generate deployment summary
# =============================================================================
echo ""
echo "Step 5: Generating deployment summary..."
SUMMARY="$GEN_DIR/DEPLOYMENT.txt"
cat > "$SUMMARY" <<EOF
Core Nest Deployment Configuration
Generated: $(date)
User: $USER
Host: $(hostname)

=== Environment ===
NEST_NAME=$NEST_NAME
DEPLOYMENT_NAME=$DEPLOYMENT_NAME
NETWORK_NAME=$NETWORK_NAME
MANAGED_DOMAIN=$MANAGED_DOMAIN
PAWPRINT_DOMAIN=$PAWPRINT_DOMAIN

=== Generated Files ===
- core_nest.nginx.conf → /etc/nginx/sites-available/core_nest.conf

=== Next Steps ===
1. Review generated files in: $GEN_DIR
2. Have admin run: sudo ./setup.sh
EOF
echo "  ✓ Generated: $SUMMARY"

# =============================================================================
# Done
# =============================================================================
echo ""
echo "=== Configuration Complete ==="
echo ""
echo "Generated files in: $GEN_DIR"
echo ""
echo "Next steps:"
echo "  1. Review generated nginx config:"
echo "     cat $OUTPUT"
echo ""
echo "  2. Have system admin run:"
echo "     sudo ./setup.sh"
echo ""
echo "  3. Or review deployment summary:"
echo "     cat $SUMMARY"
echo ""

View File

@@ -0,0 +1,55 @@
#!/bin/bash
# Install nginx config for core_nest
# Run with: sudo ./install-nginx.sh
#
# Copies the repo's nginx config into sites-available, enables it via a
# symlink, validates with `nginx -t`, then reloads nginx.  If validation
# fails, the sites-enabled symlink is rolled back so a later system-wide
# nginx restart/reload is not broken by the bad config.
set -e

# Must be root: we write under /etc/nginx and drive systemctl.
if [ "$EUID" -ne 0 ]; then
    echo "Error: this script must run as root"
    echo "Run with: sudo ./install-nginx.sh"
    exit 1
fi

# Application user (can be overridden with environment variable)
APP_USER="${APP_USER:-mariano}"
APP_HOME="/home/${APP_USER}"
NGINX_SOURCE="${APP_HOME}/core_nest/ctrl/server/nginx/core_nest.conf"
NGINX_AVAILABLE="/etc/nginx/sites-available/core_nest.conf"
NGINX_ENABLED="/etc/nginx/sites-enabled/core_nest.conf"

echo "=== Installing nginx config for core_nest ==="
echo "App user: $APP_USER"
echo "App home: $APP_HOME"
echo ""

# Check if source file exists
if [ ! -f "$NGINX_SOURCE" ]; then
    echo "Error: Source file not found: $NGINX_SOURCE"
    exit 1
fi

# Copy to sites-available
echo "Installing config to sites-available..."
cp "$NGINX_SOURCE" "$NGINX_AVAILABLE"
echo "  ✓ Config installed to $NGINX_AVAILABLE"

# Create symlink to sites-enabled (-sf replaces any stale link)
echo "Enabling config..."
ln -sf "$NGINX_AVAILABLE" "$NGINX_ENABLED"
echo "  ✓ Config enabled"

# Test nginx configuration before touching the running service
echo ""
echo "Testing nginx configuration..."
if nginx -t; then
    echo "  ✓ Nginx config is valid"

    # Reload nginx
    echo ""
    echo "Reloading nginx..."
    systemctl reload nginx
    echo "  ✓ Nginx reloaded"
    echo ""
    echo "=== Installation complete ==="
else
    # Roll back: leaving a broken config enabled would make the NEXT nginx
    # restart fail.  The copy stays in sites-available for inspection.
    rm -f "$NGINX_ENABLED"
    echo ""
    echo "Error: Nginx configuration test failed"
    echo "Config remains in sites-available but was disabled; nginx was not reloaded"
    exit 1
fi

View File

@@ -0,0 +1,292 @@
# Core Nest - All Services Nginx Config
# Single config for entire nest deployment
#
# Docker Services (primary):
#   - amar.nest.mcrn.ar (frontend:3000 + backend:8000)
#   - pawprint.mcrn.ar (port 13000)
#   - artery.mcrn.ar (port 13001)
#   - album.mcrn.ar (port 13002)
#   - ward.mcrn.ar (port 13003)
#
# Bare Metal Services (fallback):
#   - pawprint.bare.mcrn.ar (port 12000)
#   - artery.bare.mcrn.ar (port 12001)
#   - album.bare.mcrn.ar (port 12002)
#   - ward.bare.mcrn.ar (port 12003)

# =============================================================================
# AMAR - Frontend + Backend
# =============================================================================

# Redirect all plain-HTTP traffic to HTTPS.
server {
    listen 80;
    server_name amar.nest.mcrn.ar;
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    server_name amar.nest.mcrn.ar;

    # Certificate under live/nest.mcrn.ar — presumably a *.nest.mcrn.ar
    # wildcard (the deployment's verify step checks this exact path).
    ssl_certificate /etc/letsencrypt/live/nest.mcrn.ar/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/nest.mcrn.ar/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;

    # Backend API -> Django published on host port 8000.
    location /api/ {
        proxy_pass http://127.0.0.1:8000/api/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300;
    }

    # Django admin (same backend upstream).
    location /admin/ {
        proxy_pass http://127.0.0.1:8000/admin/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # Django static files, served by the backend.
    location /static/ {
        proxy_pass http://127.0.0.1:8000/static/;
    }

    # Frontend (default) -> Next.js on host port 3000.
    location / {
        proxy_pass http://127.0.0.1:3000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300;
    }
}
# =============================================================================
# PAWPRINT - Main Service
# =============================================================================
# All *.mcrn.ar services below use the single /etc/letsencrypt/live/mcrn.ar
# certificate (the *.mcrn.ar wildcard).  The deployment only provisions and
# verifies live/nest.mcrn.ar and live/mcrn.ar; per-service live/ directories
# (live/pawprint.mcrn.ar, live/artery.mcrn.ar, ...) do not exist, and
# referencing them made nginx fail to load this config.

server {
    listen 80;
    server_name pawprint.mcrn.ar;
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    server_name pawprint.mcrn.ar;

    ssl_certificate /etc/letsencrypt/live/mcrn.ar/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/mcrn.ar/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;

    # Pawprint docker container published on host port 13000.
    location / {
        proxy_pass http://127.0.0.1:13000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

# =============================================================================
# ARTERY - API Gateway
# =============================================================================
server {
    listen 80;
    server_name artery.mcrn.ar;
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    server_name artery.mcrn.ar;

    ssl_certificate /etc/letsencrypt/live/mcrn.ar/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/mcrn.ar/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;

    # Artery docker container published on host port 13001.
    location / {
        proxy_pass http://127.0.0.1:13001;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

# =============================================================================
# ALBUM - Media Service
# =============================================================================
server {
    listen 80;
    server_name album.mcrn.ar;
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    server_name album.mcrn.ar;

    ssl_certificate /etc/letsencrypt/live/mcrn.ar/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/mcrn.ar/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;

    # Album docker container published on host port 13002.
    location / {
        proxy_pass http://127.0.0.1:13002;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

# =============================================================================
# WARD - Admin Interface
# =============================================================================
server {
    listen 80;
    server_name ward.mcrn.ar;
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    server_name ward.mcrn.ar;

    ssl_certificate /etc/letsencrypt/live/mcrn.ar/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/mcrn.ar/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;

    # Ward docker container published on host port 13003.
    location / {
        proxy_pass http://127.0.0.1:13003;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
# =============================================================================
# BARE METAL SERVICES (FALLBACK)
# =============================================================================
# NOTE(review): these server_names are third-level (pawprint.bare.mcrn.ar),
# which a *.mcrn.ar wildcard does NOT cover.  A certificate under
# live/bare.mcrn.ar is assumed to exist and to cover *.bare.mcrn.ar — confirm
# with certbot certificates on the host.

# =============================================================================
# PAWPRINT BARE - Main Service (Bare Metal)
# =============================================================================
server {
    listen 80;
    server_name pawprint.bare.mcrn.ar;
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    server_name pawprint.bare.mcrn.ar;

    ssl_certificate /etc/letsencrypt/live/bare.mcrn.ar/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/bare.mcrn.ar/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;

    # Bare-metal pawprint process on host port 12000.
    location / {
        proxy_pass http://127.0.0.1:12000;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

# =============================================================================
# ARTERY BARE - API Gateway (Bare Metal)
# =============================================================================
server {
    listen 80;
    server_name artery.bare.mcrn.ar;
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    server_name artery.bare.mcrn.ar;

    ssl_certificate /etc/letsencrypt/live/bare.mcrn.ar/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/bare.mcrn.ar/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;

    # Bare-metal artery process on host port 12001.
    location / {
        proxy_pass http://127.0.0.1:12001;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

# =============================================================================
# ALBUM BARE - Media Service (Bare Metal)
# =============================================================================
server {
    listen 80;
    server_name album.bare.mcrn.ar;
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    server_name album.bare.mcrn.ar;

    ssl_certificate /etc/letsencrypt/live/bare.mcrn.ar/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/bare.mcrn.ar/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;

    # Bare-metal album process on host port 12002.
    location / {
        proxy_pass http://127.0.0.1:12002;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

# =============================================================================
# WARD BARE - Admin Interface (Bare Metal)
# =============================================================================
server {
    listen 80;
    server_name ward.bare.mcrn.ar;
    return 301 https://$host$request_uri;
}

server {
    listen 443 ssl;
    server_name ward.bare.mcrn.ar;

    ssl_certificate /etc/letsencrypt/live/bare.mcrn.ar/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/bare.mcrn.ar/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;

    # Bare-metal ward process on host port 12003.
    location / {
        proxy_pass http://127.0.0.1:12003;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

View File

@@ -0,0 +1,107 @@
# Core Nest - Nginx Config Template
# Generated from environment variables
#
# Environment variables (substituted at generation time):
#   DOMAIN_AMAR      - Amar domain (e.g., amarmascotas.local.com or amar.nest.mcrn.ar)
#   DOMAIN_PAWPRINT  - Pawprint domain (e.g., pawprint.local.com or pawprint.mcrn.ar)
#   USE_SSL          - true/false - whether to use SSL
#   SSL_CERT_PATH    - Path to SSL certificate (if USE_SSL=true)
#   SSL_KEY_PATH     - Path to SSL key (if USE_SSL=true)
#   BACKEND_PORT     - Backend port (default: 8000)
#   FRONTEND_PORT    - Frontend port (default: 3000)
#   PAWPRINT_PORT    - Pawprint port (default: 13000)
#   ARTERY_PORT      - Artery port
#   ALBUM_PORT       - Album port
#   WARD_PORT        - Ward port
#   SSL_REDIRECT / SSL_SERVER_BLOCK / PAWPRINT_SSL_REDIRECT /
#   PAWPRINT_SSL_SERVER_BLOCK - whole-block placeholders injected by the
#   generator when SSL is enabled
#
# NOTE(review): nginx runtime variables are written as \$host etc.  That
# escape form suits shell heredoc/eval expansion, not plain envsubst (which
# would still substitute $host and leave a stray backslash).  Also,
# configure.sh exports NEST_NAME/DEPLOYMENT_NAME/MANAGED_DOMAIN/... rather
# than the DOMAIN_AMAR/DOMAIN_PAWPRINT names used here — confirm which
# generator actually consumes this template.

# =============================================================================
# AMAR - Frontend + Backend
# =============================================================================
server {
    listen 80;
    server_name ${DOMAIN_AMAR};
    ${SSL_REDIRECT}

    # Backend API
    location /api/ {
        proxy_pass http://127.0.0.1:${BACKEND_PORT}/api/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_read_timeout 300;
    }

    # Django admin
    location /admin/ {
        proxy_pass http://127.0.0.1:${BACKEND_PORT}/admin/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }

    # Django static files
    location /static/ {
        proxy_pass http://127.0.0.1:${BACKEND_PORT}/static/;
    }

    # Frontend (default)
    location / {
        proxy_pass http://127.0.0.1:${FRONTEND_PORT};
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_read_timeout 300;

        # WebSocket support for Next.js hot reload
        proxy_http_version 1.1;
        proxy_set_header Upgrade \$http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}

${SSL_SERVER_BLOCK}

# =============================================================================
# PAWPRINT - Main Service + Ecosystem
# =============================================================================
server {
    listen 80;
    server_name ${DOMAIN_PAWPRINT};
    ${PAWPRINT_SSL_REDIRECT}

    # Artery - API Gateway
    location /artery/ {
        proxy_pass http://127.0.0.1:${ARTERY_PORT}/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }

    # Album - Media Service
    location /album/ {
        proxy_pass http://127.0.0.1:${ALBUM_PORT}/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }

    # Ward - Admin Interface
    location /ward/ {
        proxy_pass http://127.0.0.1:${WARD_PORT}/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }

    # Pawprint - Main Service (default)
    location / {
        proxy_pass http://127.0.0.1:${PAWPRINT_PORT};
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }
}

${PAWPRINT_SSL_SERVER_BLOCK}

View File

@@ -0,0 +1,152 @@
# Nginx Config Template for Docker Local Development
# Uses environment variables from .env files
# Variables: DEPLOYMENT_NAME, NEST_NAME, MANAGED_DOMAIN, PAWPRINT_DOMAIN
#
# Nginx runtime variables ($host, $remote_addr, ...) are left unescaped on
# purpose: the generator restricts envsubst to the four deployment variables
# above, so runtime variables survive substitution intact.

# =============================================================================
# MANAGED APP WITH WRAPPER - amar.nest.local.com
# =============================================================================
server {
    listen 80;
    server_name ${MANAGED_DOMAIN};

    # Wrapper static files (mounted into the proxy container at /app/wrapper)
    location /wrapper/ {
        alias /app/wrapper/;
        add_header Cache-Control "no-cache";
    }

    # Backend API -> backend container via docker-network DNS
    location /api/ {
        proxy_pass http://${DEPLOYMENT_NAME}_backend:8000/api/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300;
    }

    # Django admin
    location /admin/ {
        proxy_pass http://${DEPLOYMENT_NAME}_backend:8000/admin/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # Django static files
    location /static/ {
        proxy_pass http://${DEPLOYMENT_NAME}_backend:8000/static/;
    }

    # Frontend with wrapper injection
    location / {
        proxy_pass http://${DEPLOYMENT_NAME}_frontend:3000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300;

        # WebSocket support for Next.js hot reload
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";

        # Inject wrapper scripts into HTML.  Accept-Encoding is cleared so the
        # upstream responds uncompressed and sub_filter can rewrite the body.
        sub_filter '</head>' '<link rel="stylesheet" href="/wrapper/sidebar.css"><script src="/wrapper/sidebar.js"></script></head>';
        sub_filter_once on;
        proxy_set_header Accept-Encoding "";
    }
}

# =============================================================================
# MANAGED APP WITHOUT WRAPPER - amar.local.com
# =============================================================================
# NOTE(review): this server_name is hard-coded while the wrapper block above
# uses ${MANAGED_DOMAIN} — confirm whether it should be templated too.
server {
    listen 80;
    server_name amar.local.com;

    # Backend API
    location /api/ {
        proxy_pass http://${DEPLOYMENT_NAME}_backend:8000/api/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300;
    }

    # Django admin
    location /admin/ {
        proxy_pass http://${DEPLOYMENT_NAME}_backend:8000/admin/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # Django static files
    location /static/ {
        proxy_pass http://${DEPLOYMENT_NAME}_backend:8000/static/;
    }

    # Frontend (clean, no wrapper)
    location / {
        proxy_pass http://${DEPLOYMENT_NAME}_frontend:3000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300;

        # WebSocket support for Next.js hot reload
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}

# =============================================================================
# PAWPRINT - Main Service + Ecosystem
# =============================================================================
# Path-prefixed routing into the ${NEST_NAME}_* containers; the trailing
# slash on proxy_pass strips the /artery|/album|/ward prefix.
server {
    listen 80;
    server_name ${PAWPRINT_DOMAIN};

    # Artery - API Gateway
    location /artery/ {
        proxy_pass http://${NEST_NAME}_artery:8000/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # Album - Media Service
    location /album/ {
        proxy_pass http://${NEST_NAME}_album:8000/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # Ward - Admin Interface
    location /ward/ {
        proxy_pass http://${NEST_NAME}_ward:8000/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # Pawprint - Main Service (default)
    location / {
        proxy_pass http://${NEST_NAME}_pawprint:8000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

View File

@@ -0,0 +1,6 @@
# Conditional wrapper injection based on ENABLE_WRAPPER env var
# NOTE(review): {{if ...}}/{{endif}} is not nginx syntax — this snippet needs
# a template preprocessor.  The docker entrypoint instead strips injection
# lines with sed; confirm which mechanism actually consumes this file.
{{if ENABLE_WRAPPER}}
# Rewrite the HTML head to load the sidebar wrapper assets; Accept-Encoding
# is cleared so the upstream responds uncompressed and sub_filter can match.
sub_filter '</head>' '<link rel="stylesheet" href="/wrapper/sidebar.css"><script src="/wrapper/sidebar.js"></script></head>';
sub_filter_once on;
proxy_set_header Accept-Encoding "";
{{endif}}

View File

@@ -0,0 +1,60 @@
# Nginx Config Template for Docker
# Uses environment variables from .env files
# Variables: DEPLOYMENT_NAME, MANAGED_DOMAIN, PAWPRINT_DOMAIN, MANAGED_*
#
# Nginx runtime variables ($host, ...) are unescaped on purpose: substitution
# is expected to be restricted to the deployment variables above.

# =============================================================================
# MANAGED DOMAIN
# =============================================================================
# Completely defined by the parent deployment (e.g., core_nest)
# Pawprint doesn't know or care about the managed app's structure
server {
    listen 80;
    server_name ${MANAGED_DOMAIN};

    # All location blocks defined in MANAGED_LOCATIONS env var
    ${MANAGED_LOCATIONS}
}

# =============================================================================
# PAWPRINT - Main Service + Ecosystem
# =============================================================================
# Path-prefixed routing; trailing slash on proxy_pass strips the prefix.
server {
    listen 80;
    server_name ${PAWPRINT_DOMAIN};

    # Artery - API Gateway
    location /artery/ {
        proxy_pass http://${DEPLOYMENT_NAME}_artery:8000/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # Album - Media Service
    location /album/ {
        proxy_pass http://${DEPLOYMENT_NAME}_album:8000/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # Ward - Admin Interface
    location /ward/ {
        proxy_pass http://${DEPLOYMENT_NAME}_ward:8000/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # Pawprint - Main Service (default)
    location / {
        proxy_pass http://${DEPLOYMENT_NAME}_pawprint:8000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

View File

@@ -0,0 +1,23 @@
#!/bin/sh
# Generate nginx config based on ENABLE_WRAPPER env var
#
# Copies the docker-local template, optionally strips the wrapper-injection
# directives, substitutes deployment variables, and writes the final config
# to conf.d.  Runs as a container entrypoint step.
set -eu

TEMPLATE="/etc/nginx/templates/docker-local.conf.template"
OUTPUT="/etc/nginx/conf.d/default.conf"

# Start with the template
cp "$TEMPLATE" "$OUTPUT"

# If ENABLE_WRAPPER is not "true", remove wrapper injection lines
# (wrapper location/alias, sub_filter rewrites, Accept-Encoding reset).
if [ "${ENABLE_WRAPPER:-}" != "true" ]; then
    echo "Wrapper disabled - removing injection lines"
    sed -i '/wrapper/d; /sub_filter/d; /Accept-Encoding/d' "$OUTPUT"
fi

# Substitute ONLY deployment variables: the restricted list keeps nginx
# runtime variables ($host, $remote_addr, ...) intact.
# Use mktemp for the intermediate file — a fixed /tmp/nginx.conf path is
# predictable (symlink-attackable) and collides across concurrent runs.
TMP="$(mktemp)"
envsubst '${DEPLOYMENT_NAME} ${NEST_NAME} ${MANAGED_DOMAIN} ${PAWPRINT_DOMAIN}' < "$OUTPUT" > "$TMP"
mv "$TMP" "$OUTPUT"

echo "Nginx config generated (ENABLE_WRAPPER=${ENABLE_WRAPPER:-})"
cat "$OUTPUT"

View File

@@ -0,0 +1,160 @@
#!/bin/bash
# Setup symlinks for test directories
# Enables sharing test directories across different services on the same filesystem
#
# This script should be run on the AWS server after deployment
# It creates symlinks to allow ward/tester to access test data from amar_django_back_contracts
#
# Usage:
#   ./setup-symlinks.sh [--dry-run]

set -e

# With --dry-run, every mutating command is prefixed with echo so it is
# printed instead of executed.
DRY_RUN=""
if [ "${1:-}" == "--dry-run" ]; then
    DRY_RUN="echo [DRY-RUN]"
fi

echo "=== Setting up Test Directory Symlinks ==="
echo ""

# Check if we're on the server
if [ ! -d "$HOME/core_nest" ]; then
    echo "Error: ~/core_nest directory not found"
    echo "This script should run on the AWS server after deployment"
    exit 1
fi

cd "$HOME/core_nest"

# =============================================================================
# Test Directory Symlinks
# =============================================================================
echo "Step 1: Creating symlinks for test directories..."
echo ""

# Both paths are relative to ~/core_nest.
WARD_TESTS_DIR="pawprint/src/ward/tools/tester/tests"
CONTRACTS_SOURCE="amar/src/back/tests/contracts"

# Relative prefix from INSIDE WARD_TESTS_DIR back up to the core_nest root.
# tests -> tester -> tools -> ward -> src -> pawprint -> core_nest: 6 levels.
# (Symlink targets resolve relative to the link's own directory; the previous
# 5-level prefix landed inside pawprint/ and produced dangling links.)
REL_TO_ROOT="../../../../../.."

# Create ward tests directory if it doesn't exist
if [ ! -d "$WARD_TESTS_DIR" ]; then
    $DRY_RUN mkdir -p "$WARD_TESTS_DIR"
    echo "  Created $WARD_TESTS_DIR"
fi

# Check if source contracts directory exists
if [ ! -d "$CONTRACTS_SOURCE" ]; then
    echo "  ⚠ Warning: Source contracts directory not found: $CONTRACTS_SOURCE"
    echo "  Skipping test symlinks"
else
    # Create symlinks for each test domain directory
    for domain_dir in "$CONTRACTS_SOURCE"/*; do
        if [ -d "$domain_dir" ]; then
            domain_name=$(basename "$domain_dir")

            # Skip Python artifacts (only directories reach here, so the
            # __pycache__ check is the relevant one)
            if [ "$domain_name" == "__pycache__" ]; then
                continue
            fi

            target_link="$WARD_TESTS_DIR/$domain_name"

            # Replace an existing symlink; never touch a real directory
            if [ -L "$target_link" ]; then
                $DRY_RUN rm "$target_link"
                echo "  Removed existing symlink: $target_link"
            elif [ -d "$target_link" ]; then
                echo "  ⚠ Warning: $target_link exists as directory, not symlink"
                echo "    To replace with symlink, manually remove: rm -rf $target_link"
                continue
            fi

            # Create relative symlink (resolved from the link's directory)
            $DRY_RUN ln -s "$REL_TO_ROOT/$CONTRACTS_SOURCE/$domain_name" "$target_link"
            echo "  ✓ Created symlink: $target_link -> $REL_TO_ROOT/$CONTRACTS_SOURCE/$domain_name"
        fi
    done

    # Also symlink shared test utilities
    for shared_file in "endpoints.py" "helpers.py" "base.py" "conftest.py"; do
        source_file="$CONTRACTS_SOURCE/$shared_file"
        target_file="$WARD_TESTS_DIR/$shared_file"
        if [ -f "$source_file" ]; then
            if [ -L "$target_file" ]; then
                $DRY_RUN rm "$target_file"
            fi
            if [ ! -e "$target_file" ]; then
                $DRY_RUN ln -s "$REL_TO_ROOT/$source_file" "$target_file"
                echo "  ✓ Created symlink: $target_file"
            fi
        fi
    done
fi

echo ""

# =============================================================================
# Bare Metal Symlinks (if bare metal path exists)
# =============================================================================
if [ -d "$HOME/pawprint" ]; then
    echo "Step 2: Creating bare metal symlinks..."
    echo ""
    BARE_WARD_TESTS="$HOME/pawprint/ward/tools/tester/tests"
    if [ ! -d "$BARE_WARD_TESTS" ]; then
        $DRY_RUN mkdir -p "$BARE_WARD_TESTS"
        echo "  Created $BARE_WARD_TESTS"
    fi
    # For bare metal, tests are synced rather than symlinked
    echo "  Bare metal tests managed by sync-tests.sh"
    echo "  Run: $HOME/core_nest/ctrl/sync-tests.sh"
else
    echo "Step 2: Bare metal path not found, skipping"
fi

echo ""

# =============================================================================
# Verification
# =============================================================================
echo "=== Verification ==="
echo ""
if [ -d "$WARD_TESTS_DIR" ]; then
    echo "Ward tester tests:"
    ls -lah "$WARD_TESTS_DIR" | grep -E "^l|^d" || echo "  No directories or symlinks found"
else
    echo "  ⚠ Ward tests directory not found"
fi
echo ""

# =============================================================================
# Done
# =============================================================================
if [ -n "$DRY_RUN" ]; then
    echo "=== Dry run complete (no changes made) ==="
else
    echo "=== Symlink Setup Complete ==="
fi
echo ""
echo "Next steps:"
echo "  1. Verify symlinks are working:"
echo "     ls -lah $WARD_TESTS_DIR"
echo ""
echo "  2. Restart ward container to pick up changes:"
echo "     cd ~/core_nest/ctrl && docker compose restart ward"
echo ""
echo "  3. Test in browser:"
echo "     https://ward.mcrn.ar/tools/tester/"
echo ""

217
mainroom/ctrl/server/setup.sh Executable file
View File

@@ -0,0 +1,217 @@
#!/bin/bash
# Setup - Apply configuration to system
# Must run with sudo/as root
#
# Usage:
#   sudo ./setup.sh
#
# Prerequisites:
#   - Run ./configure.sh first (as appuser)
#
# This script:
#   - Installs system packages (docker, nginx, certbot)
#   - Applies generated nginx config to /etc/nginx/
#   - Manages nginx service

set -e

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
GEN_DIR="$SCRIPT_DIR/.generated"

echo "=== Core Nest Setup (System Configuration) ==="
echo ""

# Must run as root
if [ "$EUID" -ne 0 ]; then
    echo "ERROR: This script must be run with sudo"
    echo "Usage: sudo ./setup.sh"
    exit 1
fi

# Get the actual user who ran sudo
if [ -z "$SUDO_USER" ]; then
    echo "ERROR: SUDO_USER not set"
    echo "Run with: sudo ./setup.sh (not as root directly)"
    exit 1
fi

ACTUAL_USER="$SUDO_USER"
# Resolve the invoking user's home directory via getent instead of
# `eval echo ~$USER` — eval on an externally supplied name is an
# injection hazard, and getent also works for non-shell accounts.
ACTUAL_HOME=$(getent passwd "$ACTUAL_USER" | cut -d: -f6)

echo "Running as: root (via sudo)"
echo "Actual user: $ACTUAL_USER"
echo "User home: $ACTUAL_HOME"
echo ""

# Check that configure was run first
if [ ! -d "$GEN_DIR" ] || [ ! -f "$GEN_DIR/core_nest.nginx.conf" ]; then
    echo "ERROR: Configuration files not found"
    echo ""
    echo "Run ./configure.sh first (as $ACTUAL_USER):"
    echo "  su - $ACTUAL_USER"
    echo "  cd $(dirname "$SCRIPT_DIR")"
    echo "  ./server/configure.sh"
    exit 1
fi

echo "✓ Found generated configuration files"
echo ""

# =============================================================================
# 1. Install System Dependencies
# =============================================================================
echo "Step 1: Installing system dependencies..."
echo ""

# Docker (official convenience installer; idempotent check first)
if ! command -v docker &> /dev/null; then
    echo "  Installing Docker..."
    curl -fsSL https://get.docker.com -o /tmp/get-docker.sh
    sh /tmp/get-docker.sh
    rm /tmp/get-docker.sh
    echo "  ✓ Docker installed"
else
    echo "  ✓ Docker already installed"
fi

# Add user to docker group so they can run compose without sudo
if ! groups "$ACTUAL_USER" | grep -q docker; then
    echo "  Adding $ACTUAL_USER to docker group..."
    usermod -aG docker "$ACTUAL_USER"
    echo "$ACTUAL_USER added to docker group"
    echo "  (User must log out and back in for this to take effect)"
else
    echo "$ACTUAL_USER already in docker group"
fi

# Docker Compose (v2 plugin)
if ! docker compose version &> /dev/null; then
    echo "  Installing Docker Compose plugin..."
    apt-get update
    apt-get install -y docker-compose-plugin
    echo "  ✓ Docker Compose installed"
else
    echo "  ✓ Docker Compose already installed"
fi

# Nginx
if ! command -v nginx &> /dev/null; then
    echo "  Installing Nginx..."
    apt-get update
    apt-get install -y nginx
    echo "  ✓ Nginx installed"
else
    echo "  ✓ Nginx already installed"
fi

# Certbot (with the nginx plugin for cert installation)
if ! command -v certbot &> /dev/null; then
    echo "  Installing Certbot..."
    apt-get update
    apt-get install -y certbot python3-certbot-nginx
    echo "  ✓ Certbot installed"
else
    echo "  ✓ Certbot already installed"
fi

# =============================================================================
# 2. Install Nginx Configuration
# =============================================================================
echo ""
echo "Step 2: Installing nginx configuration..."

NGINX_AVAILABLE="/etc/nginx/sites-available/core_nest.conf"
NGINX_ENABLED="/etc/nginx/sites-enabled/core_nest.conf"
SOURCE_CONFIG="$GEN_DIR/core_nest.nginx.conf"

# Copy generated config
cp "$SOURCE_CONFIG" "$NGINX_AVAILABLE"
echo "  ✓ Copied to: $NGINX_AVAILABLE"

# Enable site (-f so re-runs are idempotent)
ln -sf "$NGINX_AVAILABLE" "$NGINX_ENABLED"
echo "  ✓ Enabled site: $NGINX_ENABLED"

# Remove default site if exists
if [ -f "/etc/nginx/sites-enabled/default" ]; then
    rm "/etc/nginx/sites-enabled/default"
    echo "  ✓ Removed default site"
fi

# Test nginx config before touching the service
echo "  Testing nginx configuration..."
if nginx -t; then
    echo "  ✓ Nginx configuration valid"
else
    echo "  ERROR: Nginx configuration test failed"
    exit 1
fi

# =============================================================================
# 3. Manage Nginx Service
# =============================================================================
echo ""
echo "Step 3: Managing nginx service..."

if systemctl is-active --quiet nginx; then
    echo "  Reloading nginx..."
    systemctl reload nginx
    echo "  ✓ Nginx reloaded"
else
    echo "  Starting nginx..."
    systemctl start nginx
    systemctl enable nginx
    echo "  ✓ Nginx started and enabled"
fi

# =============================================================================
# 4. SSL Certificate Information
# =============================================================================
echo ""
echo "Step 4: SSL certificates..."

SSL_CERTS=(
    "/etc/letsencrypt/live/nest.mcrn.ar"
    "/etc/letsencrypt/live/mcrn.ar"
)

ALL_EXIST=true
for cert_dir in "${SSL_CERTS[@]}"; do
    if [ -d "$cert_dir" ]; then
        echo "  ✓ Certificate exists: $(basename "$cert_dir")"
    else
        echo "  ⚠️ Certificate missing: $(basename "$cert_dir")"
        ALL_EXIST=false
    fi
done

if [ "$ALL_EXIST" = false ]; then
    echo ""
    echo "  To generate missing certificates:"
    echo "  certbot certonly --manual --preferred-challenges dns -d '*.nest.mcrn.ar'"
    echo "  certbot certonly --manual --preferred-challenges dns -d '*.mcrn.ar'"
    echo ""
    echo "  After generating, reload nginx:"
    echo "  systemctl reload nginx"
fi

# =============================================================================
# Done
# =============================================================================
echo ""
echo "=== Setup Complete ==="
echo ""
echo "System configuration applied successfully."
echo ""
echo "Next steps:"
echo "  1. If $ACTUAL_USER was added to docker group, they must:"
echo "     - Log out and log back in"
echo "     - Or run: newgrp docker"
echo ""
echo "  2. Generate SSL certificates if missing (see above)"
echo ""
echo "  3. Deploy application:"
echo "     su - $ACTUAL_USER"
echo "     cd $ACTUAL_HOME/core_nest/ctrl"
echo "     ./deploy.sh"
echo ""

48
mainroom/ctrl/setup.sh Executable file
View File

@@ -0,0 +1,48 @@
#!/bin/bash
# Local setup - prepare .env files
#
# This script runs LOCALLY to create .env files from examples.
# For server setup, use: ssh server 'cd ~/core_nest/server && ./setup.sh'
#
# Usage:
#   ./setup.sh

set -e

# Change to parent directory (services are in ../service_name)
cd "$(dirname "$0")/.."

# Discover deployable services: any sibling directory that carries a
# docker-compose.yml, except the infrastructure dirs (ctrl/nginx/server).
SERVICE_DIRS=()
for entry in */; do
    name="${entry%/}"
    case "$name" in
        ctrl|nginx|server) continue ;;
    esac
    [ -f "$entry/docker-compose.yml" ] && SERVICE_DIRS+=("$name")
done

echo "=== Local Environment Setup ==="
echo ""

# Seed each service's .env from its example, never overwriting one that exists.
echo "Creating .env files from examples..."
for service in "${SERVICE_DIRS[@]}"; do
    if [ -f "$service/.env" ]; then
        echo "  $service/.env already exists"
    elif [ -f "$service/.env.example" ]; then
        cp "$service/.env.example" "$service/.env"
        echo "  Created $service/.env"
    fi
done

echo ""
echo "=== Local Setup Complete ==="
echo ""
echo "Local development:"
echo "  - Edit .env files for local values"
echo "  - Run: ./start.sh"
echo ""
echo "Server deployment:"
echo "  1. Deploy: ./deploy.sh"
echo "  2. On server: ssh server 'cd ~/core_nest/server && ./setup.sh'"

102
mainroom/ctrl/start.sh Executable file
View File

@@ -0,0 +1,102 @@
#!/bin/bash
# Start core_nest services
#
# Usage:
#   ./start.sh                # Start all (foreground, see logs)
#   ./start.sh <service>      # Start specific service (e.g., amar, pawprint)
#   ./start.sh -d             # Start all (detached)
#   ./start.sh --build        # Start with rebuild
#   ./start.sh -d --build     # Start detached with rebuild
#   ./start.sh --with-nginx   # Start with nginx container (local dev only)

set -e

# Change to parent directory (services are in ../service_name)
cd "$(dirname "$0")/.."

# Export core_nest/.env vars so child docker-compose files can use them
if [ -f ".env" ]; then
    set -a
    source .env
    set +a
fi

TARGET="all"
DETACH=""
BUILD=""
WITH_NGINX=""

SERVICE_DIRS=()
# Find all service directories (have docker-compose.yml, exclude ctrl/nginx)
for dir in */; do
    dirname="${dir%/}"
    if [ -f "$dir/docker-compose.yml" ] && [ "$dirname" != "ctrl" ] && [ "$dirname" != "nginx" ]; then
        SERVICE_DIRS+=("$dirname")
    fi
done

for arg in "$@"; do
    case $arg in
        -d|--detached) DETACH="-d" ;;
        --build) BUILD="--build" ;;
        --with-nginx) WITH_NGINX="true" ;;
        all) TARGET="all" ;;
        *)
            # Accept only known service directories. Previously an unknown
            # argument (e.g. a typo'd service name) fell through silently
            # and ALL services were started — fail loudly instead.
            if [[ " ${SERVICE_DIRS[@]} " =~ " ${arg} " ]]; then
                TARGET="$arg"
            else
                echo "Unknown argument or service: $arg"
                echo "Usage: ./start.sh [${SERVICE_DIRS[*]}|all] [-d|--detached] [--build] [--with-nginx]"
                exit 1
            fi
            ;;
    esac
done

# Start one service from its own directory; pawprint optionally gets the
# local-dev nginx compose overlay when --with-nginx was passed.
start_service() {
    local service=$1
    echo "Starting $service..."
    cd "$service"

    if [ "$WITH_NGINX" = "true" ] && [ "$service" = "pawprint" ]; then
        echo "  Including nginx container..."
        DOCKER_BUILDKIT=0 COMPOSE_DOCKER_CLI_BUILD=0 docker compose -f docker-compose.yml -f docker-compose.nginx.yml up $DETACH $BUILD
    else
        DOCKER_BUILDKIT=0 COMPOSE_DOCKER_CLI_BUILD=0 docker compose up $DETACH $BUILD
    fi

    cd ..
    [ -n "$DETACH" ] && echo "  $service started"
}

if [ "$TARGET" = "all" ]; then
    if [ -z "$DETACH" ]; then
        # Foreground mode: start all services in parallel so their logs
        # interleave; wait on every compose process.
        echo "Starting all services (foreground): ${SERVICE_DIRS[*]}"
        PIDS=()
        for service in "${SERVICE_DIRS[@]}"; do
            cd "$service"
            DOCKER_BUILDKIT=0 COMPOSE_DOCKER_CLI_BUILD=0 docker compose up $BUILD &
            PIDS+=($!)
            cd ..
        done
        # Wait for all processes
        wait "${PIDS[@]}"
    else
        # Detached mode: start sequentially
        for service in "${SERVICE_DIRS[@]}"; do
            start_service "$service"
            echo ""
        done
    fi
elif [[ " ${SERVICE_DIRS[@]} " =~ " ${TARGET} " ]]; then
    start_service "$TARGET"
else
    echo "Usage: ./start.sh [${SERVICE_DIRS[*]}|all] [-d|--detached] [--build]"
    exit 1
fi

if [ -n "$DETACH" ]; then
    echo ""
    echo "=== Services Started ==="
    echo ""
    docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep -E "(core_nest|NAMES)"
fi

42
mainroom/ctrl/status.sh Executable file
View File

@@ -0,0 +1,42 @@
#!/bin/bash
# Show core_nest status
#
# Usage:
#   ./status.sh

# Change to parent directory (services are in ../service_name)
cd "$(dirname "$0")/.."

# Export core_nest/.env vars. Use `set -a; source` (same pattern as
# start.sh) instead of `export $(grep ... | xargs)`, which breaks on
# values containing spaces, quotes, or '=' characters.
if [ -f ".env" ]; then
    set -a
    source .env
    set +a
fi

SERVICE_DIRS=()
# Find all service directories (have docker-compose.yml, exclude ctrl/nginx)
for dir in */; do
    dirname="${dir%/}"
    if [ -f "$dir/docker-compose.yml" ] && [ "$dirname" != "ctrl" ] && [ "$dirname" != "nginx" ]; then
        SERVICE_DIRS+=("$dirname")
    fi
done

# NEST_NAME comes from core_nest/.env
NEST_NAME=${NEST_NAME:-core_nest}

echo "=== Nest Status: $NEST_NAME ==="
echo ""

echo "Containers:"
docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep -E "(${NEST_NAME}|NAMES)" || echo "  No containers running"
echo ""

echo "Networks:"
docker network ls | grep -E "(${NEST_NAME}|NETWORK)" || echo "  No networks"
echo ""

echo "Volumes:"
docker volume ls | grep -E "(${NEST_NAME}|VOLUME)" || echo "  No volumes"
echo ""

50
mainroom/ctrl/stop.sh Executable file
View File

@@ -0,0 +1,50 @@
#!/bin/bash
# Stop core_nest services
#
# Usage:
#   ./stop.sh             # Stop all
#   ./stop.sh <service>   # Stop specific service

set -e

# Change to parent directory (services are in ../service_name)
cd "$(dirname "$0")/.."

# Export core_nest/.env vars so child docker-compose files can use them.
# Use `set -a; source` (same pattern as start.sh) instead of
# `export $(grep ... | xargs)`, which breaks on values containing
# spaces, quotes, or '=' characters.
if [ -f ".env" ]; then
    set -a
    source .env
    set +a
fi

TARGET=${1:-all}

SERVICE_DIRS=()
# Find all service directories (have docker-compose.yml, exclude ctrl/nginx)
for dir in */; do
    dirname="${dir%/}"
    if [ -f "$dir/docker-compose.yml" ] && [ "$dirname" != "ctrl" ] && [ "$dirname" != "nginx" ]; then
        SERVICE_DIRS+=("$dirname")
    fi
done

# Bring one service's compose project down from its own directory.
stop_service() {
    local service=$1
    echo "Stopping $service..."
    cd "$service"
    docker compose down
    cd ..
}

if [ "$TARGET" = "all" ]; then
    # Stop all services in reverse order (dependencies first)
    for ((i=${#SERVICE_DIRS[@]}-1; i>=0; i--)); do
        stop_service "${SERVICE_DIRS[$i]}"
    done
elif [[ " ${SERVICE_DIRS[@]} " =~ " ${TARGET} " ]]; then
    stop_service "$TARGET"
else
    echo "Usage: ./stop.sh [${SERVICE_DIRS[*]}|all]"
    exit 1
fi

echo ""
echo "=== Services Stopped ==="

83
mainroom/ctrl/sync-tests.sh Executable file
View File

@@ -0,0 +1,83 @@
#!/bin/bash
# Sync tests to ward tester (standalone, no coupling)
# Configure paths via environment variables
#
# Usage:
#   # Set env vars
#   export TEST_SOURCE_PATH=~/wdir/ama/amar_django_back/tests/contracts
#   export WARD_TESTS_PATH=~/wdir/ama/pawprint/ward/tools/tester/tests
#
#   # Run sync
#   ./sync-tests.sh

set -e

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# SILENT_FAIL=true makes missing configuration a no-op exit 0 so this
# script can be chained from hooks without aborting them.
SILENT_FAIL="${SILENT_FAIL:-false}"

# Load from .env.sync if it exists
if [ -f "$SCRIPT_DIR/.env.sync" ]; then
    source "$SCRIPT_DIR/.env.sync"
fi

# Check required vars
if [ -z "$TEST_SOURCE_PATH" ]; then
    if [ "$SILENT_FAIL" = "true" ]; then
        exit 0
    fi
    echo "Error: TEST_SOURCE_PATH not set"
    echo ""
    echo "Set environment variables:"
    echo "  export TEST_SOURCE_PATH=~/wdir/ama/amar_django_back/tests/contracts"
    echo "  export WARD_TESTS_PATH=~/wdir/ama/pawprint/ward/tools/tester/tests"
    echo ""
    echo "Or create ctrl/.env.sync with these variables"
    exit 1
fi

if [ -z "$WARD_TESTS_PATH" ]; then
    if [ "$SILENT_FAIL" = "true" ]; then
        exit 0
    fi
    echo "Error: WARD_TESTS_PATH not set"
    exit 1
fi

# Expand paths (values may contain ~ or $VARS from .env.sync)
SOURCE=$(eval echo "$TEST_SOURCE_PATH")
TARGET=$(eval echo "$WARD_TESTS_PATH")

if [ ! -d "$SOURCE" ]; then
    if [ "$SILENT_FAIL" = "true" ]; then
        exit 0
    fi
    echo "Error: Source directory not found: $SOURCE"
    exit 1
fi

echo "=== Syncing Contract Tests ==="
echo ""
echo "Source: $SOURCE"
echo "Target: $TARGET"
echo ""

# Create target if it doesn't exist
mkdir -p "$TARGET"

# Sync tests. Only pass --exclude-from when the shared exclude file is
# present — rsync errors out on a missing exclude file.
RSYNC_ARGS=(-av --delete)
if [ -f "$SCRIPT_DIR/.exclude" ]; then
    RSYNC_ARGS+=(--exclude-from="$SCRIPT_DIR/.exclude")
fi
rsync "${RSYNC_ARGS[@]}" \
    "$SOURCE/" \
    "$TARGET/"

echo ""
echo "[OK] Tests synced successfully"
echo ""
echo "Changes are immediately visible in Docker (volume mount)"
echo "Just refresh your browser - no restart needed!"
echo ""

# Count test files
TEST_COUNT=$(find "$TARGET" -name "test_*.py" | wc -l)
echo "Total test files: $TEST_COUNT"
echo ""

13
mainroom/link/Dockerfile Normal file
View File

@@ -0,0 +1,13 @@
# Link Nest service image: slim Python base, deps layer cached separately
# from the application layer for faster rebuilds.
FROM python:3.11-slim

WORKDIR /app

# Install dependencies (copied alone first so this layer is cached until
# requirements.txt changes)
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application
COPY . .

# Run the FastAPI app (main.py starts uvicorn itself)
CMD ["python", "main.py"]

120
mainroom/link/README.md Normal file
View File

@@ -0,0 +1,120 @@
# Link Nest - Adapter Layer
Provides framework-agnostic data navigation between managed apps (AMAR) and pawprint.
## Status: Initial Implementation ✅
**Working:**
- ✅ FastAPI service with adapter pattern
- ✅ BaseAdapter interface for pluggable frameworks
- ✅ DjangoAdapter with AMAR database queries
- ✅ Docker build and container starts
- ✅ `/health` endpoint (adapter loads successfully)
- ✅ `/api/queries` endpoint (lists available queries)
**Pending:**
- ⏳ Database connection (needs DB_HOST env var fix)
- ⏳ Complete all entity queries (Pet, Vet, ServiceRequest, etc.)
- ⏳ Ward integration (consume JSON and render graph)
## Architecture
```
Managed App (AMAR) ←─── link_nest ───→ Pawprint (Ward)
↓ ↓ ↓
Database Adapter Layer Graph Renderer
(SQL → JSON) (JSON → SVG)
```
**JSON Contract:**
```json
{
"nodes": [
{"id": "User_123", "type": "User", "label": "john", "data": {...}}
],
"edges": [
{"from": "User_123", "to": "PetOwner_456", "label": "has profile"}
],
"summary": {
"title": "User #123",
"credentials": "john | Password: Amar2025!",
"fields": {"Email": "john@example.com"}
}
}
```
## Endpoints
- `GET /health` - Health check with adapter status
- `GET /api/queries` - List available predefined queries
- `GET /api/navigate?query=user_with_pets` - Query mode
- `GET /api/navigate?entity=User&id=123` - Entity navigation mode
## Available Queries
1. `user_with_pets` - User with Pet ownership
2. `user_with_requests` - User with ServiceRequests
## Usage
```bash
# Start (from core_nest/ctrl)
./start.sh link_nest -d --build
# Test
curl http://localhost:8100/health
curl http://localhost:8100/api/queries
curl "http://localhost:8100/api/navigate?query=user_with_pets"
# Logs
docker logs core_nest_link_nest
```
## Environment Variables
From `core_nest/.env`:
- `NEST_NAME` - Container naming
- `NETWORK_NAME` - Docker network
- `DB_HOST` - Database host (needs fix: should point to db container)
- `DB_PORT` - Database port
- `DB_NAME` - Database name
- `DB_USER` - Database user
- `DB_PASSWORD` - Database password
- `ADAPTER_TYPE` - Adapter to use (default: django)
## Next Steps
1. **Fix DB connection** - Set correct DB_HOST in core_nest/.env
2. **Complete queries** - Add remaining entity types
3. **Ward integration** - Create ward consumer for JSON
4. **Add graphviz rendering** - Move from data_browse reference
5. **Test end-to-end** - Query → JSON → SVG → Display
## Files
```
link_nest/
├── README.md # This file
├── main.py # FastAPI app with endpoints
├── requirements.txt # Python dependencies
├── Dockerfile # Container build
├── docker-compose.yml # Service definition
└── adapters/
├── __init__.py # BaseAdapter interface
└── django.py # DjangoAdapter implementation
```
## Design Goals
**Framework-agnostic** - Works with Django, Rails, Express, etc.
**Decoupled** - Managed app owns data, link_nest translates
**Pluggable** - Adapters for different frameworks
**Authenticated** - Ready for remote deployment
**Incremental** - Build and test each piece
## Reference
Previous approach (databrowse direct DB) saved in:
- Branch: `ref/databrowse-direct-db` (ward repo)
- Problem: Tight coupling, won't work remote
- Solution: This adapter pattern

View File

@@ -0,0 +1,43 @@
"""
Adapters for different managed app frameworks.
"""
from typing import Dict, List, Any, Optional
from abc import ABC, abstractmethod
class BaseAdapter(ABC):
    """Abstract interface every framework adapter must implement.

    A concrete adapter translates one managed app's data source into the
    shared graph JSON contract (nodes / edges / summary) that pawprint
    consumes.
    """

    def __init__(self, config: Dict[str, Any]):
        """Store the connection configuration for subclasses to use.

        Args:
            config: Database connection or API endpoint configuration
        """
        self.config = config

    @abstractmethod
    def navigate(
        self,
        query: Optional[str] = None,
        entity: Optional[str] = None,
        id: Optional[int] = None
    ) -> Dict[str, Any]:
        """Navigate the data graph, by predefined query or by entity+id.

        Returns:
            {
                "nodes": [{"id": str, "type": str, "label": str, "data": dict}],
                "edges": [{"from": str, "to": str, "label": str}],
                "summary": {"title": str, "credentials": str|None, "fields": dict}
            }
        """
        ...

    @abstractmethod
    def get_queries(self) -> List[str]:
        """Return list of available query names."""
        ...

View File

@@ -0,0 +1,235 @@
"""
Django adapter for AMAR.
Queries AMAR's PostgreSQL database directly.
"""
from typing import Dict, List, Any, Optional
from sqlalchemy import create_engine, text
from . import BaseAdapter
class DjangoAdapter(BaseAdapter):
    """Adapter for Django/AMAR.

    Queries AMAR's PostgreSQL database directly and converts single joined
    rows into the shared graph contract (nodes / edges / summary).
    """

    def __init__(self, config: Dict[str, Any]):
        """Create the adapter and its SQLAlchemy engine.

        Args:
            config: dict with keys 'user', 'password', 'host', 'port', 'name'.
        """
        super().__init__(config)
        self.engine = self._create_engine()

    def _create_engine(self):
        """Create SQLAlchemy engine from config."""
        db_url = (
            f"postgresql://{self.config['user']}:{self.config['password']}"
            f"@{self.config['host']}:{self.config['port']}/{self.config['name']}"
        )
        # pool_pre_ping revalidates pooled connections before each checkout
        return create_engine(db_url, pool_pre_ping=True)

    def _execute(self, sql: str, params: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
        """Execute SQL and return results as list of dicts.

        Args:
            sql: SQL text, optionally with :name bind placeholders.
            params: values for the bind placeholders (None for none).
        """
        with self.engine.connect() as conn:
            result = conn.execute(text(sql), params or {})
            rows = result.fetchall()
            columns = result.keys()
            return [dict(zip(columns, row)) for row in rows]

    def get_queries(self) -> List[str]:
        """Available predefined queries."""
        return [
            "user_with_pets",
            "user_with_requests",
        ]

    def navigate(
        self,
        query: Optional[str] = None,
        entity: Optional[str] = None,
        id: Optional[int] = None
    ) -> Dict[str, Any]:
        """Navigate data graph.

        Raises:
            ValueError: if neither a query name nor entity+id is given.
        """
        if query:
            return self._query_mode(query)
        elif entity and id:
            return self._entity_mode(entity, id)
        else:
            raise ValueError("Must provide either query or entity+id")

    def _query_mode(self, query_name: str) -> Dict[str, Any]:
        """Execute predefined query (one representative row per query)."""
        if query_name == "user_with_pets":
            sql = """
                SELECT
                    u.id as user_id, u.username, u.email,
                    po.id as petowner_id, po.first_name, po.last_name, po.phone,
                    p.id as pet_id, p.name as pet_name, p.pet_type, p.age
                FROM auth_user u
                JOIN mascotas_petowner po ON po.user_id = u.id
                JOIN mascotas_pet p ON p.owner_id = po.id
                WHERE p.deleted = false
                LIMIT 1
            """
        elif query_name == "user_with_requests":
            sql = """
                SELECT
                    u.id as user_id, u.username, u.email,
                    po.id as petowner_id, po.first_name, po.last_name,
                    sr.id as request_id, sr.state, sr.created_at
                FROM auth_user u
                JOIN mascotas_petowner po ON po.user_id = u.id
                JOIN solicitudes_servicerequest sr ON sr.petowner_id = po.id
                WHERE sr.deleted = false
                ORDER BY sr.created_at DESC
                LIMIT 1
            """
        else:
            raise ValueError(f"Unknown query: {query_name}")

        rows = self._execute(sql)
        if not rows:
            return self._empty_response()
        return self._rows_to_graph(rows[0])

    def _entity_mode(self, entity: str, id: int) -> Dict[str, Any]:
        """Navigate to specific entity.

        Uses a bound parameter (:id) rather than f-string interpolation —
        never build SQL by string formatting (SQL injection risk).
        """
        if entity == "User":
            sql = """
                SELECT
                    u.id as user_id, u.username, u.email,
                    po.id as petowner_id, po.first_name, po.last_name, po.phone
                FROM auth_user u
                LEFT JOIN mascotas_petowner po ON po.user_id = u.id
                WHERE u.id = :id
            """
        else:
            raise ValueError(f"Unknown entity: {entity}")

        rows = self._execute(sql, {"id": id})
        if not rows:
            return self._empty_response()
        return self._rows_to_graph(rows[0])

    def _rows_to_graph(self, row: Dict[str, Any]) -> Dict[str, Any]:
        """Convert one joined SQL row to the graph structure.

        Each recognized *_id column present and non-null becomes a node;
        edges are added between nodes whose ids appear in the same row.
        """
        nodes = []
        edges = []

        # User node
        if row.get("user_id"):
            nodes.append({
                "id": f"User_{row['user_id']}",
                "type": "User",
                "label": row.get("username") or row.get("email", ""),
                "data": {
                    "id": row["user_id"],
                    "username": row.get("username"),
                    "email": row.get("email"),
                }
            })

        # PetOwner node (User -> PetOwner edge when both present)
        if row.get("petowner_id"):
            name = f"{row.get('first_name', '')} {row.get('last_name', '')}".strip()
            nodes.append({
                "id": f"PetOwner_{row['petowner_id']}",
                "type": "PetOwner",
                "label": name or "PetOwner",
                "data": {
                    "id": row["petowner_id"],
                    "first_name": row.get("first_name"),
                    "last_name": row.get("last_name"),
                    "phone": row.get("phone"),
                }
            })
            if row.get("user_id"):
                edges.append({
                    "from": f"User_{row['user_id']}",
                    "to": f"PetOwner_{row['petowner_id']}",
                    "label": "has profile"
                })

        # Pet node (PetOwner -> Pet edge when both present)
        if row.get("pet_id"):
            nodes.append({
                "id": f"Pet_{row['pet_id']}",
                "type": "Pet",
                "label": row.get("pet_name", "Pet"),
                "data": {
                    "id": row["pet_id"],
                    "name": row.get("pet_name"),
                    "pet_type": row.get("pet_type"),
                    "age": row.get("age"),
                }
            })
            if row.get("petowner_id"):
                edges.append({
                    "from": f"PetOwner_{row['petowner_id']}",
                    "to": f"Pet_{row['pet_id']}",
                    "label": "owns"
                })

        # ServiceRequest node (PetOwner -> ServiceRequest edge when both present)
        if row.get("request_id"):
            nodes.append({
                "id": f"ServiceRequest_{row['request_id']}",
                "type": "ServiceRequest",
                "label": f"Request #{row['request_id']}",
                "data": {
                    "id": row["request_id"],
                    "state": row.get("state"),
                    "created_at": str(row.get("created_at", "")),
                }
            })
            if row.get("petowner_id"):
                edges.append({
                    "from": f"PetOwner_{row['petowner_id']}",
                    "to": f"ServiceRequest_{row['request_id']}",
                    "label": "requested"
                })

        # Build summary from first User node
        summary = self._build_summary(nodes)

        return {
            "nodes": nodes,
            "edges": edges,
            "summary": summary
        }

    def _build_summary(self, nodes: List[Dict]) -> Dict[str, Any]:
        """Build summary from nodes (prefers the User node if present)."""
        user_node = next((n for n in nodes if n["type"] == "User"), None)

        if user_node:
            data = user_node["data"]
            return {
                "title": f"User #{data['id']}",
                "credentials": f"{data.get('username', 'N/A')} | Password: Amar2025!",
                "fields": {
                    "Email": data.get("email", "N/A"),
                    "Username": data.get("username", "N/A"),
                }
            }

        # Fallback when the row held no User columns
        return {
            "title": "No data",
            "credentials": None,
            "fields": {}
        }

    def _empty_response(self) -> Dict[str, Any]:
        """Return empty response structure."""
        return {
            "nodes": [],
            "edges": [],
            "summary": {
                "title": "No data found",
                "credentials": None,
                "fields": {}
            }
        }

View File

@@ -0,0 +1,25 @@
# Link Nest adapter service. Joins the pre-existing nest network so it can
# reach the managed app's database container by hostname.
services:
  link_nest:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: ${NEST_NAME}_link_nest
    ports:
      # host 8100 -> container 8000 (uvicorn, see main.py)
      - "8100:8000"
    environment:
      - PORT=8000
      # Adapter selection; defaults to the Django/AMAR adapter
      - ADAPTER_TYPE=${ADAPTER_TYPE:-django}
      # Database connection passed through from core_nest/.env
      - DB_HOST=${DB_HOST}
      - DB_PORT=${DB_PORT}
      - DB_NAME=${DB_NAME}
      - DB_USER=${DB_USER}
      - DB_PASSWORD=${DB_PASSWORD}
    volumes:
      # Bind-mount the source for live reload during development
      - ./:/app
    networks:
      - default

networks:
  default:
    # Network is created elsewhere (by the nest ctrl scripts), not here
    external: true
    name: ${NETWORK_NAME}

105
mainroom/link/main.py Normal file
View File

@@ -0,0 +1,105 @@
"""
Link Nest - Adapter layer between managed apps and pawprint.
Exposes standardized JSON endpoints for data navigation.
Framework-agnostic via pluggable adapters.
"""
import os
from typing import Optional
from fastapi import FastAPI, HTTPException
app = FastAPI(title="Link Nest", version="0.1.0")

# Lazy-loaded adapter instance; populated on first use by get_adapter() so
# importing this module never opens a database connection.
_adapter = None
def get_adapter():
    """Return the process-wide adapter, creating it on first call.

    Reads ADAPTER_TYPE and DB_* from the environment; raises ValueError
    for an unrecognized adapter type.
    """
    global _adapter
    if _adapter is not None:
        return _adapter

    adapter_type = os.getenv("ADAPTER_TYPE", "django")

    # Database config from environment
    db_config = {
        "host": os.getenv("DB_HOST", "localhost"),
        "port": int(os.getenv("DB_PORT", "5432")),
        "name": os.getenv("DB_NAME", "amarback"),
        "user": os.getenv("DB_USER", "postgres"),
        "password": os.getenv("DB_PASSWORD", ""),
    }

    if adapter_type != "django":
        raise ValueError(f"Unknown adapter type: {adapter_type}")

    # Imported lazily so the module loads even when the adapter's
    # dependencies are unavailable.
    from adapters.django import DjangoAdapter
    _adapter = DjangoAdapter(db_config)
    return _adapter
@app.get("/health")
def health():
    """Health check: reports whether the adapter could be constructed."""
    adapter_type = os.getenv("ADAPTER_TYPE", "django")

    # Probe the adapter; any failure downgrades status instead of erroring.
    adapter_ok = False
    try:
        get_adapter()
        adapter_ok = True
    except Exception as e:
        print(f"Adapter error: {e}")

    status = "ok" if adapter_ok else "degraded"
    return {
        "status": status,
        "service": "link-nest",
        "adapter": adapter_type,
        "adapter_loaded": adapter_ok,
    }
@app.get("/api/queries")
def list_queries():
    """Expose the adapter's predefined query names."""
    return {"queries": get_adapter().get_queries()}
@app.get("/api/navigate")
def navigate(query: Optional[str] = None, entity: Optional[str] = None, id: Optional[int] = None):
    """Navigate the data graph.

    Query mode:       ?query=user_with_pets
    Navigation mode:  ?entity=User&id=123

    Returns the graph contract: {"nodes": [...], "edges": [...], "summary": {...}}.
    Bad arguments become HTTP 400; anything else becomes HTTP 500.
    """
    try:
        return get_adapter().navigate(query=query, entity=entity, id=id)
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        print(f"Navigate error: {e}")
        raise HTTPException(status_code=500, detail=str(e))
if __name__ == "__main__":
    # Dev entry point: serve with auto-reload on the configured port.
    import uvicorn

    port = int(os.getenv("PORT", "8000"))
    uvicorn.run("main:app", host="0.0.0.0", port=port, reload=True)

View File

@@ -0,0 +1,4 @@
fastapi
uvicorn[standard]
psycopg2-binary
sqlalchemy

311
mainroom/sbwrapper/README.md Executable file
View File

@@ -0,0 +1,311 @@
# Pawprint Wrapper - Development Tools Sidebar
A collapsible sidebar that provides development and testing tools for any pawprint-managed nest (like amar) without interfering with the managed application.
## Features
### 👤 Quick Login
- Switch between test users with one click
- Pre-configured admin, vet, and tutor accounts
- Automatic JWT token management
- Shows currently logged-in user
### 🌍 Environment Info
- Display backend and frontend URLs
- Nest name and deployment info
- Quick reference during development
### ⌨️ Keyboard Shortcuts
- **Ctrl+Shift+P** - Toggle sidebar
### 💾 State Persistence
- Sidebar remembers expanded/collapsed state
- Persists across page reloads
## Files
```
wrapper/
├── index.html # Standalone demo
├── sidebar.css # Sidebar styling
├── sidebar.js # Sidebar logic
├── config.json # Configuration (users, URLs)
└── README.md # This file
```
## Quick Start
### Standalone Demo
Open `index.html` in your browser to see the sidebar in action:
```bash
cd core_nest/wrapper
python3 -m http.server 8080
# Open http://localhost:8080
```
Click the toggle button on the right edge or press **Ctrl+Shift+P**.
### Integration with Your App
Add these two lines to your HTML:
```html
<link rel="stylesheet" href="/wrapper/sidebar.css">
<script src="/wrapper/sidebar.js"></script>
```
The sidebar will automatically:
1. Load configuration from `/wrapper/config.json`
2. Create the sidebar UI
3. Setup keyboard shortcuts
4. Check for existing logged-in users
## Configuration
Edit `config.json` to customize:
```json
{
"nest_name": "amar",
"wrapper": {
"enabled": true,
"environment": {
"backend_url": "http://localhost:8000",
"frontend_url": "http://localhost:3000"
},
"users": [
{
"id": "admin",
"label": "Admin",
"username": "admin@test.com",
"password": "Amar2025!",
"icon": "👑",
"role": "ADMIN"
}
]
}
}
```
### User Fields
- **id**: Unique identifier for the user
- **label**: Display name in the sidebar
- **username**: Login username (email)
- **password**: Login password
- **icon**: Emoji icon to display
- **role**: User role (ADMIN, VET, USER)
## How It Works
### Login Flow
1. User clicks a user card in the sidebar
2. `sidebar.js` calls `POST {backend_url}/api/token/` with credentials
3. Backend returns JWT tokens: `{ access, refresh, details }`
4. Tokens stored in localStorage
5. Page reloads, user is now logged in
### Token Storage
Tokens are stored in localStorage:
- `access_token` - JWT access token
- `refresh_token` - JWT refresh token
- `user_info` - User metadata (username, label, role)
### Logout Flow
1. User clicks "Logout" button
2. Tokens removed from localStorage
3. Page reloads, user is logged out
## Docker Integration
### Approach 1: Static Files
Mount wrapper as static files in docker-compose:
```yaml
services:
frontend:
volumes:
- ./ctrl/wrapper:/app/public/wrapper:ro
```
Then in your HTML:
```html
<link rel="stylesheet" href="/wrapper/sidebar.css">
<script src="/wrapper/sidebar.js"></script>
```
### Approach 2: Nginx Injection
Use nginx to inject the sidebar script automatically:
```nginx
location / {
sub_filter '</head>' '<link rel="stylesheet" href="/wrapper/sidebar.css"><script src="/wrapper/sidebar.js"></script></head>';
sub_filter_once on;
proxy_pass http://frontend:3000;
}
location /wrapper/ {
alias /app/wrapper/;
}
```
### Approach 3: Wrapper Service
Create a dedicated wrapper service:
```yaml
services:
wrapper:
image: nginx:alpine
ports:
- "80:80"
volumes:
- ./ctrl/wrapper:/usr/share/nginx/html/wrapper
environment:
- MANAGED_APP_URL=http://frontend:3000
```
See `../WRAPPER_DESIGN.md` for detailed Docker integration patterns.
## Customization
### Styling
Edit `sidebar.css` to customize appearance:
```css
:root {
--sidebar-width: 320px;
--sidebar-bg: #1e1e1e;
--sidebar-text: #e0e0e0;
--sidebar-accent: #007acc;
}
```
### Add New Panels
Add HTML to `getSidebarHTML()` in `sidebar.js`:
```javascript
getSidebarHTML() {
return `
...existing panels...
<div class="panel">
<h3>🆕 My New Panel</h3>
<p>Custom content here</p>
</div>
`;
}
```
### Add New Features
Extend the `PawprintSidebar` class in `sidebar.js`:
```javascript
class PawprintSidebar {
async fetchJiraInfo() {
const response = await fetch('https://artery.mcrn.ar/jira/VET-123');
const data = await response.json();
// Update UI with data
}
}
```
## API Requirements
The sidebar expects these endpoints from your backend:
### POST /api/token/
Login endpoint that returns JWT tokens.
**Request:**
```json
{
"username": "admin@test.com",
"password": "Amar2025!"
}
```
**Response:**
```json
{
"access": "eyJ0eXAiOiJKV1QiLCJhbGc...",
"refresh": "eyJ0eXAiOiJKV1QiLCJhbGc...",
"details": {
"role": "ADMIN",
"id": 1,
"name": "Admin User"
}
}
```
## Troubleshooting
### Sidebar not appearing
- Check browser console for errors
- Verify `sidebar.js` and `sidebar.css` are loaded
- Check that `config.json` is accessible
### Login fails
- Verify backend URL in `config.json`
- Check backend is running
- Verify credentials are correct
- Check CORS settings on backend
### Tokens not persisting
- Check localStorage is enabled
- Verify domain matches between sidebar and app
- Check browser privacy settings
## Security Considerations
⚠️ **Important:** This sidebar is for **development/testing only**.
- Passwords are stored in plain text in `config.json`
- Do NOT use in production
- Do NOT commit real credentials to git
- Add `config.json` to `.gitignore` if it contains sensitive data
For production:
- Disable wrapper via `"enabled": false` in config
- Use environment variables for URLs
- Remove or secure test user credentials
## Future Enhancements
Planned features (see `../WRAPPER_DESIGN.md`):
- 📋 **Jira Info Panel** - Fetch ticket details from artery
- 📊 **Logs Viewer** - Stream container logs
- 🎨 **Theme Switcher** - Light/dark mode
- 🔍 **Search** - Quick search across tools
- ⚙️ **Settings** - Customize sidebar behavior
- 📱 **Mobile Support** - Responsive design improvements
## Related Documentation
- `../WRAPPER_DESIGN.md` - Complete architecture design
- `../../../pawprint/CLAUDE.md` - Pawprint framework overview
- `../../README.md` - Core nest documentation
## Contributing
To add a new panel or feature:
1. Add HTML in `getSidebarHTML()`
2. Add styling in `sidebar.css`
3. Add logic as methods on `PawprintSidebar` class
4. Update this README with usage instructions
## License
Part of the Pawprint development tools ecosystem.

40
mainroom/sbwrapper/config.json Executable file
View File

@@ -0,0 +1,40 @@
{
"nest_name": "amar",
"wrapper": {
"enabled": true,
"environment": {
"backend_url": "http://localhost:8000",
"frontend_url": "http://localhost:3000"
},
"users": [
{
"id": "admin",
"label": "Admin",
"username": "admin@test.com",
"password": "Amar2025!",
"icon": "👑",
"role": "ADMIN"
},
{
"id": "vet1",
"label": "Vet 1",
"username": "vet@test.com",
"password": "Amar2025!",
"icon": "🩺",
"role": "VET"
},
{
"id": "tutor1",
"label": "Tutor 1",
"username": "tutor@test.com",
"password": "Amar2025!",
"icon": "🐶",
"role": "USER"
}
],
"jira": {
"ticket_id": "VET-535",
"epic": "EPIC-51.3"
}
}
}

197
mainroom/sbwrapper/index.html Executable file
View File

@@ -0,0 +1,197 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Pawprint Wrapper - Demo</title>
<link rel="stylesheet" href="sidebar.css">
<style>
/* Demo page styles */
body {
margin: 0;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}
#demo-content {
padding: 40px;
max-width: 800px;
margin: 0 auto;
transition: margin-right 0.3s ease;
}
#pawprint-sidebar.expanded ~ #demo-content {
margin-right: var(--sidebar-width);
}
.demo-header {
margin-bottom: 40px;
}
.demo-header h1 {
font-size: 32px;
margin-bottom: 8px;
color: #1a1a1a;
}
.demo-header p {
color: #666;
font-size: 16px;
}
.demo-section {
margin-bottom: 32px;
padding: 24px;
background: #f5f5f5;
border-radius: 8px;
border-left: 4px solid #007acc;
}
.demo-section h2 {
font-size: 20px;
margin-bottom: 16px;
color: #1a1a1a;
}
.demo-section p {
color: #444;
line-height: 1.6;
margin-bottom: 12px;
}
.demo-section code {
background: #e0e0e0;
padding: 2px 6px;
border-radius: 3px;
font-size: 14px;
font-family: 'Monaco', 'Courier New', monospace;
}
.demo-section ul {
margin-left: 20px;
color: #444;
}
.demo-section li {
margin-bottom: 8px;
line-height: 1.6;
}
.status-box {
padding: 16px;
background: white;
border: 1px solid #ddd;
border-radius: 6px;
margin-top: 16px;
}
.status-box strong {
color: #007acc;
}
.kbd {
display: inline-block;
padding: 3px 8px;
background: #fff;
border: 1px solid #ccc;
border-radius: 4px;
box-shadow: 0 2px 0 #bbb;
font-family: 'Monaco', monospace;
font-size: 12px;
margin: 0 2px;
}
</style>
</head>
<body>
<div id="demo-content">
<div class="demo-header">
<h1>🐾 Pawprint Wrapper</h1>
<p>Development tools sidebar for any pawprint-managed nest</p>
</div>
<div class="demo-section">
<h2>👋 Quick Start</h2>
<p>
This is a standalone demo of the Pawprint Wrapper sidebar.
Click the toggle button on the right edge of the screen, or press
<span class="kbd">Ctrl</span> + <span class="kbd">Shift</span> + <span class="kbd">P</span>
to open the sidebar.
</p>
</div>
<div class="demo-section">
<h2>🎯 Features</h2>
<ul>
<li><strong>Quick Login:</strong> Switch between test users with one click</li>
<li><strong>Environment Info:</strong> See current backend/frontend URLs</li>
<li><strong>JWT Token Management:</strong> Automatic token storage and refresh</li>
<li><strong>Keyboard Shortcuts:</strong> Ctrl+Shift+P to toggle</li>
<li><strong>Persistent State:</strong> Sidebar remembers expanded/collapsed state</li>
</ul>
</div>
<div class="demo-section">
<h2>👤 Test Users</h2>
<p>Try logging in as one of these test users (from <code>config.json</code>):</p>
<ul>
<li>👑 <strong>Admin</strong> - admin@test.com / Amar2025!</li>
<li>🩺 <strong>Vet 1</strong> - vet@test.com / Amar2025!</li>
<li>🐶 <strong>Tutor 1</strong> - tutor@test.com / Amar2025!</li>
</ul>
<div class="status-box">
<strong>Note:</strong> In this demo, login will fail because there's no backend running.
When integrated with a real AMAR instance, clicking a user card will:
<ol style="margin-top: 8px; margin-left: 20px;">
<li>Call <code>POST /api/token/</code> with username/password</li>
<li>Store access & refresh tokens in localStorage</li>
<li>Reload the page with the user logged in</li>
</ol>
</div>
</div>
<div class="demo-section">
<h2>🔧 How It Works</h2>
<p>The sidebar is implemented as three files:</p>
<ul>
<li><code>sidebar.css</code> - Visual styling (dark theme, animations)</li>
<li><code>sidebar.js</code> - Logic (login, logout, toggle, state management)</li>
<li><code>config.json</code> - Configuration (users, URLs, nest info)</li>
</ul>
<p style="margin-top: 16px;">
To integrate with your app, simply include these in your HTML:
</p>
<div style="background: #fff; padding: 12px; border-radius: 4px; margin-top: 8px;">
<code style="display: block; font-size: 13px;">
&lt;link rel="stylesheet" href="/wrapper/sidebar.css"&gt;<br>
&lt;script src="/wrapper/sidebar.js"&gt;&lt;/script&gt;
</code>
</div>
</div>
<div class="demo-section">
<h2>🚀 Next Steps</h2>
<p>Planned enhancements:</p>
<ul>
<li>📋 <strong>Jira Info Panel:</strong> Fetch and display ticket details from artery</li>
<li>📊 <strong>Logs Viewer:</strong> Stream container logs via WebSocket</li>
<li>🎨 <strong>Theme Switcher:</strong> Light/dark theme toggle</li>
<li>🔍 <strong>Search:</strong> Quick search across users and tools</li>
<li>⚙️ <strong>Settings:</strong> Customize sidebar behavior</li>
</ul>
</div>
<div class="demo-section">
<h2>📚 Documentation</h2>
                <p>
                    See <code>WRAPPER_DESIGN.md</code> in <code>mainroom/</code> for the complete
                    architecture design, including Docker integration patterns and alternative approaches.
                </p>
</div>
</div>
<!-- Load the sidebar -->
<script src="sidebar.js"></script>
</body>
</html>

296
mainroom/sbwrapper/sidebar.css Executable file
View File

@@ -0,0 +1,296 @@
/* Pawprint Wrapper - Sidebar Styles
 *
 * This stylesheet is included directly into host applications (see README:
 * "simply include these in your HTML"), so every rule is scoped to the
 * sidebar's own elements (#pawprint-sidebar, #sidebar-toggle). The previous
 * unscoped `*` reset and global `body` font rule clobbered host-page styles.
 */
:root {
  --sidebar-width: 320px;
  --sidebar-bg: #1e1e1e;
  --sidebar-text: #e0e0e0;
  --sidebar-accent: #007acc;
  --sidebar-border: #333;
  --sidebar-shadow: 0 0 20px rgba(0,0,0,0.5);
  --card-bg: #2a2a2a;
  --card-hover: #3a3a3a;
  --success: #4caf50;
  --error: #f44336;
}

/* Scoped reset: applies only to the widget, never to the host page. */
#pawprint-sidebar,
#pawprint-sidebar *,
#sidebar-toggle {
  box-sizing: border-box;
  margin: 0;
  padding: 0;
}

/* Font stack on the widget roots instead of the host <body>. */
#pawprint-sidebar,
#sidebar-toggle {
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif;
}

/* Sidebar Container */
#pawprint-sidebar {
  position: fixed;
  right: 0;
  top: 0;
  width: var(--sidebar-width);
  height: 100vh;
  background: var(--sidebar-bg);
  color: var(--sidebar-text);
  box-shadow: var(--sidebar-shadow);
  transform: translateX(100%);
  transition: transform 0.3s cubic-bezier(0.4, 0, 0.2, 1);
  z-index: 9999;
  overflow-y: auto;
  overflow-x: hidden;
  display: flex;
  flex-direction: column;
}

#pawprint-sidebar.expanded {
  transform: translateX(0);
}

/* Toggle Button */
#sidebar-toggle {
  position: fixed;
  right: 0;
  top: 50%;
  transform: translateY(-50%);
  background: var(--sidebar-bg);
  color: var(--sidebar-text);
  border: 1px solid var(--sidebar-border);
  border-right: none;
  border-radius: 8px 0 0 8px;
  padding: 12px 8px;
  cursor: pointer;
  z-index: 10000;
  font-size: 16px;
  transition: background 0.2s;
  box-shadow: -2px 0 8px rgba(0,0,0,0.3);
}

#sidebar-toggle:hover {
  background: var(--card-hover);
}

#sidebar-toggle .icon {
  display: block;
  transition: transform 0.3s;
}

/* Flip the chevron when the sidebar is open. Works because sidebar.js
   appends the sidebar before the toggle button (general sibling selector). */
#pawprint-sidebar.expanded ~ #sidebar-toggle .icon {
  transform: scaleX(-1);
}

/* Header */
.sidebar-header {
  padding: 20px;
  border-bottom: 1px solid var(--sidebar-border);
  background: linear-gradient(135deg, #1a1a1a 0%, #2a2a2a 100%);
}

.sidebar-header h2 {
  font-size: 18px;
  font-weight: 600;
  margin-bottom: 4px;
  color: var(--sidebar-accent);
}

.sidebar-header .nest-name {
  font-size: 12px;
  opacity: 0.7;
  text-transform: uppercase;
  letter-spacing: 1px;
}

/* Content */
.sidebar-content {
  flex: 1;
  padding: 20px;
  overflow-y: auto;
}

/* Panel */
.panel {
  margin-bottom: 24px;
  padding: 16px;
  background: var(--card-bg);
  border-radius: 8px;
  border: 1px solid var(--sidebar-border);
}

.panel h3 {
  font-size: 14px;
  font-weight: 600;
  margin-bottom: 12px;
  color: var(--sidebar-accent);
  display: flex;
  align-items: center;
  gap: 8px;
}

/* Current User Display */
.current-user {
  padding: 12px;
  background: rgba(76, 175, 80, 0.1);
  border: 1px solid rgba(76, 175, 80, 0.3);
  border-radius: 6px;
  margin-bottom: 16px;
  font-size: 13px;
}

.current-user strong {
  color: var(--success);
  font-weight: 600;
}

.current-user .logout-btn {
  margin-top: 8px;
  padding: 6px 12px;
  background: rgba(244, 67, 54, 0.1);
  border: 1px solid rgba(244, 67, 54, 0.3);
  color: var(--error);
  border-radius: 4px;
  cursor: pointer;
  font-size: 12px;
  transition: all 0.2s;
  width: 100%;
}

.current-user .logout-btn:hover {
  background: rgba(244, 67, 54, 0.2);
}

/* User Cards */
.user-cards {
  display: flex;
  flex-direction: column;
  gap: 8px;
}

.user-card {
  display: flex;
  align-items: center;
  gap: 12px;
  padding: 12px;
  background: var(--card-bg);
  border: 1px solid var(--sidebar-border);
  border-radius: 6px;
  cursor: pointer;
  transition: all 0.2s;
}

.user-card:hover {
  background: var(--card-hover);
  border-color: var(--sidebar-accent);
  transform: translateX(-2px);
}

.user-card.active {
  background: rgba(0, 122, 204, 0.2);
  border-color: var(--sidebar-accent);
}

.user-card .icon {
  font-size: 24px;
  width: 32px;
  height: 32px;
  display: flex;
  align-items: center;
  justify-content: center;
  background: rgba(255,255,255,0.05);
  border-radius: 50%;
}

.user-card .info {
  flex: 1;
}

.user-card .label {
  display: block;
  font-size: 14px;
  font-weight: 600;
  margin-bottom: 2px;
}

.user-card .role {
  display: block;
  font-size: 11px;
  opacity: 0.6;
  text-transform: uppercase;
  letter-spacing: 0.5px;
}

/* Status Messages */
.status-message {
  padding: 12px;
  border-radius: 6px;
  font-size: 13px;
  margin-bottom: 16px;
  border: 1px solid;
}

.status-message.success {
  background: rgba(76, 175, 80, 0.1);
  border-color: rgba(76, 175, 80, 0.3);
  color: var(--success);
}

.status-message.error {
  background: rgba(244, 67, 54, 0.1);
  border-color: rgba(244, 67, 54, 0.3);
  color: var(--error);
}

.status-message.info {
  background: rgba(0, 122, 204, 0.1);
  border-color: rgba(0, 122, 204, 0.3);
  color: var(--sidebar-accent);
}

/* Loading Spinner */
.loading {
  display: inline-block;
  width: 14px;
  height: 14px;
  border: 2px solid rgba(255,255,255,0.1);
  border-top-color: var(--sidebar-accent);
  border-radius: 50%;
  animation: spin 0.8s linear infinite;
}

@keyframes spin {
  to { transform: rotate(360deg); }
}

/* Scrollbar (WebKit-only; other engines fall back to the default) */
#pawprint-sidebar::-webkit-scrollbar {
  width: 8px;
}

#pawprint-sidebar::-webkit-scrollbar-track {
  background: #1a1a1a;
}

#pawprint-sidebar::-webkit-scrollbar-thumb {
  background: #444;
  border-radius: 4px;
}

#pawprint-sidebar::-webkit-scrollbar-thumb:hover {
  background: #555;
}

/* Footer */
.sidebar-footer {
  padding: 16px 20px;
  border-top: 1px solid var(--sidebar-border);
  font-size: 11px;
  opacity: 0.5;
  text-align: center;
}

/* Responsive: full-width drawer on small screens */
@media (max-width: 768px) {
  #pawprint-sidebar {
    width: 100%;
  }
}

286
mainroom/sbwrapper/sidebar.js Executable file
View File

@@ -0,0 +1,286 @@
// Pawprint Wrapper - Sidebar Logic
//
// Injects a collapsible dev-tools sidebar (quick login, environment info)
// into the host page. Configuration is fetched from /wrapper/config.json.
class PawprintSidebar {
  constructor() {
    this.config = null;       // parsed config.json (or built-in fallback)
    this.currentUser = null;  // user_info object persisted in localStorage
    this.sidebar = null;      // root <div id="pawprint-sidebar">
    this.toggleBtn = null;    // <button id="sidebar-toggle">
  }

  /**
   * Build the sidebar: load config, inject DOM, wire events, restore
   * login/expanded state. Must run after the DOM is parsed (see bootstrap).
   */
  async init() {
    // Load configuration
    await this.loadConfig();

    // Create sidebar elements (sidebar first, then toggle — the CSS
    // `.expanded ~ #sidebar-toggle` sibling selector relies on this order)
    this.createSidebar();
    this.createToggleButton();

    // Setup event listeners
    this.setupEventListeners();

    // Check if user is already logged in
    this.checkCurrentUser();

    // Load saved sidebar state
    this.loadSidebarState();
  }

  /**
   * Fetch /wrapper/config.json into this.config.
   * On any failure (network error, non-2xx status, invalid JSON) falls back
   * to a minimal default config so the sidebar still renders.
   */
  async loadConfig() {
    try {
      const response = await fetch('/wrapper/config.json');
      // Fix: a 404/500 previously fell through to response.json() and
      // surfaced as a confusing JSON parse error; fail fast with the status.
      if (!response.ok) {
        throw new Error(`HTTP ${response.status}`);
      }
      this.config = await response.json();
      console.log('[Pawprint] Config loaded:', this.config.nest_name);
    } catch (error) {
      console.error('[Pawprint] Failed to load config:', error);
      // Use default config
      this.config = {
        nest_name: 'default',
        wrapper: {
          environment: {
            backend_url: 'http://localhost:8000',
            frontend_url: 'http://localhost:3000'
          },
          users: []
        }
      };
    }
  }

  /** Create and append the sidebar container with its rendered HTML. */
  createSidebar() {
    const sidebar = document.createElement('div');
    sidebar.id = 'pawprint-sidebar';
    sidebar.innerHTML = this.getSidebarHTML();
    document.body.appendChild(sidebar);
    this.sidebar = sidebar;
  }

  /** Create and append the edge toggle button. */
  createToggleButton() {
    const button = document.createElement('button');
    button.id = 'sidebar-toggle';
    button.innerHTML = '<span class="icon">◀</span>';
    button.title = 'Toggle Pawprint Sidebar (Ctrl+Shift+P)';
    document.body.appendChild(button);
    this.toggleBtn = button;
  }

  /**
   * Render the sidebar markup from this.config.
   * NOTE(review): config values (nest_name, user labels/ids, URLs) are
   * interpolated into innerHTML unescaped. Acceptable for a dev-only tool
   * reading a trusted local config.json, but would be XSS if config ever
   * came from an untrusted source.
   * @returns {string} HTML for the sidebar's inner content.
   */
  getSidebarHTML() {
    const users = this.config.wrapper.users || [];
    return `
      <div class="sidebar-header">
        <h2>🐾 Pawprint</h2>
        <div class="nest-name">${this.config.nest_name}</div>
      </div>
      <div class="sidebar-content">
        <div id="status-container"></div>
        <!-- Quick Login Panel -->
        <div class="panel">
          <h3>👤 Quick Login</h3>
          <div id="current-user-display" style="display: none;">
            <div class="current-user">
              Logged in as: <strong id="current-username"></strong>
              <button class="logout-btn" onclick="pawprintSidebar.logout()">
                Logout
              </button>
            </div>
          </div>
          <div class="user-cards">
            ${users.map(user => `
              <div class="user-card" data-user-id="${user.id}" onclick="pawprintSidebar.loginAs('${user.id}')">
                <div class="icon">${user.icon}</div>
                <div class="info">
                  <span class="label">${user.label}</span>
                  <span class="role">${user.role}</span>
                </div>
              </div>
            `).join('')}
          </div>
        </div>
        <!-- Environment Info Panel -->
        <div class="panel">
          <h3>🌍 Environment</h3>
          <div style="font-size: 12px; opacity: 0.8;">
            <div style="margin-bottom: 8px;">
              <strong>Backend:</strong><br>
              <code style="font-size: 11px;">${this.config.wrapper.environment.backend_url}</code>
            </div>
            <div>
              <strong>Frontend:</strong><br>
              <code style="font-size: 11px;">${this.config.wrapper.environment.frontend_url}</code>
            </div>
          </div>
        </div>
      </div>
      <div class="sidebar-footer">
        Pawprint Dev Tools
      </div>
    `;
  }

  /** Wire the toggle button and the Ctrl+Shift+P keyboard shortcut. */
  setupEventListeners() {
    // Toggle button
    this.toggleBtn.addEventListener('click', () => this.toggle());

    // Keyboard shortcut: Ctrl+Shift+P (with Shift held, e.key is uppercase 'P')
    document.addEventListener('keydown', (e) => {
      if (e.ctrlKey && e.shiftKey && e.key === 'P') {
        e.preventDefault();
        this.toggle();
      }
    });
  }

  /** Toggle expanded/collapsed and persist the new state. */
  toggle() {
    this.sidebar.classList.toggle('expanded');
    this.saveSidebarState();
  }

  /** Persist the expanded flag (stored as the string "true"/"false"). */
  saveSidebarState() {
    const isExpanded = this.sidebar.classList.contains('expanded');
    localStorage.setItem('pawprint_sidebar_expanded', isExpanded);
  }

  /** Restore the expanded state saved by saveSidebarState(). */
  loadSidebarState() {
    const isExpanded = localStorage.getItem('pawprint_sidebar_expanded') === 'true';
    if (isExpanded) {
      this.sidebar.classList.add('expanded');
    }
  }

  /**
   * Show a transient status banner in the sidebar.
   * @param {string} message - text to display (set via textContent, safe)
   * @param {string} [type='info'] - one of 'info' | 'success' | 'error'
   */
  showStatus(message, type = 'info') {
    const container = document.getElementById('status-container');
    const statusDiv = document.createElement('div');
    statusDiv.className = `status-message ${type}`;
    statusDiv.textContent = message;
    container.innerHTML = '';
    container.appendChild(statusDiv);

    // Auto-remove after 5 seconds (remove() on an already-detached node is a no-op)
    setTimeout(() => {
      statusDiv.remove();
    }, 5000);
  }

  /**
   * Log in as the configured user with the given id: POST /api/token/,
   * store JWTs and user info in localStorage, then reload the page.
   * @param {string} userId - id field of an entry in config.wrapper.users
   */
  async loginAs(userId) {
    const user = this.config.wrapper.users.find(u => u.id === userId);
    if (!user) return;

    this.showStatus(`Logging in as ${user.label}... ⏳`, 'info');

    try {
      const backendUrl = this.config.wrapper.environment.backend_url;
      const response = await fetch(`${backendUrl}/api/token/`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          username: user.username,
          password: user.password
        })
      });

      if (!response.ok) {
        throw new Error(`Login failed: ${response.status}`);
      }

      const data = await response.json();

      // Store tokens
      localStorage.setItem('access_token', data.access);
      localStorage.setItem('refresh_token', data.refresh);

      // Store user info (backend role wins over the config's static role)
      localStorage.setItem('user_info', JSON.stringify({
        username: user.username,
        label: user.label,
        role: data.details?.role || user.role
      }));

      this.showStatus(`✓ Logged in as ${user.label}`, 'success');
      this.currentUser = user;
      this.updateCurrentUserDisplay();

      // Reload page after short delay
      setTimeout(() => {
        window.location.reload();
      }, 1000);
    } catch (error) {
      console.error('[Pawprint] Login error:', error);
      this.showStatus(`✗ Login failed: ${error.message}`, 'error');
    }
  }

  /** Clear stored tokens/user info and reload the page. */
  logout() {
    localStorage.removeItem('access_token');
    localStorage.removeItem('refresh_token');
    localStorage.removeItem('user_info');
    this.showStatus('✓ Logged out', 'success');
    this.currentUser = null;
    this.updateCurrentUserDisplay();

    // Reload page after short delay
    setTimeout(() => {
      window.location.reload();
    }, 1000);
  }

  /** Restore this.currentUser from localStorage, if a session exists. */
  checkCurrentUser() {
    const userInfo = localStorage.getItem('user_info');
    if (userInfo) {
      try {
        this.currentUser = JSON.parse(userInfo);
        this.updateCurrentUserDisplay();
      } catch (error) {
        console.error('[Pawprint] Failed to parse user info:', error);
      }
    }
  }

  /** Sync the "logged in as" banner and the active user-card highlight. */
  updateCurrentUserDisplay() {
    const display = document.getElementById('current-user-display');
    const username = document.getElementById('current-username');

    if (this.currentUser) {
      display.style.display = 'block';
      username.textContent = this.currentUser.username;

      // Highlight active user card
      document.querySelectorAll('.user-card').forEach(card => {
        card.classList.remove('active');
      });
      const activeCard = document.querySelector(`.user-card[data-user-id="${this.getUserIdByUsername(this.currentUser.username)}"]`);
      if (activeCard) {
        activeCard.classList.add('active');
      }
    } else {
      display.style.display = 'none';
    }
  }

  /**
   * Reverse lookup: config user id for a username.
   * @param {string} username
   * @returns {?string} matching user id, or null if not configured
   */
  getUserIdByUsername(username) {
    const user = this.config.wrapper.users.find(u => u.username === username);
    return user ? user.id : null;
  }
}
// Initialize sidebar when DOM is ready.
// Exposed as a global (`pawprintSidebar`) because the generated markup uses
// inline onclick="pawprintSidebar.loginAs(...)" handlers.
const pawprintSidebar = new PawprintSidebar();

if (document.readyState === 'loading') {
  // Script loaded in <head> (or deferred): wait for the DOM to be parsed.
  document.addEventListener('DOMContentLoaded', () => pawprintSidebar.init());
} else {
  // DOM already parsed (e.g. script tag at end of <body>): init immediately.
  pawprintSidebar.init();
}

console.log('[Pawprint] Sidebar script loaded');

View File

@@ -0,0 +1,20 @@
# Shared Python service image: runs a uvicorn app listening on port 8000.
FROM python:3.11-slim

WORKDIR /app

# Install system dependencies
# gcc + libpq-dev (PostgreSQL client headers) are needed to compile Python
# packages that link against libpq; clearing the apt lists keeps the layer small.
RUN apt-get update && apt-get install -y \
    gcc \
    libpq-dev \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies
# requirements.txt is copied before the application code so the dependency
# layer stays cached until requirements.txt itself changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application
COPY . .

EXPOSE 8000

CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]

View File

@@ -0,0 +1,118 @@
# Nginx for Local Development
## Overview
The `docker-compose.nginx.yml` file provides an **optional** nginx container for local development. This is **NOT** used on AWS (which uses bare metal nginx).
## When to Use
Use nginx container when you want to access services via friendly domains locally:
- `http://amarmascotas.local.com` → amar
- `http://pawprint.local.com` → pawprint + services
## Setup
### 1. Generate Nginx Config
```bash
cd ../ctrl/server
./setup.sh --local
```
This creates `/tmp/core_nest.conf` with your local domain routing.
### 2. Ensure /etc/hosts is configured
```bash
# Should already be there, but verify:
grep "127.0.0.1.*amarmascotas.local.com" /etc/hosts
grep "127.0.0.1.*pawprint.local.com" /etc/hosts
```
### 3. Stop bare metal nginx (if running)
```bash
sudo systemctl stop nginx
# Optional: disable so it doesn't start on boot
sudo systemctl disable nginx
```
### 4. Start services WITH nginx
```bash
cd ../ctrl
./start.sh --with-nginx
# OR manually:
cd ../pawprint
docker compose -f docker-compose.yml -f docker-compose.nginx.yml up -d
```
## Without Nginx (Default)
If you don't use nginx, services are accessed via ports:
- `http://localhost:3000` → amar frontend
- `http://localhost:8000` → amar backend
- `http://localhost:13000` → pawprint
- `http://localhost:13001` → artery
- `http://localhost:13002` → album
- `http://localhost:13003` → ward
The default `docker-compose.yml` exposes these ports, so it works either way.
## Switching Between Nginx and Direct Access
**To use nginx routing:**
```bash
# Stop direct port access
cd ../ctrl && ./stop.sh
# Start with nginx
./start.sh --with-nginx
```
**To go back to direct ports:**
```bash
# Stop nginx version
cd ../pawprint
docker compose -f docker-compose.yml -f docker-compose.nginx.yml down
# Start without nginx
cd ../ctrl && ./start.sh
```
## AWS Production
On AWS, **do NOT use** `docker-compose.nginx.yml`. The AWS setup uses bare metal nginx configured via `ctrl/server/setup.sh --production`.
## Troubleshooting
**Port 80 already in use:**
```bash
# Check what's using it
ss -tlnp | grep :80
# If it's nginx bare metal
sudo systemctl stop nginx
# If it's another container
docker ps | grep :80
docker stop <container_name>
```
**Config not found error:**
```bash
# Make sure you ran setup first
cd ../ctrl/server && ./setup.sh --local
# Verify config exists
ls -la /tmp/core_nest.conf
```
**DNS not resolving:**
```bash
# Check /etc/hosts
cat /etc/hosts | grep local.com
# Test DNS
ping -c 1 amarmascotas.local.com
```

View File

@@ -0,0 +1,45 @@
# Nginx Reverse Proxy for Local Development
#
# This is OPTIONAL - only for local development
# AWS uses bare metal nginx instead
#
# Usage:
#   docker compose -f docker-compose.yml -f docker-compose.nginx.yml up -d
#
# Before using:
#   1. Generate nginx config: cd ../ctrl/server && ./setup.sh --local
#   2. Stop bare metal nginx: sudo systemctl stop nginx (if installed)
#
# This nginx container will:
#   - Listen on port 80
#   - Route amarmascotas.local.com to amar frontend/backend
#   - Route pawprint.local.com to pawprint services
#
# NOTE: this file is an overlay — it references the `pawprint` service from
# the base docker-compose.yml, so it must always be passed together with it
# (as shown in Usage above).

services:
  nginx:
    image: nginx:alpine
    container_name: ${DEPLOYMENT_NAME}_nginx
    ports:
      - "80:80"
    volumes:
      # Mount template that will be processed with envsubst
      # (the official nginx image renders /etc/nginx/templates/*.template
      # into /etc/nginx/conf.d/ at container start-up)
      - ../ctrl/server/nginx/docker-local.conf:/etc/nginx/templates/default.conf.template:ro
      # Mount wrapper files for serving
      - ../wrapper:/app/wrapper:ro
    env_file:
      - .env
    environment:
      # Variables consumed by envsubst when rendering the config template
      - DEPLOYMENT_NAME=${DEPLOYMENT_NAME}
      - NEST_NAME=${NEST_NAME}
      - MANAGED_DOMAIN=${MANAGED_DOMAIN}
      - PAWPRINT_DOMAIN=${PAWPRINT_DOMAIN}
    networks:
      - default
    depends_on:
      - pawprint
    restart: unless-stopped

networks:
  # Join the pre-existing mainroom network rather than creating one
  default:
    external: true
    name: ${NETWORK_NAME}

View File

@@ -0,0 +1,93 @@
# Soleprint Services - Docker Compose
#
# Creates: soleprint, artery, atlas, station
# Network: Joins mainroom network (external)
#
# Usage:
#   cd mainroom/soleprint && docker compose up -d
#
# Code: Mounts from bare metal at SOLEPRINT_BARE_PATH (restart to pick up changes)
#
# FUTURE: Room Switching
# - Station UI selector to pick target room
# - Environment loaded from data/rooms.json
# - DB_HOST, managed app URLs change based on selected room
#
# NOTE(review): all four services share one Dockerfile at the repo root.
# For artery/atlas/station that Dockerfile lies OUTSIDE the build context
# (context is the subdirectory) — BuildKit accepts an absolute `dockerfile:`
# path, but classic builders reject Dockerfiles outside the context; confirm
# against the builder in use. The four near-identical stanzas could also be
# deduplicated with YAML anchors or `extends` if all tooling supports them.

services:
  # Main soleprint app, built from the repo root
  soleprint:
    build:
      context: ${SOLEPRINT_BARE_PATH}
      dockerfile: ${SOLEPRINT_BARE_PATH}/Dockerfile
    container_name: ${DEPLOYMENT_NAME}_soleprint
    volumes:
      # Live-mount source over the image's /app; restart to pick up changes
      - ${SOLEPRINT_BARE_PATH}:/app
    ports:
      - "${SOLEPRINT_PORT}:8000"
    env_file:
      - .env
    networks:
      - default
    command: uvicorn main:app --host 0.0.0.0 --port 8000

  artery:
    build:
      # NOTE(review): dockerfile outside build context — see header note
      context: ${SOLEPRINT_BARE_PATH}/artery
      dockerfile: ${SOLEPRINT_BARE_PATH}/Dockerfile
    container_name: ${DEPLOYMENT_NAME}_artery
    volumes:
      - ${SOLEPRINT_BARE_PATH}/artery:/app
    ports:
      - "${ARTERY_PORT}:8000"
    env_file:
      - .env
    depends_on:
      - soleprint
    networks:
      - default
    command: uvicorn main:app --host 0.0.0.0 --port 8000

  atlas:
    build:
      # NOTE(review): dockerfile outside build context — see header note
      context: ${SOLEPRINT_BARE_PATH}/atlas
      dockerfile: ${SOLEPRINT_BARE_PATH}/Dockerfile
    container_name: ${DEPLOYMENT_NAME}_atlas
    volumes:
      - ${SOLEPRINT_BARE_PATH}/atlas:/app
    ports:
      - "${ATLAS_PORT}:8000"
    env_file:
      - .env
    depends_on:
      - soleprint
    networks:
      - default
    command: uvicorn main:app --host 0.0.0.0 --port 8000

  station:
    build:
      # NOTE(review): dockerfile outside build context — see header note
      context: ${SOLEPRINT_BARE_PATH}/station
      dockerfile: ${SOLEPRINT_BARE_PATH}/Dockerfile
    container_name: ${DEPLOYMENT_NAME}_station
    environment:
      # Database connection (from mainroom/.env when orchestrated)
      # Bare names pass the variables through from the compose environment
      - DB_HOST
      - DB_PORT
      - DB_NAME
      - DB_USER
      - DB_PASSWORD
    volumes:
      - ${SOLEPRINT_BARE_PATH}/station:/app
    ports:
      - "${STATION_PORT}:8000"
    env_file:
      - .env
    depends_on:
      - soleprint
    networks:
      - default
    command: uvicorn main:app --host 0.0.0.0 --port 8000

networks:
  # Join the pre-existing mainroom network rather than creating one
  default:
    external: true
    name: ${NETWORK_NAME}