commit 910a6465f2183e3f068fb27eb2c38519589bbb0e Author: Marco Allegretti Date: Tue Jan 27 17:21:58 2026 +0100 Initial commit: Likwid governance platform - Backend: Rust/Axum with PostgreSQL, plugin architecture - Frontend: Astro with polished UI - Voting methods: Approval, Ranked Choice, Schulze, STAR, Quadratic - Features: Liquid delegation, transparent moderation, structured deliberation - Documentation: User and admin guides in /docs - Deployment: Docker/Podman compose files for production and demo - Demo: Seeded data with 3 communities, 13 users, 7 proposals License: AGPLv3 diff --git a/.dev/dev-token.txt b/.dev/dev-token.txt new file mode 100644 index 0000000..cee2b65 --- /dev/null +++ b/.dev/dev-token.txt @@ -0,0 +1 @@ +eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiIxNWRjNGQ2OS1mOGMxLTRjNTEtYTE2Yy1iMThhYzIyMmNkMzEiLCJ1c2VybmFtZSI6ImRldmFkbWluIiwiZXhwIjoxNzY5NTA1MjA2LCJpYXQiOjE3Njk0MTg4MDZ9.eWgwHPFLUE71RNi8KF1rDkOtECLxRsal6rnl0hMQfZQ \ No newline at end of file diff --git a/.dev/pids/backend.pid b/.dev/pids/backend.pid new file mode 100644 index 0000000..fd5cef2 --- /dev/null +++ b/.dev/pids/backend.pid @@ -0,0 +1 @@ +10488 \ No newline at end of file diff --git a/.dev/pids/frontend.pid b/.dev/pids/frontend.pid new file mode 100644 index 0000000..fab5d58 --- /dev/null +++ b/.dev/pids/frontend.pid @@ -0,0 +1 @@ +12944 \ No newline at end of file diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..6fc29b5 --- /dev/null +++ b/.env.example @@ -0,0 +1,4 @@ +DATABASE_URL=postgres://likwid:likwid@localhost:5432/likwid +POSTGRES_USER=likwid +POSTGRES_PASSWORD=likwid +POSTGRES_DB=likwid diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..225055e --- /dev/null +++ b/.gitignore @@ -0,0 +1,49 @@ +# Rust +/backend/target/ +**/*.rs.bk +Cargo.lock + +# Node +/frontend/node_modules/ +/frontend/dist/ +/frontend/.astro/ + +# Environment +.env +.env.local +.env.*.local +backend/.env + +# Development state +.dev/ + +# IDE +.idea/ +.vscode/ +*.swp +*.swo + +# OS +.DS_Store +Thumbs.db + +# Logs +*.log +npm-debug.log* + +# Podman +.podman/ + +# Internal development documentation (not for public) +/docu_dev/ + +# Compose environment files (contain secrets) +/compose/.env.production +/compose/.env.demo + +# Database +*.sql.backup + +# Build artifacts +*.dump +*.dump.gz diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..12d873e --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,208 @@ +# Contributing to Likwid + +## Getting Started + +### Development Environment + +#### Prerequisites + +**All Platforms:** +- Git +- Rust (via rustup) — latest stable +- Node.js LTS (20.x+) +- Podman (for database) + +**Windows:** +- WSL2 enabled +- Podman Desktop with WSL2 backend +- MSVC toolchain for Rust + +**Linux:** +- podman-compose + +### Quick Setup + +```bash +# Clone the repository +git clone https://codeberg.org/likwid/likwid +cd likwid + +# Copy environment configuration +cp .env.example .env + +# Start development environment +# Windows: +.\scripts\dev-start.ps1 + +# Linux: +./scripts/dev-start.sh +``` + +### Project Structure + +``` +likwid/ +├── backend/ # Rust backend (Axum) +│ ├── src/ +│ │ ├── api/ # HTTP endpoints +│ │ ├── auth/ # Authentication +│ │ ├── models/ # Database models +│ │ └── plugins/ # Plugin system +│ └── migrations/ # SQLx migrations +├── frontend/ # Astro frontend +│ └── src/ +│ ├── pages/ # Routes +│ ├── layouts/ # Page templates +│ └── components/# Reusable UI +├── scripts/ # Dev scripts +└── compose/ # Container 
configs +``` + +## Development Workflow + +### Branch Naming + +- `feature/description` — New features +- `fix/description` — Bug fixes +- `docs/description` — Documentation +- `refactor/description` — Code refactoring + +``` +type(scope): description + +[optional body] + +[optional footer] +``` + +**Types:** `feat`, `fix`, `docs`, `style`, `refactor`, `test`, `chore` + +**Examples:** +``` +feat(voting): implement Schulze method +fix(auth): correct JWT expiration handling +docs(readme): add Voting System info +``` + +### Code Style + +#### Rust (Backend) + +- Follow `rustfmt` defaults +- Use `clippy` for linting +- Document public APIs with `///` comments +- Prefer `Result` over panics + +```bash +cd backend +cargo fmt --check +cargo clippy +``` + +#### TypeScript (Frontend) + +- Strict mode enabled +- Use TypeScript for all new code +- Follow Astro conventions + +```bash +cd frontend +npm run check +``` + +### Testing + +```bash +# Backend tests +cd backend +cargo test + +# Frontend checks +cd frontend +npm run check +``` + +### Database Migrations + +We use SQLx for compile-time checked queries: + +```bash +cd backend + +# Create a new migration +sqlx migrate add description_of_change + +# Run migrations +sqlx migrate run + +# Prepare offline query data (for CI) +cargo sqlx prepare +``` + +## Contributing Guidelines + +### Before You Start + +1. **Check existing issues** — Someone may already be working on it +2. **Open an issue first** — For significant changes, discuss before coding +3. **Keep changes focused** — One feature/fix per merge request + +### Submitting Changes + +1. Fork the repository +2. Create a feature branch +3. Make your changes +4. Ensure tests pass +5. Submit a merge request + +### Merge Request Checklist + +- [ ] Code follows project style +- [ ] Tests added/updated +- [ ] Documentation updated +- [ ] Commit messages follow conventions +- [ ] No unrelated changes + + +### Find a Third Way + +When opinions conflict, seek solutions that satisfy everyone rather than taking sides. The best outcomes come from understanding all perspectives. + +### Be Pragmatic + +We value tangible results over theoretical debates. If something works and improves the project, it's worth considering. + +### Support Each Other + +Help newcomers, answer questions patiently, and remember that everyone was new once. + +## Areas for Contribution + +### High Priority +- Advanced voting methods (Schulze, STAR, Quadratic) +- Liquid delegation engine +- Accessibility improvements +- Mobile responsiveness +- Internationalization (i18n) + +### Plugin Development +- Create new plugins for the WASM runtime +- Improve plugin documentation +- Build integrations (GitLab, Matrix, etc.) + +### Documentation +- User guides +- API documentation +- Tutorial videos +- Translations + +### Design +- UI/UX improvements +- Icon design +- Theme development + +## License + +By contributing, you agree that your contributions will be licensed under LGPL-2.1-or-later. + +--- diff --git a/DEPLOYMENT.md b/DEPLOYMENT.md new file mode 100644 index 0000000..8b0bf8d --- /dev/null +++ b/DEPLOYMENT.md @@ -0,0 +1,188 @@ +# Likwid Deployment Guide + +Likwid supports two distinct deployment modes: **Production** and **Demo**. These are separate instances with their own databases. 
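+Because the two stacks bind different ports, they can run side-by-side on the same host. A minimal sketch, assuming the env files have already been created from their `.example` templates as described below:
+
+```bash
+# Production instance: frontend :4321, backend :3000, postgres :5432
+podman-compose --env-file compose/.env.production -f compose/production.yml up -d
+
+# Demo instance: frontend :4322, backend :3001, postgres :5433
+podman-compose --env-file compose/.env.demo -f compose/demo.yml up -d
+```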
+ +## Architecture Overview + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ PRODUCTION │ +│ ┌──────────┐ ┌──────────┐ ┌──────────────────────┐ │ +│ │ Frontend │───▶│ Backend │───▶│ PostgreSQL (prod_db) │ │ +│ │ :4321 │ │ :3000 │ │ :5432 │ │ +│ └──────────┘ └──────────┘ └──────────────────────┘ │ +│ DEMO_MODE=false │ No demo data │ Clean database │ +└─────────────────────────────────────────────────────────────────┘ + +┌─────────────────────────────────────────────────────────────────┐ +│ DEMO │ +│ ┌──────────┐ ┌──────────┐ ┌──────────────────────┐ │ +│ │ Frontend │───▶│ Backend │───▶│ PostgreSQL (demo_db) │ │ +│ │ :4322 │ │ :3001 │ │ :5433 │ │ +│ └──────────┘ └──────────┘ └──────────────────────┘ │ +│ DEMO_MODE=true │ Seed data │ Resettable database │ +└─────────────────────────────────────────────────────────────────┘ +``` + +## Quick Start + +### Production Deployment + +```bash +# 1. Copy and configure environment +cp compose/.env.production.example compose/.env.production + +# 2. Edit .env.production with secure values +# - Set strong POSTGRES_PASSWORD +# - Set random JWT_SECRET (64+ chars) +# - Set your domain in API_BASE + +# 3. Remove demo seed migration (important!) +rm backend/migrations/20260127150000_demo_seed_data.sql + +# 4. Deploy +cd compose +podman-compose --env-file .env.production -f production.yml up -d + +# 5. Access at http://localhost:4321 +``` + +### Demo Deployment + +```bash +# 1. Copy environment (defaults are fine for demo) +cp compose/.env.demo.example compose/.env.demo + +# 2. Deploy +cd compose +podman-compose --env-file .env.demo -f demo.yml up -d + +# 3. Access at http://localhost:4322 +``` + +## Demo Instance Details + +### Demo Accounts + +| Username | Password | Role | +|-------------|----------|------------------------------| +| contributor | demo123 | Standard member | +| moderator | demo123 | Can moderate content | +| observer | demo123 | Read-only access | + +### Pre-seeded Data + +- **3 Communities** + - Aurora Framework (tech/OSS governance) + - Civic Commons Network (civic engagement) + - Regional Makers Collective (federated makerspaces) + +- **13 Users** with realistic profiles + +- **7 Proposals** in various states (draft, discussion, voting, closed) + +- **Delegation relationships** demonstrating liquid democracy + +- **Moderation history** showing governance in action + +### Resetting Demo + +To reset the demo to a clean state: + +```bash +# Windows +.\scripts\demo-reset.ps1 + +# Linux/macOS +./scripts/demo-reset.sh + +# Or manually: +podman-compose -f compose/demo.yml down -v +podman-compose --env-file .env.demo -f compose/demo.yml up -d +``` + +## Configuration Reference + +### Environment Variables + +| Variable | Production Default | Demo Default | Description | +|-------------------|-------------------|---------------------------|--------------------------------| +| POSTGRES_USER | likwid | likwid_demo | Database username | +| POSTGRES_PASSWORD | (required) | demo_secret_change_me | Database password | +| POSTGRES_DB | likwid_prod | likwid_demo | Database name | +| DB_PORT | 5432 | 5433 | Database port | +| JWT_SECRET | (required) | demo_jwt_secret_... 
| JWT signing secret | +| BACKEND_PORT | 3000 | 3001 | Backend API port | +| FRONTEND_PORT | 4321 | 4322 | Frontend port | +| API_BASE | (your domain) | http://localhost:3001 | Public API URL | +| DEMO_MODE | false | true | Enable demo features | + +### Demo Mode Features + +When `DEMO_MODE=true`: +- Demo accounts are recognized and can log in +- Destructive actions on demo data are restricted +- Reset endpoint available at `/api/demo/reset` (admin only) +- Demo status shown at `/api/demo/status` + +## Development Setup + +For local development without containers: + +```bash +# 1. Start only the database +podman-compose -f compose/dev.yml up -d + +# 2. Run backend natively +cd backend +cp ../.env.example .env # Configure DATABASE_URL +cargo run + +# 3. Run frontend natively +cd frontend +npm run dev +``` + +## Monitoring & Logs + +```bash +# View all logs +podman-compose -f compose/demo.yml logs -f + +# View specific service +podman-compose -f compose/demo.yml logs -f backend + +# Check health +curl http://localhost:3001/health +``` + +## Troubleshooting + +### Database connection issues +```bash +# Check if postgres is running +podman-compose -f compose/demo.yml ps + +# View postgres logs +podman-compose -f compose/demo.yml logs postgres +``` + +### Migration failures +```bash +# Connect to database and check +podman exec -it likwid-demo-db psql -U likwid_demo -d likwid_demo + +# List tables +\dt + +# Check migration status +SELECT * FROM _sqlx_migrations; +``` + +### Reset everything +```bash +# Nuclear option - removes all data and volumes +podman-compose -f compose/demo.yml down -v +podman volume prune -f +podman-compose -f compose/demo.yml up -d +``` diff --git a/README.md b/README.md new file mode 100644 index 0000000..9e3341e --- /dev/null +++ b/README.md @@ -0,0 +1,158 @@ +# Likwid - Modular Governance Platform + +**Democracy Design in Practice** + +Likwid is an open-source platform for participatory governance, designed to make collective decision-making accessible, transparent, and genuinely democratic. Built for communities, civic organizations, and any group that values structured deliberation over shouting matches. + +> *"We are citizens of the 21st century, but we rely on institutions designed in the 19th century. 
The problem is not democracy, it's the interface."* + +## Philosophy + +Likwid implements the principles of **Democracy Design**: + +- **Information must be understandable**, not just available +- **Listening matters more than speaking** — structured deliberation over flame wars +- **Voting should express nuance** — from simple approval to Schulze and quadratic methods +- **Delegation should be fluid** — trust networks that adapt in real-time +- **Participation is designed**, not imposed + +## Features + +### Deliberative Democracy +- **Inform → Discuss → Decide** workflow for proposals +- Resource libraries for informed participation +- Small group discussions with facilitators +- "Read before discuss" requirements +- Constructive comment visibility scoring + +### Advanced Voting Methods +- **Approval Voting** — vote for multiple options +- **Ranked Choice** — order preferences +- **Schulze Method** — Condorcet-consistent pairwise comparison +- **STAR Voting** — score + automatic runoff +- **Quadratic Voting** — express intensity of preference + +### Liquid Delegation +- Delegate your vote by topic or globally +- Real-time transparency: see how delegates vote +- Revoke delegation instantly +- Transitive delegation chains +- Delegation analytics and trust networks + +### Modular Plugin System +- WASM-based sandboxed plugins +- Per-community plugin configuration +- Hook-based architecture (actions/filters) +- Built-in and third-party plugins +- Admin policy for signed/unsigned plugins + +### Governance Infrastructure +- Multi-community platform support +- Granular admin controls (platform mode, registration, moderation) +- Public moderation ledger (immutable) +- Role-based access (admin, moderator, facilitator, member) +- Anonymous voting with identity separation + +## Tech Stack + +| Layer | Technology | +|-------|------------| +| **Backend** | Rust (Axum 0.8, Tokio, SQLx) | +| **Frontend** | Astro + TypeScript | +| **Database** | PostgreSQL 16 | +| **Plugins** | WebAssembly (wasmtime) | +| **Containers** | Podman (rootless) | + +## Quick Start + +### Prerequisites + +**Windows:** +- Windows 10/11 with WSL2 +- Podman Desktop (WSL2 backend) +- Rust (rustup, MSVC toolchain) +- Node.js LTS + +**Linux:** +- Podman + podman-compose +- Rust (rustup) +- Node.js LTS + +### Development + +```powershell +# 1. Clone and configure +git clone https://invent.kde.org/marcoa/likwid.git +cd likwid +cp .env.example .env + +# 2. Start everything (database + backend + frontend) +.\scripts\dev-start.ps1 + +# 3. Stop everything +.\scripts\dev-stop.ps1 +``` + +The platform will be available at: +- **Frontend**: http://localhost:4321 +- **Backend API**: http://localhost:3000 +- **Setup Wizard**: http://localhost:4321/setup (first run) + +### First Run + +1. Navigate to `/register` to create the first user (automatically becomes admin) +2. Complete platform setup at `/setup` +3. Configure instance settings at `/admin/settings` +4. Create your first community + +## Project Structure + +``` +likwid/ +├── backend/ # Rust backend +│ ├── src/ +│ │ ├── api/ # REST endpoints +│ │ ├── auth/ # JWT authentication +│ │ ├── models/ # Database models +│ │ └── plugins/ # Plugin system (WASM + builtins) +│ └── migrations/ # SQL migrations +├── frontend/ # Astro frontend +│ ├── src/ +│ │ ├── pages/ # Routes +│ │ ├── layouts/ # Page layouts +│ │ └── components/ # UI components +├── compose/ # Podman compose files +├── scripts/ # Dev scripts (cross-platform) +└── docu_dev/ # Design documents +``` + +### Core Principles + +1. 
**Be considerate** — Your work affects others +2. **Be respectful** — Assume good intentions +3. **Be collaborative** — Work transparently +4. **Be pragmatic** — Results over debates +5. **Find a third way** — Seek solutions that satisfy everyone + +## Roadmap + +- [x] Core voting infrastructure +- [x] Plugin system (WASM + builtins) +- [x] Deliberation phases +- [x] Comment quality scoring +- [ ] Advanced voting methods (Schulze, STAR, Quadratic) +- [ ] Liquid delegation engine +- [ ] GitLab/GitHub integration - plugin +- [ ] Mobile-responsive UI +- [ ] Accessibility audit (WCAG 2.1) + +## License + +LGPL-2.1-or-later +## Acknowledgments + +Inspired by: +- [Pol.is](https://pol.is/) — Opinion mapping +- [Decidim](https://decidim.org/) — Participatory democracy +- [LiquidFeedback](https://liquidfeedback.org/) — Liquid democracy +- [Equal Vote Coalition](https://www.equal.vote/) — STAR Voting diff --git a/WORKFLOW.md b/WORKFLOW.md new file mode 100644 index 0000000..dac731d --- /dev/null +++ b/WORKFLOW.md @@ -0,0 +1,125 @@ +# Likwid Development Workflow + +## Understanding the Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ SHARED CODEBASE │ +│ backend/src/* frontend/src/* migrations/* │ +└─────────────────────────────────────────────────────────────┘ + │ + ┌───────────────┴───────────────┐ + ▼ ▼ +┌─────────────────────┐ ┌─────────────────────┐ +│ PRODUCTION DB │ │ DEMO DB │ +│ (likwid_prod) │ │ (likwid_demo) │ +│ │ │ │ +│ - No demo users │ │ - Demo users seeded │ +│ - No seed data │ │ - Full sample data │ +│ - DEMO_MODE=false │ │ - DEMO_MODE=true │ +│ - Real user data │ │ - Resettable │ +└─────────────────────┘ └─────────────────────┘ +``` + +**Key insight**: Code changes affect both. Data is separate. + +## Daily Development Workflow + +### Your current setup IS the demo +Your local environment with `DEMO_MODE=true` and the seeded database is functionally equivalent to the demo deployment. + +### Making code changes +1. Edit code normally +2. Backend auto-reloads (or `cargo run`) +3. Frontend hot-reloads (`npm run dev`) +4. Test with demo accounts + +### Testing as different users +``` +contributor / demo123 → Standard member experience +moderator / demo123 → Moderation features +observer / demo123 → Read-only experience +``` + +## When You Want a Fresh Demo + +### Option 1: Reset database (quick) +```powershell +# Drop and recreate database +cd backend +sqlx database drop -y +sqlx database create +sqlx migrate run +``` + +### Option 2: Use the reset script (when using containers) +```powershell +.\scripts\demo-reset.ps1 +``` + +## Deploying for Others + +### For testers/curious users (Demo) +```bash +# Uses separate database on port 5433, backend on 3001, frontend on 4322 +cp compose/.env.demo.example compose/.env.demo +podman-compose --env-file compose/.env.demo -f compose/demo.yml up -d +``` + +### For real users (Production) +```bash +# 1. Remove demo seed data +rm backend/migrations/20260127150000_demo_seed_data.sql + +# 2. Configure production +cp compose/.env.production.example compose/.env.production +# Edit with secure passwords and your domain + +# 3. 
Deploy +podman-compose --env-file compose/.env.production -f compose/production.yml up -d +``` + +## Common Tasks + +### "I broke the demo data" +```powershell +cd backend +sqlx database drop -y && sqlx database create && sqlx migrate run +``` + +### "I want to test production-like (no demo data)" +```powershell +# Temporarily move the demo migration +mv backend/migrations/20260127150000_demo_seed_data.sql backend/migrations/20260127150000_demo_seed_data.sql.bak + +# Reset database +sqlx database drop -y && sqlx database create && sqlx migrate run + +# Set DEMO_MODE=false in .env +``` + +### "I want demo data back" +```powershell +# Restore migration +mv backend/migrations/20260127150000_demo_seed_data.sql.bak backend/migrations/20260127150000_demo_seed_data.sql + +# Reset database +sqlx database drop -y && sqlx database create && sqlx migrate run + +# Set DEMO_MODE=true in .env +``` + +### "I want to run both demo and production locally" +Use the container deployments - they use different ports: +- Demo: localhost:4322 (frontend), localhost:3001 (backend) +- Prod: localhost:4321 (frontend), localhost:3000 (backend) + +## Summary + +| Concern | Solution | +|---------|----------| +| Code changes affect demo? | Yes, same codebase. That's expected. | +| Data separation | Different databases (demo vs prod) | +| Reset demo | `sqlx database drop && create && migrate` | +| Test production locally | Remove demo migration, set DEMO_MODE=false | +| Deploy for others | Use compose files with separate DBs | diff --git a/backend/.env.example b/backend/.env.example new file mode 100644 index 0000000..afba0da --- /dev/null +++ b/backend/.env.example @@ -0,0 +1,26 @@ +# Likwid Backend Configuration +# Copy this file to .env and configure as needed + +# Database connection URL +DATABASE_URL=postgres://likwid:likwid@localhost:5432/likwid + +# Server configuration +SERVER_HOST=127.0.0.1 +SERVER_PORT=3000 + +# JWT Secret for authentication tokens +# IMPORTANT: Change this in production! +JWT_SECRET=change-me-in-production + +# ============================================================================= +# DEMO MODE +# ============================================================================= +# Enable demo mode for public demonstration instances. 
+# When enabled: +# - Restricts destructive actions (delete communities, modify instance settings) +# - Enables demo accounts (contributor, moderator, observer) with password: demo123 +# - Loads seed data with realistic governance history +# - Data can be reset via POST /api/demo/reset +# +# Set to true for demo/showcase instances, false for production +DEMO_MODE=false diff --git a/backend/.sqlx/query-00649c07335338a85657781bfe97b299039883e1170687d60047ced9f3271b8f.json b/backend/.sqlx/query-00649c07335338a85657781bfe97b299039883e1170687d60047ced9f3271b8f.json new file mode 100644 index 0000000..ced24e1 --- /dev/null +++ b/backend/.sqlx/query-00649c07335338a85657781bfe97b299039883e1170687d60047ced9f3271b8f.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO amendment_support (amendment_id, user_id, support_type, comment)\n VALUES ($1, $2, $3, $4)\n ON CONFLICT (amendment_id, user_id) DO UPDATE SET\n support_type = $3,\n comment = $4", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Varchar", + "Text" + ] + }, + "nullable": [] + }, + "hash": "00649c07335338a85657781bfe97b299039883e1170687d60047ced9f3271b8f" +} diff --git a/backend/.sqlx/query-00b25a5d76ada968ebb490cdfa9b30d82de7402bda296872eb4a366bd2942640.json b/backend/.sqlx/query-00b25a5d76ada968ebb490cdfa9b30d82de7402bda296872eb4a366bd2942640.json new file mode 100644 index 0000000..e3e06b4 --- /dev/null +++ b/backend/.sqlx/query-00b25a5d76ada968ebb490cdfa9b30d82de7402bda296872eb4a366bd2942640.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT DISTINCT pp.id, pp.name, pp.wasm_bytes, pp.manifest\n FROM plugin_packages pp\n JOIN community_plugin_packages cpp ON cpp.package_id = pp.id\n WHERE cpp.is_active = true\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "wasm_bytes", + "type_info": "Bytea" + }, + { + "ordinal": 3, + "name": "manifest", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false + ] + }, + "hash": "00b25a5d76ada968ebb490cdfa9b30d82de7402bda296872eb4a366bd2942640" +} diff --git a/backend/.sqlx/query-00c90349026ba6858b28e413cff2e1b71f87d06dea5759fb6159da22a995e341.json b/backend/.sqlx/query-00c90349026ba6858b28e413cff2e1b71f87d06dea5759fb6159da22a995e341.json new file mode 100644 index 0000000..5b340a5 --- /dev/null +++ b/backend/.sqlx/query-00c90349026ba6858b28e413cff2e1b71f87d06dea5759fb6159da22a995e341.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO notifications (user_id, type, title, message, link) VALUES ($1, $2, $3, $4, $5)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Varchar", + "Text", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "00c90349026ba6858b28e413cff2e1b71f87d06dea5759fb6159da22a995e341" +} diff --git a/backend/.sqlx/query-00e2f11aa7f20e01f9a9de158b81fbcd5a33511135ce3e05aea3c8c8846239b3.json b/backend/.sqlx/query-00e2f11aa7f20e01f9a9de158b81fbcd5a33511135ce3e05aea3c8c8846239b3.json new file mode 100644 index 0000000..f504a99 --- /dev/null +++ b/backend/.sqlx/query-00e2f11aa7f20e01f9a9de158b81fbcd5a33511135ce3e05aea3c8c8846239b3.json @@ -0,0 +1,151 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, community_id, author_id, title, description, \n status as \"status: _\", voting_method, voting_starts_at, voting_ends_at,\n 
created_at, updated_at, deliberation_phase as \"deliberation_phase: _\",\n inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at,\n min_read_time_seconds, facilitator_id\n FROM proposals \n WHERE community_id = $1 \n ORDER BY created_at DESC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "author_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "status: _", + "type_info": { + "Custom": { + "name": "proposal_status", + "kind": { + "Enum": [ + "draft", + "discussion", + "voting", + "closed", + "archived", + "calculating" + ] + } + } + } + }, + { + "ordinal": 6, + "name": "voting_method", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "voting_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 8, + "name": "voting_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 10, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 11, + "name": "deliberation_phase: _", + "type_info": { + "Custom": { + "name": "deliberation_phase", + "kind": { + "Enum": [ + "drafting", + "informing", + "discussing", + "voting", + "concluded" + ] + } + } + } + }, + { + "ordinal": 12, + "name": "inform_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 13, + "name": "inform_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 14, + "name": "discuss_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 15, + "name": "discuss_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 16, + "name": "min_read_time_seconds", + "type_info": "Int4" + }, + { + "ordinal": 17, + "name": "facilitator_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "00e2f11aa7f20e01f9a9de158b81fbcd5a33511135ce3e05aea3c8c8846239b3" +} diff --git a/backend/.sqlx/query-03706c8c7d9db6d3112ccbd27fd026308c5a03a923f53331468cc899eff9a08d.json b/backend/.sqlx/query-03706c8c7d9db6d3112ccbd27fd026308c5a03a923f53331468cc899eff9a08d.json new file mode 100644 index 0000000..012bb7a --- /dev/null +++ b/backend/.sqlx/query-03706c8c7d9db6d3112ccbd27fd026308c5a03a923f53331468cc899eff9a08d.json @@ -0,0 +1,27 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO rule_violations (\n community_id, rule_id, target_user_id, \n reported_by, report_reason, report_evidence\n ) VALUES ($1, $2, $3, $4, $5, $6)\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Uuid", + "Uuid", + "Text", + "Jsonb" + ] + }, + "nullable": [ + false + ] + }, + "hash": "03706c8c7d9db6d3112ccbd27fd026308c5a03a923f53331468cc899eff9a08d" +} diff --git a/backend/.sqlx/query-03b9920a00f57083543abd69b18965bdb37d21ba503691cd054ed06f6807d7f7.json b/backend/.sqlx/query-03b9920a00f57083543abd69b18965bdb37d21ba503691cd054ed06f6807d7f7.json new file mode 100644 index 0000000..59cc299 --- /dev/null +++ b/backend/.sqlx/query-03b9920a00f57083543abd69b18965bdb37d21ba503691cd054ed06f6807d7f7.json @@ -0,0 
+1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT user_has_permission($1, $2, $3)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "user_has_permission", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "03b9920a00f57083543abd69b18965bdb37d21ba503691cd054ed06f6807d7f7" +} diff --git a/backend/.sqlx/query-0474f023ed456e56e6a744f8a94e4afc361456a4979e1d14378d1f475c1b2192.json b/backend/.sqlx/query-0474f023ed456e56e6a744f8a94e4afc361456a4979e1d14378d1f475c1b2192.json new file mode 100644 index 0000000..57d71f8 --- /dev/null +++ b/backend/.sqlx/query-0474f023ed456e56e6a744f8a94e4afc361456a4979e1d14378d1f475c1b2192.json @@ -0,0 +1,52 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, community_id, name, slug, description, parent_id\n FROM topics\n WHERE community_id = $1\n ORDER BY name", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "slug", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "parent_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + true + ] + }, + "hash": "0474f023ed456e56e6a744f8a94e4afc361456a4979e1d14378d1f475c1b2192" +} diff --git a/backend/.sqlx/query-04c1f2a6a596b9cda5a0744b941777916a3aa3c03445ce59a5a3a69f869078de.json b/backend/.sqlx/query-04c1f2a6a596b9cda5a0744b941777916a3aa3c03445ce59a5a3a69f869078de.json new file mode 100644 index 0000000..37f8d49 --- /dev/null +++ b/backend/.sqlx/query-04c1f2a6a596b9cda5a0744b941777916a3aa3c03445ce59a5a3a69f869078de.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT id, type as notification_type, title, message, link, is_read, created_at\n FROM notifications\n WHERE user_id = $1\n ORDER BY created_at DESC\n LIMIT 50\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "notification_type", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "message", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "link", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "is_read", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + true, + true, + false, + false + ] + }, + "hash": "04c1f2a6a596b9cda5a0744b941777916a3aa3c03445ce59a5a3a69f869078de" +} diff --git a/backend/.sqlx/query-0569967ce647a065b60b93a233bd222d7dc8aef1eeffec8796dae06968faf08d.json b/backend/.sqlx/query-0569967ce647a065b60b93a233bd222d7dc8aef1eeffec8796dae06968faf08d.json new file mode 100644 index 0000000..0174109 --- /dev/null +++ b/backend/.sqlx/query-0569967ce647a065b60b93a233bd222d7dc8aef1eeffec8796dae06968faf08d.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO deliberation_reading_log (proposal_id, user_id, first_read_at, reading_time_seconds)\n VALUES ($1, $2, NOW(), $3)\n ON CONFLICT (proposal_id, user_id) DO UPDATE SET\n read_proposal = CASE WHEN $4 = 'proposal' THEN true ELSE 
deliberation_reading_log.read_proposal END,\n read_summaries = CASE WHEN $4 = 'summaries' THEN true ELSE deliberation_reading_log.read_summaries END,\n read_top_arguments = CASE WHEN $4 = 'arguments' THEN true ELSE deliberation_reading_log.read_top_arguments END,\n reading_time_seconds = deliberation_reading_log.reading_time_seconds + $3,\n updated_at = NOW()", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Int4", + "Text" + ] + }, + "nullable": [] + }, + "hash": "0569967ce647a065b60b93a233bd222d7dc8aef1eeffec8796dae06968faf08d" +} diff --git a/backend/.sqlx/query-05ab322014e6f15af99d2af3c118f0cf64b06f311b17ff3c7197d949bab2a580.json b/backend/.sqlx/query-05ab322014e6f15af99d2af3c118f0cf64b06f311b17ff3c7197d949bab2a580.json new file mode 100644 index 0000000..f683eb9 --- /dev/null +++ b/backend/.sqlx/query-05ab322014e6f15af99d2af3c118f0cf64b06f311b17ff3c7197d949bab2a580.json @@ -0,0 +1,70 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT c.* FROM communities c\n JOIN community_members cm ON c.id = cm.community_id\n WHERE cm.user_id = $1 AND c.is_active = true\n ORDER BY cm.joined_at DESC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "slug", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "settings", + "type_info": "Jsonb" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 7, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "created_by", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + true, + false, + false, + false, + false, + true + ] + }, + "hash": "05ab322014e6f15af99d2af3c118f0cf64b06f311b17ff3c7197d949bab2a580" +} diff --git a/backend/.sqlx/query-05d8db279bba917c2456ca3427876caefc67b9b6a3d9031ab43f998462a75a58.json b/backend/.sqlx/query-05d8db279bba917c2456ca3427876caefc67b9b6a3d9031ab43f998462a75a58.json new file mode 100644 index 0000000..57b0401 --- /dev/null +++ b/backend/.sqlx/query-05d8db279bba917c2456ca3427876caefc67b9b6a3d9031ab43f998462a75a58.json @@ -0,0 +1,64 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id, community_id, title, description,\n conflict_type::text AS \"conflict_type!\",\n status::text AS \"status!\",\n severity_level, is_urgent\n FROM conflict_cases WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "conflict_type!", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "status!", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "severity_level", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "is_urgent", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + null, + null, + false, + false + ] + }, + "hash": "05d8db279bba917c2456ca3427876caefc67b9b6a3d9031ab43f998462a75a58" +} diff --git 
a/backend/.sqlx/query-06415d7e9b1b54bc96b8c35b6c92649ffe74f1d9047c783810c344c75f3b10af.json b/backend/.sqlx/query-06415d7e9b1b54bc96b8c35b6c92649ffe74f1d9047c783810c344c75f3b10af.json new file mode 100644 index 0000000..dd9fbf3 --- /dev/null +++ b/backend/.sqlx/query-06415d7e9b1b54bc96b8c35b6c92649ffe74f1d9047c783810c344c75f3b10af.json @@ -0,0 +1,27 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO compromise_proposals (\n conflict_id, title, description, proposed_actions,\n proposed_by, proposed_by_role\n ) VALUES ($1, $2, $3, $4, $5, $6)\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Text", + "Jsonb", + "Uuid", + "Varchar" + ] + }, + "nullable": [ + false + ] + }, + "hash": "06415d7e9b1b54bc96b8c35b6c92649ffe74f1d9047c783810c344c75f3b10af" +} diff --git a/backend/.sqlx/query-06c276dfa8e0d19cf539d9abdd4699b387c4a8435c11ac1fe7c4a53d5c231ba6.json b/backend/.sqlx/query-06c276dfa8e0d19cf539d9abdd4699b387c4a8435c11ac1fe7c4a53d5c231ba6.json new file mode 100644 index 0000000..c19b087 --- /dev/null +++ b/backend/.sqlx/query-06c276dfa8e0d19cf539d9abdd4699b387c4a8435c11ac1fe7c4a53d5c231ba6.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT author_id, facilitator_id FROM proposals WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "author_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "facilitator_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + true + ] + }, + "hash": "06c276dfa8e0d19cf539d9abdd4699b387c4a8435c11ac1fe7c4a53d5c231ba6" +} diff --git a/backend/.sqlx/query-075e3c636c51526e7ceab5540cccd87e52939da3a48e4a544fe67457e8de6962.json b/backend/.sqlx/query-075e3c636c51526e7ceab5540cccd87e52939da3a48e4a544fe67457e8de6962.json new file mode 100644 index 0000000..0f8cf1d --- /dev/null +++ b/backend/.sqlx/query-075e3c636c51526e7ceab5540cccd87e52939da3a48e4a544fe67457e8de6962.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT allow_community_vote FROM community_rules WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "allow_community_vote", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "075e3c636c51526e7ceab5540cccd87e52939da3a48e4a544fe67457e8de6962" +} diff --git a/backend/.sqlx/query-098ff8cc94787edf79a98396716cb5296547c407516489b5b873dadfa360f91e.json b/backend/.sqlx/query-098ff8cc94787edf79a98396716cb5296547c407516489b5b873dadfa360f91e.json new file mode 100644 index 0000000..a86c7f8 --- /dev/null +++ b/backend/.sqlx/query-098ff8cc94787edf79a98396716cb5296547c407516489b5b873dadfa360f91e.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO quadratic_votes (proposal_id, voter_id, option_id, credits) VALUES ($1, $2, $3, $4)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Uuid", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "098ff8cc94787edf79a98396716cb5296547c407516489b5b873dadfa360f91e" +} diff --git a/backend/.sqlx/query-09e0a994d85ba6589db1c989f193ef10e081ccc560031a056e80cb19436f5e8e.json b/backend/.sqlx/query-09e0a994d85ba6589db1c989f193ef10e081ccc560031a056e80cb19436f5e8e.json new file mode 100644 index 0000000..8d33fff --- /dev/null +++ b/backend/.sqlx/query-09e0a994d85ba6589db1c989f193ef10e081ccc560031a056e80cb19436f5e8e.json @@ -0,0 +1,22 @@ +{ + 
"db_name": "PostgreSQL", + "query": "SELECT id FROM workflow_templates \n WHERE community_id = $1 AND is_default = true\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "09e0a994d85ba6589db1c989f193ef10e081ccc560031a056e80cb19436f5e8e" +} diff --git a/backend/.sqlx/query-0cec90f8ac9b48f22ca24330afb16c9308998ddd30404d2a1eb38bd8d0fa46be.json b/backend/.sqlx/query-0cec90f8ac9b48f22ca24330afb16c9308998ddd30404d2a1eb38bd8d0fa46be.json new file mode 100644 index 0000000..25c3d52 --- /dev/null +++ b/backend/.sqlx/query-0cec90f8ac9b48f22ca24330afb16c9308998ddd30404d2a1eb38bd8d0fa46be.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT setup_completed FROM instance_settings LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "setup_completed", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "0cec90f8ac9b48f22ca24330afb16c9308998ddd30404d2a1eb38bd8d0fa46be" +} diff --git a/backend/.sqlx/query-0d3765f6d5135140d623f9dc706b6907686b98158d775978709db26486109d8b.json b/backend/.sqlx/query-0d3765f6d5135140d623f9dc706b6907686b98158d775978709db26486109d8b.json new file mode 100644 index 0000000..52f0f32 --- /dev/null +++ b/backend/.sqlx/query-0d3765f6d5135140d623f9dc706b6907686b98158d775978709db26486109d8b.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT registration_enabled, registration_mode FROM instance_settings LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "registration_enabled", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "registration_mode", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false + ] + }, + "hash": "0d3765f6d5135140d623f9dc706b6907686b98158d775978709db26486109d8b" +} diff --git a/backend/.sqlx/query-0d79d19ab27c1962afc4ef802f386e160c4a555ca59bade20c901d7b8be99d0e.json b/backend/.sqlx/query-0d79d19ab27c1962afc4ef802f386e160c4a555ca59bade20c901d7b8be99d0e.json new file mode 100644 index 0000000..1348c59 --- /dev/null +++ b/backend/.sqlx/query-0d79d19ab27c1962afc4ef802f386e160c4a555ca59bade20c901d7b8be99d0e.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT is_core FROM default_plugins WHERE plugin_name = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "is_core", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "0d79d19ab27c1962afc4ef802f386e160c4a555ca59bade20c901d7b8be99d0e" +} diff --git a/backend/.sqlx/query-0e518900101ca3ba736e18a911a07759368f0481b3c0dcd4702c077cdadb7ef0.json b/backend/.sqlx/query-0e518900101ca3ba736e18a911a07759368f0481b3c0dcd4702c077cdadb7ef0.json new file mode 100644 index 0000000..0c3ca2f --- /dev/null +++ b/backend/.sqlx/query-0e518900101ca3ba736e18a911a07759368f0481b3c0dcd4702c077cdadb7ef0.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT generate_invitation_code()", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "generate_invitation_code", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "0e518900101ca3ba736e18a911a07759368f0481b3c0dcd4702c077cdadb7ef0" +} diff --git a/backend/.sqlx/query-0ea2a972775b14b5a22edb7b7a81f414993d8153aebd7121acbcbd54257f32bf.json 
b/backend/.sqlx/query-0ea2a972775b14b5a22edb7b7a81f414993d8153aebd7121acbcbd54257f32bf.json new file mode 100644 index 0000000..b6f2351 --- /dev/null +++ b/backend/.sqlx/query-0ea2a972775b14b5a22edb7b7a81f414993d8153aebd7121acbcbd54257f32bf.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE export_jobs SET download_count = download_count + 1 WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "0ea2a972775b14b5a22edb7b7a81f414993d8153aebd7121acbcbd54257f32bf" +} diff --git a/backend/.sqlx/query-0ef338c61969938be731e6187b77c4d7454c409079ecc073aa24a10d339b47f1.json b/backend/.sqlx/query-0ef338c61969938be731e6187b77c4d7454c409079ecc073aa24a10d339b47f1.json new file mode 100644 index 0000000..862c562 --- /dev/null +++ b/backend/.sqlx/query-0ef338c61969938be731e6187b77c4d7454c409079ecc073aa24a10d339b47f1.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT p.name\n FROM plugins p\n JOIN community_plugins cp ON cp.plugin_id = p.id\n WHERE cp.community_id = $1 AND cp.is_active = true AND p.is_active = true", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "name", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "0ef338c61969938be731e6187b77c4d7454c409079ecc073aa24a10d339b47f1" +} diff --git a/backend/.sqlx/query-10649f88f5a0055a0aa23cad37d0b51bd24c35ab290c412bad1286c0baaefd56.json b/backend/.sqlx/query-10649f88f5a0055a0aa23cad37d0b51bd24c35ab290c412bad1286c0baaefd56.json new file mode 100644 index 0000000..44309f9 --- /dev/null +++ b/backend/.sqlx/query-10649f88f5a0055a0aa23cad37d0b51bd24c35ab290c412bad1286c0baaefd56.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM notifications WHERE user_id = $1 AND is_read = false", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "10649f88f5a0055a0aa23cad37d0b51bd24c35ab290c412bad1286c0baaefd56" +} diff --git a/backend/.sqlx/query-10a632674ed7d086243d8a31abcb4935677106be2937daf876b01434fc07d293.json b/backend/.sqlx/query-10a632674ed7d086243d8a31abcb4935677106be2937daf876b01434fc07d293.json new file mode 100644 index 0000000..59f6506 --- /dev/null +++ b/backend/.sqlx/query-10a632674ed7d086243d8a31abcb4935677106be2937daf876b01434fc07d293.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT fp.id, fp.federation_id\n FROM federated_proposals fp\n WHERE fp.local_proposal_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "federation_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "10a632674ed7d086243d8a31abcb4935677106be2937daf876b01434fc07d293" +} diff --git a/backend/.sqlx/query-117802a0a1de4ec2e5e3e7d2fd557421991b07876c7b42760bbe756b979d0e96.json b/backend/.sqlx/query-117802a0a1de4ec2e5e3e7d2fd557421991b07876c7b42760bbe756b979d0e96.json new file mode 100644 index 0000000..ed26046 --- /dev/null +++ b/backend/.sqlx/query-117802a0a1de4ec2e5e3e7d2fd557421991b07876c7b42760bbe756b979d0e96.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE compromise_proposals SET\n party_b_response = $2,\n party_b_response_at = NOW(),\n party_b_feedback = $3,\n updated_at = NOW()\n WHERE id = $1", + 
"describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Text" + ] + }, + "nullable": [] + }, + "hash": "117802a0a1de4ec2e5e3e7d2fd557421991b07876c7b42760bbe756b979d0e96" +} diff --git a/backend/.sqlx/query-132ee5d7ab197b3a1d3bce2740f419e3cc2311e3cf0bbd0dfe821eebb4ca4fe0.json b/backend/.sqlx/query-132ee5d7ab197b3a1d3bce2740f419e3cc2311e3cf0bbd0dfe821eebb4ca4fe0.json new file mode 100644 index 0000000..94e510b --- /dev/null +++ b/backend/.sqlx/query-132ee5d7ab197b3a1d3bce2740f419e3cc2311e3cf0bbd0dfe821eebb4ca4fe0.json @@ -0,0 +1,56 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO delegate_profiles (user_id, display_name, bio, accepting_delegations, delegation_policy)\n VALUES ($1, $2, $3, $4, $5)\n ON CONFLICT (user_id) DO UPDATE SET \n display_name = COALESCE($2, delegate_profiles.display_name),\n bio = COALESCE($3, delegate_profiles.bio),\n accepting_delegations = COALESCE($4, delegate_profiles.accepting_delegations),\n delegation_policy = COALESCE($5, delegate_profiles.delegation_policy),\n updated_at = NOW()\n RETURNING display_name, bio, accepting_delegations, delegation_policy, \n total_delegators, total_votes_cast", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "bio", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "accepting_delegations", + "type_info": "Bool" + }, + { + "ordinal": 3, + "name": "delegation_policy", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "total_delegators", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "total_votes_cast", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Text", + "Bool", + "Text" + ] + }, + "nullable": [ + true, + true, + false, + true, + false, + false + ] + }, + "hash": "132ee5d7ab197b3a1d3bce2740f419e3cc2311e3cf0bbd0dfe821eebb4ca4fe0" +} diff --git a/backend/.sqlx/query-1414a9b0037bc888daa0b1215e88a78b20c7e2c614760f36307a248be45203ff.json b/backend/.sqlx/query-1414a9b0037bc888daa0b1215e88a78b20c7e2c614760f36307a248be45203ff.json new file mode 100644 index 0000000..7b43081 --- /dev/null +++ b/backend/.sqlx/query-1414a9b0037bc888daa0b1215e88a78b20c7e2c614760f36307a248be45203ff.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM community_members WHERE community_id = $1 AND role = 'admin'", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "1414a9b0037bc888daa0b1215e88a78b20c7e2c614760f36307a248be45203ff" +} diff --git a/backend/.sqlx/query-14514b4a260abf4b93429a3d841bea19693db589ad668279d7361f499fe7e408.json b/backend/.sqlx/query-14514b4a260abf4b93429a3d841bea19693db589ad668279d7361f499fe7e408.json new file mode 100644 index 0000000..f94a546 --- /dev/null +++ b/backend/.sqlx/query-14514b4a260abf4b93429a3d841bea19693db589ad668279d7361f499fe7e408.json @@ -0,0 +1,52 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n community_id,\n overall_health_score::float8 AS overall_health_score,\n participation_score::float8 AS participation_score,\n efficiency_score::float8 AS efficiency_score,\n delegation_health_score::float8 AS delegation_health_score,\n power_concentration_risk\n FROM governance_health_indicators\n WHERE community_id = $1\n ORDER BY calculated_at DESC\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "community_id", + "type_info": "Uuid" + }, + 
{ + "ordinal": 1, + "name": "overall_health_score", + "type_info": "Float8" + }, + { + "ordinal": 2, + "name": "participation_score", + "type_info": "Float8" + }, + { + "ordinal": 3, + "name": "efficiency_score", + "type_info": "Float8" + }, + { + "ordinal": 4, + "name": "delegation_health_score", + "type_info": "Float8" + }, + { + "ordinal": 5, + "name": "power_concentration_risk", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + null, + null, + null, + null, + true + ] + }, + "hash": "14514b4a260abf4b93429a3d841bea19693db589ad668279d7361f499fe7e408" +} diff --git a/backend/.sqlx/query-150c49c9bd09af829b05abd06d6217aec3ed104f6286e2fc6bd741512cd254ff.json b/backend/.sqlx/query-150c49c9bd09af829b05abd06d6217aec3ed104f6286e2fc6bd741512cd254ff.json new file mode 100644 index 0000000..843416c --- /dev/null +++ b/backend/.sqlx/query-150c49c9bd09af829b05abd06d6217aec3ed104f6286e2fc6bd741512cd254ff.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COALESCE(\n (SELECT (cp.settings->>'auto_assign_mediators')::boolean\n FROM community_plugins cp\n JOIN plugins p ON p.id = cp.plugin_id\n WHERE cp.community_id = $1 AND p.name = 'conflict_resolution'),\n true\n ) AS \"auto_assign!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "auto_assign!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "150c49c9bd09af829b05abd06d6217aec3ed104f6286e2fc6bd741512cd254ff" +} diff --git a/backend/.sqlx/query-155f20f35f5b4df59e1a53d0aa30f2994b0a6f76fcd1ff443a6bbe13e3d41339.json b/backend/.sqlx/query-155f20f35f5b4df59e1a53d0aa30f2994b0a6f76fcd1ff443a6bbe13e3d41339.json new file mode 100644 index 0000000..7aa861b --- /dev/null +++ b/backend/.sqlx/query-155f20f35f5b4df59e1a53d0aa30f2994b0a6f76fcd1ff443a6bbe13e3d41339.json @@ -0,0 +1,46 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n COUNT(*) FILTER (WHERE reaction_type = 'agree') as agree,\n COUNT(*) FILTER (WHERE reaction_type = 'disagree') as disagree,\n COUNT(*) FILTER (WHERE reaction_type = 'insightful') as insightful,\n COUNT(*) FILTER (WHERE reaction_type = 'off_topic') as off_topic,\n COUNT(*) FILTER (WHERE reaction_type = 'constructive') as constructive\n FROM comment_reactions\n WHERE comment_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "agree", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "disagree", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "insightful", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "off_topic", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "constructive", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null, + null, + null, + null, + null + ] + }, + "hash": "155f20f35f5b4df59e1a53d0aa30f2994b0a6f76fcd1ff443a6bbe13e3d41339" +} diff --git a/backend/.sqlx/query-1648eb605182a87a4fadd7158f7012dfe8011a56d63ea069a53bfd697bcdd166.json b/backend/.sqlx/query-1648eb605182a87a4fadd7158f7012dfe8011a56d63ea069a53bfd697bcdd166.json new file mode 100644 index 0000000..630b5b9 --- /dev/null +++ b/backend/.sqlx/query-1648eb605182a87a4fadd7158f7012dfe8011a56d63ea069a53bfd697bcdd166.json @@ -0,0 +1,48 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT community_id, status as \"status: crate::models::ProposalStatus\", voting_method FROM proposals WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "community_id", + "type_info": "Uuid" + }, + { 
+ "ordinal": 1, + "name": "status: crate::models::ProposalStatus", + "type_info": { + "Custom": { + "name": "proposal_status", + "kind": { + "Enum": [ + "draft", + "discussion", + "voting", + "closed", + "archived", + "calculating" + ] + } + } + } + }, + { + "ordinal": 2, + "name": "voting_method", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "1648eb605182a87a4fadd7158f7012dfe8011a56d63ea069a53bfd697bcdd166" +} diff --git a/backend/.sqlx/query-171bd1d2e58286b428275404fbaaaedab5d2ee9d3c0b76e3182b6fd48493cdc4.json b/backend/.sqlx/query-171bd1d2e58286b428275404fbaaaedab5d2ee9d3c0b76e3182b6fd48493cdc4.json new file mode 100644 index 0000000..16e295b --- /dev/null +++ b/backend/.sqlx/query-171bd1d2e58286b428275404fbaaaedab5d2ee9d3c0b76e3182b6fd48493cdc4.json @@ -0,0 +1,27 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO mediation_notes (\n conflict_id, session_id, author_id, content, note_type, is_confidential\n ) VALUES ($1, $2, $3, $4, $5, $6)\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Uuid", + "Text", + "Varchar", + "Bool" + ] + }, + "nullable": [ + false + ] + }, + "hash": "171bd1d2e58286b428275404fbaaaedab5d2ee9d3c0b76e3182b6fd48493cdc4" +} diff --git a/backend/.sqlx/query-174c2a9d861710b570536d1350b4e70b13a9290b222eb89ce753a5702de63f0a.json b/backend/.sqlx/query-174c2a9d861710b570536d1350b4e70b13a9290b222eb89ce753a5702de63f0a.json new file mode 100644 index 0000000..a19fe86 --- /dev/null +++ b/backend/.sqlx/query-174c2a9d861710b570536d1350b4e70b13a9290b222eb89ce753a5702de63f0a.json @@ -0,0 +1,46 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT v.created_at as voted_at, po.label as option_label,\n p.id as proposal_id, p.title as proposal_title,\n c.name as community_name\n FROM votes v\n JOIN voting_identities vi ON v.voter_id = vi.id\n JOIN proposal_options po ON v.option_id = po.id\n JOIN proposals p ON po.proposal_id = p.id\n JOIN communities c ON p.community_id = c.id\n WHERE vi.user_id = $1\n ORDER BY v.created_at DESC\n LIMIT 20\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "voted_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 1, + "name": "option_label", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "proposal_title", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "community_name", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false + ] + }, + "hash": "174c2a9d861710b570536d1350b4e70b13a9290b222eb89ce753a5702de63f0a" +} diff --git a/backend/.sqlx/query-177d2a77cdaaa75e32bae953e520231ce713d5fb5f65f533660c773417be085a.json b/backend/.sqlx/query-177d2a77cdaaa75e32bae953e520231ce713d5fb5f65f533660c773417be085a.json new file mode 100644 index 0000000..fec9de3 --- /dev/null +++ b/backend/.sqlx/query-177d2a77cdaaa75e32bae953e520231ce713d5fb5f65f533660c773417be085a.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT approve_community($1, $2)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "approve_community", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "177d2a77cdaaa75e32bae953e520231ce713d5fb5f65f533660c773417be085a" +} diff 
--git a/backend/.sqlx/query-18c0fb05da45a3eea514f660bc4ac4d6aca71442645666a9c08db8f2a564ff6c.json b/backend/.sqlx/query-18c0fb05da45a3eea514f660bc4ac4d6aca71442645666a9c08db8f2a564ff6c.json new file mode 100644 index 0000000..fe734e3 --- /dev/null +++ b/backend/.sqlx/query-18c0fb05da45a3eea514f660bc4ac4d6aca71442645666a9c08db8f2a564ff6c.json @@ -0,0 +1,74 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, setup_completed, instance_name, platform_mode,\n registration_enabled, registration_mode,\n default_community_visibility, allow_private_communities,\n default_plugin_policy, default_moderation_mode\n FROM instance_settings LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "setup_completed", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "instance_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "platform_mode", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "registration_enabled", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "registration_mode", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "default_community_visibility", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "allow_private_communities", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "default_plugin_policy", + "type_info": "Varchar" + }, + { + "ordinal": 9, + "name": "default_moderation_mode", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + false, + false + ] + }, + "hash": "18c0fb05da45a3eea514f660bc4ac4d6aca71442645666a9c08db8f2a564ff6c" +} diff --git a/backend/.sqlx/query-18f2bb2a5454308a0ca13ad574947eaa83e52ab32ba48687f54cdce41b34a141.json b/backend/.sqlx/query-18f2bb2a5454308a0ca13ad574947eaa83e52ab32ba48687f54cdce41b34a141.json new file mode 100644 index 0000000..1d0db54 --- /dev/null +++ b/backend/.sqlx/query-18f2bb2a5454308a0ca13ad574947eaa83e52ab32ba48687f54cdce41b34a141.json @@ -0,0 +1,41 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT gi.id, gi.title, gi.description, gi.proposal_id\n FROM gitlab_issues gi\n JOIN gitlab_connections gc ON gi.connection_id = gc.id\n WHERE gi.id = $1 AND gc.community_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "proposal_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + false, + false, + true, + true + ] + }, + "hash": "18f2bb2a5454308a0ca13ad574947eaa83e52ab32ba48687f54cdce41b34a141" +} diff --git a/backend/.sqlx/query-1ad83f237ca18e09dfac8f654b6befac12576bf30f35f3bf626b7ccf46a4fb94.json b/backend/.sqlx/query-1ad83f237ca18e09dfac8f654b6befac12576bf30f35f3bf626b7ccf46a4fb94.json new file mode 100644 index 0000000..052e7e8 --- /dev/null +++ b/backend/.sqlx/query-1ad83f237ca18e09dfac8f654b6befac12576bf30f35f3bf626b7ccf46a4fb94.json @@ -0,0 +1,82 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT i.*, c.name as community_name\n FROM invitations i\n LEFT JOIN communities c ON c.id = i.community_id\n WHERE i.code = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "code", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": 
"created_by", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 5, + "name": "max_uses", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "uses_count", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "expires_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 8, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 9, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 10, + "name": "community_name", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + false + ] + }, + "hash": "1ad83f237ca18e09dfac8f654b6befac12576bf30f35f3bf626b7ccf46a4fb94" +} diff --git a/backend/.sqlx/query-1cf0926848f8f1fc1f62337b344f06d86408f94450fe0ed80a44268657e92c06.json b/backend/.sqlx/query-1cf0926848f8f1fc1f62337b344f06d86408f94450fe0ed80a44268657e92c06.json new file mode 100644 index 0000000..5524b24 --- /dev/null +++ b/backend/.sqlx/query-1cf0926848f8f1fc1f62337b344f06d86408f94450fe0ed80a44268657e92c06.json @@ -0,0 +1,84 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE voting_method_plugins SET\n is_active = COALESCE($2, is_active),\n is_default = COALESCE($3, is_default),\n updated_at = NOW()\n WHERE id = $1\n RETURNING id, name, display_name, description, icon, is_active, is_default,\n config_schema, default_config, complexity_level, supports_delegation", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "icon", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "is_default", + "type_info": "Bool" + }, + { + "ordinal": 7, + "name": "config_schema", + "type_info": "Jsonb" + }, + { + "ordinal": 8, + "name": "default_config", + "type_info": "Jsonb" + }, + { + "ordinal": 9, + "name": "complexity_level", + "type_info": "Varchar" + }, + { + "ordinal": 10, + "name": "supports_delegation", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Bool", + "Bool" + ] + }, + "nullable": [ + false, + false, + false, + true, + true, + false, + false, + true, + true, + true, + false + ] + }, + "hash": "1cf0926848f8f1fc1f62337b344f06d86408f94450fe0ed80a44268657e92c06" +} diff --git a/backend/.sqlx/query-1dcd2fc713e43bb6e785befc420fe94db65d42df35bd8015c550c2f8666664e7.json b/backend/.sqlx/query-1dcd2fc713e43bb6e785befc420fe94db65d42df35bd8015c550c2f8666664e7.json new file mode 100644 index 0000000..3bf9045 --- /dev/null +++ b/backend/.sqlx/query-1dcd2fc713e43bb6e785befc420fe94db65d42df35bd8015c550c2f8666664e7.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE rule_violations \n SET status = 'dismissed', \n reviewed_by = $2, \n reviewed_at = NOW(),\n review_notes = $3,\n resolved_at = NOW(),\n resolution_type = 'dismissed'\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "1dcd2fc713e43bb6e785befc420fe94db65d42df35bd8015c550c2f8666664e7" +} diff --git 
a/backend/.sqlx/query-1dedda4e97c32d56c88d29a247f788e5aac67cc9bc59fdd52b40926af3e5a671.json b/backend/.sqlx/query-1dedda4e97c32d56c88d29a247f788e5aac67cc9bc59fdd52b40926af3e5a671.json new file mode 100644 index 0000000..97bd853 --- /dev/null +++ b/backend/.sqlx/query-1dedda4e97c32d56c88d29a247f788e5aac67cc9bc59fdd52b40926af3e5a671.json @@ -0,0 +1,70 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, community_id, gitlab_url, project_path, is_active,\n sync_issues, sync_merge_requests, auto_create_proposals, last_synced_at\n FROM gitlab_connections WHERE community_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "gitlab_url", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "project_path", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "sync_issues", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "sync_merge_requests", + "type_info": "Bool" + }, + { + "ordinal": 7, + "name": "auto_create_proposals", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "last_synced_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + true + ] + }, + "hash": "1dedda4e97c32d56c88d29a247f788e5aac67cc9bc59fdd52b40926af3e5a671" +} diff --git a/backend/.sqlx/query-1efa9b54dff5200841ab35a40e525a8d0da40edca89aa78cd168ae3e854eabc2.json b/backend/.sqlx/query-1efa9b54dff5200841ab35a40e525a8d0da40edca89aa78cd168ae3e854eabc2.json new file mode 100644 index 0000000..9f9767e --- /dev/null +++ b/backend/.sqlx/query-1efa9b54dff5200841ab35a40e525a8d0da40edca89aa78cd168ae3e854eabc2.json @@ -0,0 +1,76 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT * FROM users WHERE username = $1 AND is_active = true", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "username", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "password_hash", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 7, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "is_admin", + "type_info": "Bool" + }, + { + "ordinal": 9, + "name": "invited_by", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + false, + false, + false, + false, + true + ] + }, + "hash": "1efa9b54dff5200841ab35a40e525a8d0da40edca89aa78cd168ae3e854eabc2" +} diff --git a/backend/.sqlx/query-1fd0e024053913d7598c5cdb19f407cfa7fef149553e18efc9e5149dd77e7f1c.json b/backend/.sqlx/query-1fd0e024053913d7598c5cdb19f407cfa7fef149553e18efc9e5149dd77e7f1c.json new file mode 100644 index 0000000..ce14339 --- /dev/null +++ b/backend/.sqlx/query-1fd0e024053913d7598c5cdb19f407cfa7fef149553e18efc9e5149dd77e7f1c.json @@ -0,0 +1,48 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT author_id, status as \"status: crate::models::ProposalStatus\", title FROM proposals WHERE id = $1", + "describe": { + "columns": [ + { + 
"ordinal": 0, + "name": "author_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "status: crate::models::ProposalStatus", + "type_info": { + "Custom": { + "name": "proposal_status", + "kind": { + "Enum": [ + "draft", + "discussion", + "voting", + "closed", + "archived", + "calculating" + ] + } + } + } + }, + { + "ordinal": 2, + "name": "title", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "1fd0e024053913d7598c5cdb19f407cfa7fef149553e18efc9e5149dd77e7f1c" +} diff --git a/backend/.sqlx/query-20414e126202ce893b8e967c585ac6ee9c6cc869033bbe7bbf959d80dcedf82b.json b/backend/.sqlx/query-20414e126202ce893b8e967c585ac6ee9c6cc869033bbe7bbf959d80dcedf82b.json new file mode 100644 index 0000000..1df96c1 --- /dev/null +++ b/backend/.sqlx/query-20414e126202ce893b8e967c585ac6ee9c6cc869033bbe7bbf959d80dcedf82b.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM comment_reactions WHERE comment_id = $1 AND user_id = $2 AND reaction_type = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "20414e126202ce893b8e967c585ac6ee9c6cc869033bbe7bbf959d80dcedf82b" +} diff --git a/backend/.sqlx/query-21412c1fff5f48e1ca0a5a67c49180efba50cdbc247a467474296f00a4f1f0f2.json b/backend/.sqlx/query-21412c1fff5f48e1ca0a5a67c49180efba50cdbc247a467474296f00a4f1f0f2.json new file mode 100644 index 0000000..0cedc74 --- /dev/null +++ b/backend/.sqlx/query-21412c1fff5f48e1ca0a5a67c49180efba50cdbc247a467474296f00a4f1f0f2.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT plugin_name, is_enabled, config FROM instance_plugins ORDER BY plugin_name", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "plugin_name", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "is_enabled", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "config", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + true + ] + }, + "hash": "21412c1fff5f48e1ca0a5a67c49180efba50cdbc247a467474296f00a4f1f0f2" +} diff --git a/backend/.sqlx/query-222ef714ee6f0d715f09643cff679f1c9d5132051dea4cf1f809f06470ed9b44.json b/backend/.sqlx/query-222ef714ee6f0d715f09643cff679f1c9d5132051dea4cf1f809f06470ed9b44.json new file mode 100644 index 0000000..d1e3f0a --- /dev/null +++ b/backend/.sqlx/query-222ef714ee6f0d715f09643cff679f1c9d5132051dea4cf1f809f06470ed9b44.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT option_id, SUM(credits) as total_credits, COUNT(DISTINCT voter_id) as voter_count\n FROM quadratic_votes WHERE proposal_id = $1 GROUP BY option_id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "option_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "total_credits", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "voter_count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + null, + null + ] + }, + "hash": "222ef714ee6f0d715f09643cff679f1c9d5132051dea4cf1f809f06470ed9b44" +} diff --git a/backend/.sqlx/query-2376b5dff203895eb0b78454a323beb8a90d4ea20ca201226d4b228a2b846c4f.json b/backend/.sqlx/query-2376b5dff203895eb0b78454a323beb8a90d4ea20ca201226d4b228a2b846c4f.json new file mode 100644 index 0000000..f6bc472 --- /dev/null +++ b/backend/.sqlx/query-2376b5dff203895eb0b78454a323beb8a90d4ea20ca201226d4b228a2b846c4f.json @@ -0,0 +1,40 @@ +{ + 
"db_name": "PostgreSQL", + "query": "SELECT id, username, display_name, created_at FROM users WHERE username = $1 AND is_active = true", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "username", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false, + true, + false + ] + }, + "hash": "2376b5dff203895eb0b78454a323beb8a90d4ea20ca201226d4b228a2b846c4f" +} diff --git a/backend/.sqlx/query-274cc3e62a3bc6659ae9e652d6ac15076f76a5fb7acad44f5e853678cfa2abf3.json b/backend/.sqlx/query-274cc3e62a3bc6659ae9e652d6ac15076f76a5fb7acad44f5e853678cfa2abf3.json new file mode 100644 index 0000000..3c362f7 --- /dev/null +++ b/backend/.sqlx/query-274cc3e62a3bc6659ae9e652d6ac15076f76a5fb7acad44f5e853678cfa2abf3.json @@ -0,0 +1,142 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n ml.id,\n ml.sequence_number,\n ml.community_id,\n ml.actor_user_id,\n u.username AS \"actor_username?\",\n ml.actor_role,\n ml.action_type::text AS \"action_type!\",\n ml.target_type,\n ml.target_id,\n ml.target_snapshot,\n ml.reason,\n ml.rule_reference,\n ml.evidence,\n ml.duration_hours,\n ml.expires_at,\n ml.decision_type,\n ml.vote_proposal_id,\n ml.vote_result,\n ml.previous_hash,\n ml.entry_hash,\n ml.created_at\n FROM moderation_ledger ml\n LEFT JOIN users u ON u.id = ml.actor_user_id\n WHERE ml.id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "sequence_number", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "actor_user_id", + "type_info": "Uuid" + }, + { + "ordinal": 4, + "name": "actor_username?", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "actor_role", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "action_type!", + "type_info": "Text" + }, + { + "ordinal": 7, + "name": "target_type", + "type_info": "Text" + }, + { + "ordinal": 8, + "name": "target_id", + "type_info": "Uuid" + }, + { + "ordinal": 9, + "name": "target_snapshot", + "type_info": "Jsonb" + }, + { + "ordinal": 10, + "name": "reason", + "type_info": "Text" + }, + { + "ordinal": 11, + "name": "rule_reference", + "type_info": "Text" + }, + { + "ordinal": 12, + "name": "evidence", + "type_info": "Jsonb" + }, + { + "ordinal": 13, + "name": "duration_hours", + "type_info": "Int4" + }, + { + "ordinal": 14, + "name": "expires_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 15, + "name": "decision_type", + "type_info": "Text" + }, + { + "ordinal": 16, + "name": "vote_proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 17, + "name": "vote_result", + "type_info": "Jsonb" + }, + { + "ordinal": 18, + "name": "previous_hash", + "type_info": "Text" + }, + { + "ordinal": 19, + "name": "entry_hash", + "type_info": "Text" + }, + { + "ordinal": 20, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + true, + false, + false, + false, + null, + false, + false, + true, + false, + true, + true, + true, + true, + false, + true, + true, + false, + false, + false + ] + }, + "hash": "274cc3e62a3bc6659ae9e652d6ac15076f76a5fb7acad44f5e853678cfa2abf3" +} diff --git 
a/backend/.sqlx/query-27723721585f30329494e2c80e4851b65cd980780e4b756019f73df0f9d28061.json b/backend/.sqlx/query-27723721585f30329494e2c80e4851b65cd980780e4b756019f73df0f9d28061.json new file mode 100644 index 0000000..d7293d9 --- /dev/null +++ b/backend/.sqlx/query-27723721585f30329494e2c80e4851b65cd980780e4b756019f73df0f9d28061.json @@ -0,0 +1,52 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id, proposal_id, summary_type::text AS \"summary_type!\",\n content, version, is_approved\n FROM deliberation_summaries\n WHERE proposal_id = $1\n ORDER BY summary_type", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "summary_type!", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "content", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "version", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "is_approved", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + null, + false, + false, + false + ] + }, + "hash": "27723721585f30329494e2c80e4851b65cd980780e4b756019f73df0f9d28061" +} diff --git a/backend/.sqlx/query-28654edaba50887bd4c7f698e538ed9ec70543b1ba6d0ecc5b675a1040148b6a.json b/backend/.sqlx/query-28654edaba50887bd4c7f698e538ed9ec70543b1ba6d0ecc5b675a1040148b6a.json new file mode 100644 index 0000000..064d408 --- /dev/null +++ b/backend/.sqlx/query-28654edaba50887bd4c7f698e538ed9ec70543b1ba6d0ecc5b675a1040148b6a.json @@ -0,0 +1,64 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT \n m.id, m.community_id, m.action_type, m.reason, m.details, m.created_at,\n mod_user.username as moderator_username,\n target_user.username as target_username\n FROM moderation_log m\n LEFT JOIN users mod_user ON m.moderator_id = mod_user.id\n LEFT JOIN users target_user ON m.target_user_id = target_user.id\n WHERE m.community_id = $1\n ORDER BY m.created_at DESC\n LIMIT 50\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "action_type", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "reason", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "details", + "type_info": "Jsonb" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "moderator_username", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "target_username", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + false, + false, + false + ] + }, + "hash": "28654edaba50887bd4c7f698e538ed9ec70543b1ba6d0ecc5b675a1040148b6a" +} diff --git a/backend/.sqlx/query-2a13dcd10626c05bfed533ba131e46f2ba7c132c9e5d58afe30ba11179096bce.json b/backend/.sqlx/query-2a13dcd10626c05bfed533ba131e46f2ba7c132c9e5d58afe30ba11179096bce.json new file mode 100644 index 0000000..99f517d --- /dev/null +++ b/backend/.sqlx/query-2a13dcd10626c05bfed533ba131e46f2ba7c132c9e5d58afe30ba11179096bce.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE communities\n SET settings = settings || $2::jsonb,\n updated_at = NOW()\n WHERE id = $1 AND is_active = true\n RETURNING settings as \"settings!: serde_json::Value\"\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "settings!: 
serde_json::Value", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Jsonb" + ] + }, + "nullable": [ + false + ] + }, + "hash": "2a13dcd10626c05bfed533ba131e46f2ba7c132c9e5d58afe30ba11179096bce" +} diff --git a/backend/.sqlx/query-2a288385b0b4f402803e22171965bedeeb6e40308d09f4a0d4f67258b11f7cd2.json b/backend/.sqlx/query-2a288385b0b4f402803e22171965bedeeb6e40308d09f4a0d4f67258b11f7cd2.json new file mode 100644 index 0000000..13e8023 --- /dev/null +++ b/backend/.sqlx/query-2a288385b0b4f402803e22171965bedeeb6e40308d09f4a0d4f67258b11f7cd2.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT p.id, p.title, p.status as \"status: String\", p.created_at, c.slug as community_slug\n FROM proposals p\n JOIN communities c ON p.community_id = c.id\n WHERE c.is_active = true\n ORDER BY p.created_at DESC\n LIMIT 10\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "status: String", + "type_info": { + "Custom": { + "name": "proposal_status", + "kind": { + "Enum": [ + "draft", + "discussion", + "voting", + "closed", + "archived", + "calculating" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 4, + "name": "community_slug", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + false + ] + }, + "hash": "2a288385b0b4f402803e22171965bedeeb6e40308d09f4a0d4f67258b11f7cd2" +} diff --git a/backend/.sqlx/query-2a7e168921469392cdde35401ab602fdb281f995ad75050af27a4130c311a920.json b/backend/.sqlx/query-2a7e168921469392cdde35401ab602fdb281f995ad75050af27a4130c311a920.json new file mode 100644 index 0000000..2bc9230 --- /dev/null +++ b/backend/.sqlx/query-2a7e168921469392cdde35401ab602fdb281f995ad75050af27a4130c311a920.json @@ -0,0 +1,44 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO community_voting_methods (community_id, voting_method_id, is_enabled, is_default, config)\n VALUES ($1, $2, $3, $4, $5)\n ON CONFLICT (community_id, voting_method_id) DO UPDATE SET\n is_enabled = COALESCE($3, community_voting_methods.is_enabled),\n is_default = COALESCE($4, community_voting_methods.is_default),\n config = COALESCE($5, community_voting_methods.config),\n updated_at = NOW()\n RETURNING id, is_enabled, is_default, config", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "is_enabled", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "is_default", + "type_info": "Bool" + }, + { + "ordinal": 3, + "name": "config", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Bool", + "Bool", + "Jsonb" + ] + }, + "nullable": [ + false, + false, + false, + true + ] + }, + "hash": "2a7e168921469392cdde35401ab602fdb281f995ad75050af27a4130c311a920" +} diff --git a/backend/.sqlx/query-2b38c2a93dda00a2ccd0ffe3d2cd83b9cf41d444b6f09b211a57dd6689a9e27e.json b/backend/.sqlx/query-2b38c2a93dda00a2ccd0ffe3d2cd83b9cf41d444b6f09b211a57dd6689a9e27e.json new file mode 100644 index 0000000..d241389 --- /dev/null +++ b/backend/.sqlx/query-2b38c2a93dda00a2ccd0ffe3d2cd83b9cf41d444b6f09b211a57dd6689a9e27e.json @@ -0,0 +1,88 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT d.id, d.delegator_id, d.delegate_id, u.username as delegator_username,\n d.scope as \"scope: DelegationScope\", d.community_id, 
d.topic_id, \n d.proposal_id, d.is_active, d.created_at\n FROM delegations d\n JOIN users u ON d.delegator_id = u.id\n WHERE d.delegate_id = $1 AND d.is_active = TRUE\n ORDER BY d.created_at DESC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "delegator_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "delegate_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "delegator_username", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "scope: DelegationScope", + "type_info": { + "Custom": { + "name": "delegation_scope", + "kind": { + "Enum": [ + "global", + "community", + "topic", + "proposal" + ] + } + } + } + }, + { + "ordinal": 5, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 6, + "name": "topic_id", + "type_info": "Uuid" + }, + { + "ordinal": 7, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 8, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 9, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + true, + true, + true, + false, + false + ] + }, + "hash": "2b38c2a93dda00a2ccd0ffe3d2cd83b9cf41d444b6f09b211a57dd6689a9e27e" +} diff --git a/backend/.sqlx/query-2c9bb8a5dca54ef476b2908dfad34371f6f34e4a8500f855d01209d32840f683.json b/backend/.sqlx/query-2c9bb8a5dca54ef476b2908dfad34371f6f34e4a8500f855d01209d32840f683.json new file mode 100644 index 0000000..3e57a90 --- /dev/null +++ b/backend/.sqlx/query-2c9bb8a5dca54ef476b2908dfad34371f6f34e4a8500f855d01209d32840f683.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(DISTINCT voter_id) FROM quadratic_votes WHERE proposal_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "2c9bb8a5dca54ef476b2908dfad34371f6f34e4a8500f855d01209d32840f683" +} diff --git a/backend/.sqlx/query-2ca4dcd48f294e84793ad46b960657a7d30cf790418baf4feab371fb45687627.json b/backend/.sqlx/query-2ca4dcd48f294e84793ad46b960657a7d30cf790418baf4feab371fb45687627.json new file mode 100644 index 0000000..23875c2 --- /dev/null +++ b/backend/.sqlx/query-2ca4dcd48f294e84793ad46b960657a7d30cf790418baf4feab371fb45687627.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE deliberation_summaries SET\n content = $2, key_points = $3, last_editor_id = $4,\n version = version + 1, edit_count = edit_count + 1,\n is_approved = false, updated_at = NOW()\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Text", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "2ca4dcd48f294e84793ad46b960657a7d30cf790418baf4feab371fb45687627" +} diff --git a/backend/.sqlx/query-2cb27026dc438da39c4132e9679d8312ed4909560135b5660c82e06ed61e6436.json b/backend/.sqlx/query-2cb27026dc438da39c4132e9679d8312ed4909560135b5660c82e06ed61e6436.json new file mode 100644 index 0000000..558fddf --- /dev/null +++ b/backend/.sqlx/query-2cb27026dc438da39c4132e9679d8312ed4909560135b5660c82e06ed61e6436.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO public_events (community_id, actor_user_id, plugin_name, event_type, payload)\n VALUES ($1, $2, NULL, 'plugin.policy_updated', $3)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Jsonb" 
+ ] + }, + "nullable": [] + }, + "hash": "2cb27026dc438da39c4132e9679d8312ed4909560135b5660c82e06ed61e6436" +} diff --git a/backend/.sqlx/query-2d6f151a2ed11e1a6efecfe1c1cd0242e3e28942cfbae4db71b4d9d709f96522.json b/backend/.sqlx/query-2d6f151a2ed11e1a6efecfe1c1cd0242e3e28942cfbae4db71b4d9d709f96522.json new file mode 100644 index 0000000..3149759 --- /dev/null +++ b/backend/.sqlx/query-2d6f151a2ed11e1a6efecfe1c1cd0242e3e28942cfbae4db71b4d9d709f96522.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM comments WHERE proposal_id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "2d6f151a2ed11e1a6efecfe1c1cd0242e3e28942cfbae4db71b4d9d709f96522" +} diff --git a/backend/.sqlx/query-2f7f1f0d1960020529d167240c56abc5bcc6fcd5615890279d2bafcde467fe61.json b/backend/.sqlx/query-2f7f1f0d1960020529d167240c56abc5bcc6fcd5615890279d2bafcde467fe61.json new file mode 100644 index 0000000..b3971c6 --- /dev/null +++ b/backend/.sqlx/query-2f7f1f0d1960020529d167240c56abc5bcc6fcd5615890279d2bafcde467fe61.json @@ -0,0 +1,52 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n is_valid,\n total_entries,\n broken_at_sequence,\n expected_hash,\n actual_hash,\n error_message\n FROM verify_ledger_chain($1)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "is_valid", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "total_entries", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "broken_at_sequence", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "expected_hash", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "actual_hash", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "error_message", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null, + null, + null, + null, + null, + null + ] + }, + "hash": "2f7f1f0d1960020529d167240c56abc5bcc6fcd5615890279d2bafcde467fe61" +} diff --git a/backend/.sqlx/query-2f80cf2d27e9af290578740b2752a6a0d8c41e62aab523e16cc8cf123e59166f.json b/backend/.sqlx/query-2f80cf2d27e9af290578740b2752a6a0d8c41e62aab523e16cc8cf123e59166f.json new file mode 100644 index 0000000..241bc2d --- /dev/null +++ b/backend/.sqlx/query-2f80cf2d27e9af290578740b2752a6a0d8c41e62aab523e16cc8cf123e59166f.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE proposal_amendments SET\n status = 'accepted',\n reviewed_by = $2,\n reviewed_at = NOW(),\n review_response = $3\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "2f80cf2d27e9af290578740b2752a6a0d8c41e62aab523e16cc8cf123e59166f" +} diff --git a/backend/.sqlx/query-31639da003249b784d3603126cb36a1ea592bd819b8f43d9cbd192d7d405c44d.json b/backend/.sqlx/query-31639da003249b784d3603126cb36a1ea592bd819b8f43d9cbd192d7d405c44d.json new file mode 100644 index 0000000..80f364f --- /dev/null +++ b/backend/.sqlx/query-31639da003249b784d3603126cb36a1ea592bd819b8f43d9cbd192d7d405c44d.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id FROM communities WHERE is_active = true", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "31639da003249b784d3603126cb36a1ea592bd819b8f43d9cbd192d7d405c44d" +} diff --git a/backend/.sqlx/query-3167dfbfa487a695ad0953ed59b206fe16833cfe890fdae3fd74d6fe2d11ced5.json 
b/backend/.sqlx/query-3167dfbfa487a695ad0953ed59b206fe16833cfe890fdae3fd74d6fe2d11ced5.json new file mode 100644 index 0000000..be25705 --- /dev/null +++ b/backend/.sqlx/query-3167dfbfa487a695ad0953ed59b206fe16833cfe890fdae3fd74d6fe2d11ced5.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT EXISTS(\n SELECT 1 FROM user_roles ur\n JOIN roles r ON r.id = ur.role_id\n WHERE ur.user_id = $1 AND r.name IN ('platform_admin', 'platform_moderator')\n ) AS \"exists!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "3167dfbfa487a695ad0953ed59b206fe16833cfe890fdae3fd74d6fe2d11ced5" +} diff --git a/backend/.sqlx/query-3205e4cb752317923752ee9ace04a93ada7aa8cbfc97a87f9ddf6376dc102146.json b/backend/.sqlx/query-3205e4cb752317923752ee9ace04a93ada7aa8cbfc97a87f9ddf6376dc102146.json new file mode 100644 index 0000000..898e6bb --- /dev/null +++ b/backend/.sqlx/query-3205e4cb752317923752ee9ace04a93ada7aa8cbfc97a87f9ddf6376dc102146.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE export_jobs SET status = 'expired'\n WHERE status = 'completed' AND download_expires_at < NOW()\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "3205e4cb752317923752ee9ace04a93ada7aa8cbfc97a87f9ddf6376dc102146" +} diff --git a/backend/.sqlx/query-3271a11a6d1ad3cc05deda6d3e33bd350180c0b20da49b88c5bc04d5f06eb927.json b/backend/.sqlx/query-3271a11a6d1ad3cc05deda6d3e33bd350180c0b20da49b88c5bc04d5f06eb927.json new file mode 100644 index 0000000..9fa0850 --- /dev/null +++ b/backend/.sqlx/query-3271a11a6d1ad3cc05deda6d3e33bd350180c0b20da49b88c5bc04d5f06eb927.json @@ -0,0 +1,64 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n total_delegations,\n unique_delegators,\n unique_delegates,\n max_chain_depth,\n avg_chain_depth::float8 AS avg_chain_depth,\n top_10_delegate_share::float8 AS top_10_share,\n herfindahl_index::float8 AS hhi,\n effective_delegates\n FROM delegation_analytics\n WHERE community_id = $1\n ORDER BY snapshot_date DESC\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "total_delegations", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "unique_delegators", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "unique_delegates", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "max_chain_depth", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "avg_chain_depth", + "type_info": "Float8" + }, + { + "ordinal": 5, + "name": "top_10_share", + "type_info": "Float8" + }, + { + "ordinal": 6, + "name": "hhi", + "type_info": "Float8" + }, + { + "ordinal": 7, + "name": "effective_delegates", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + null, + null, + null, + true + ] + }, + "hash": "3271a11a6d1ad3cc05deda6d3e33bd350180c0b20da49b88c5bc04d5f06eb927" +} diff --git a/backend/.sqlx/query-32e740982952a1d3756117f496bcff626f7bcb438a50fa8eef80e013a84e8048.json b/backend/.sqlx/query-32e740982952a1d3756117f496bcff626f7bcb438a50fa8eef80e013a84e8048.json new file mode 100644 index 0000000..c13e015 --- /dev/null +++ b/backend/.sqlx/query-32e740982952a1d3756117f496bcff626f7bcb438a50fa8eef80e013a84e8048.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO 
federated_proposals \n (federation_id, local_proposal_id, remote_proposal_id, is_origin_local)\n VALUES ($1, $2, $2, true)\n ON CONFLICT DO NOTHING", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "32e740982952a1d3756117f496bcff626f7bcb438a50fa8eef80e013a84e8048" +} diff --git a/backend/.sqlx/query-3316c52a6ddc9891f4482e6f5df622147b1be87d0723bf709d6d7def94eb1a0c.json b/backend/.sqlx/query-3316c52a6ddc9891f4482e6f5df622147b1be87d0723bf709d6d7def94eb1a0c.json new file mode 100644 index 0000000..1a65716 --- /dev/null +++ b/backend/.sqlx/query-3316c52a6ddc9891f4482e6f5df622147b1be87d0723bf709d6d7def94eb1a0c.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE delegations \n SET is_active = FALSE, revoked_at = NOW()\n WHERE delegator_id = $1 \n AND scope = $2::delegation_scope\n AND is_active = TRUE\n AND (community_id = $3 OR ($3 IS NULL AND community_id IS NULL))\n AND (topic_id = $4 OR ($4 IS NULL AND topic_id IS NULL))\n AND (proposal_id = $5 OR ($5 IS NULL AND proposal_id IS NULL))", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + { + "Custom": { + "name": "delegation_scope", + "kind": { + "Enum": [ + "global", + "community", + "topic", + "proposal" + ] + } + } + }, + "Uuid", + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "3316c52a6ddc9891f4482e6f5df622147b1be87d0723bf709d6d7def94eb1a0c" +} diff --git a/backend/.sqlx/query-33281e190171ace099ef3209d49ac42b6527824947520336d7d11594ab56b265.json b/backend/.sqlx/query-33281e190171ace099ef3209d49ac42b6527824947520336d7d11594ab56b265.json new file mode 100644 index 0000000..26b1460 --- /dev/null +++ b/backend/.sqlx/query-33281e190171ace099ef3209d49ac42b6527824947520336d7d11594ab56b265.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO public_events (community_id, actor_user_id, plugin_name, event_type, payload)\n VALUES ($1, $2, NULL, 'plugin.package_uploaded', $3)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "33281e190171ace099ef3209d49ac42b6527824947520336d7d11594ab56b265" +} diff --git a/backend/.sqlx/query-34ac1a3b360d0e99c80d59aad7497794803b0ca05cb498e7715344bc9d512084.json b/backend/.sqlx/query-34ac1a3b360d0e99c80d59aad7497794803b0ca05cb498e7715344bc9d512084.json new file mode 100644 index 0000000..8f45d21 --- /dev/null +++ b/backend/.sqlx/query-34ac1a3b360d0e99c80d59aad7497794803b0ca05cb498e7715344bc9d512084.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE rule_violations \n SET status = 'pending_vote', \n reviewed_by = $2, \n reviewed_at = NOW(),\n review_notes = $3,\n escalation_level = $4\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "34ac1a3b360d0e99c80d59aad7497794803b0ca05cb498e7715344bc9d512084" +} diff --git a/backend/.sqlx/query-35b2d2fb4f7db1ce97557c01de71c96e64862e3955e1e2d5996581eb4b871f3d.json b/backend/.sqlx/query-35b2d2fb4f7db1ce97557c01de71c96e64862e3955e1e2d5996581eb4b871f3d.json new file mode 100644 index 0000000..50e1758 --- /dev/null +++ b/backend/.sqlx/query-35b2d2fb4f7db1ce97557c01de71c96e64862e3955e1e2d5996581eb4b871f3d.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT user_has_permission($1, 'voting.methods.manage', $2)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "user_has_permission", + 
"type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "35b2d2fb4f7db1ce97557c01de71c96e64862e3955e1e2d5996581eb4b871f3d" +} diff --git a/backend/.sqlx/query-3647ec42782a77237f075f172b4d435ac8d6325e60696018386ef1509e499a6d.json b/backend/.sqlx/query-3647ec42782a77237f075f172b4d435ac8d6325e60696018386ef1509e499a6d.json new file mode 100644 index 0000000..5d939a4 --- /dev/null +++ b/backend/.sqlx/query-3647ec42782a77237f075f172b4d435ac8d6325e60696018386ef1509e499a6d.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO federation_sync_log (federation_id, operation_type, direction, success)\n VALUES ($1, 'local_approval', 'push', true)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "3647ec42782a77237f075f172b4d435ac8d6325e60696018386ef1509e499a6d" +} diff --git a/backend/.sqlx/query-39926d0308364c2a13c987adbc8b364253e9d8d350d69a1e1d3efd1c2e424d81.json b/backend/.sqlx/query-39926d0308364c2a13c987adbc8b364253e9d8d350d69a1e1d3efd1c2e424d81.json new file mode 100644 index 0000000..e38565e --- /dev/null +++ b/backend/.sqlx/query-39926d0308364c2a13c987adbc8b364253e9d8d350d69a1e1d3efd1c2e424d81.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT community_id, rule_id, escalation_level FROM rule_violations WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "rule_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "escalation_level", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "39926d0308364c2a13c987adbc8b364253e9d8d350d69a1e1d3efd1c2e424d81" +} diff --git a/backend/.sqlx/query-3a395c6e9a23a87fbfa1b3d7a06fbe907b32e3cfb6af327cec1533b7762bd4cd.json b/backend/.sqlx/query-3a395c6e9a23a87fbfa1b3d7a06fbe907b32e3cfb6af327cec1533b7762bd4cd.json new file mode 100644 index 0000000..89843f8 --- /dev/null +++ b/backend/.sqlx/query-3a395c6e9a23a87fbfa1b3d7a06fbe907b32e3cfb6af327cec1533b7762bd4cd.json @@ -0,0 +1,151 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, community_id, author_id, title, description,\n status as \"status: _\", voting_method, voting_starts_at, voting_ends_at,\n created_at, updated_at, deliberation_phase as \"deliberation_phase: _\",\n inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at,\n min_read_time_seconds, facilitator_id\n FROM proposals WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "author_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "status: _", + "type_info": { + "Custom": { + "name": "proposal_status", + "kind": { + "Enum": [ + "draft", + "discussion", + "voting", + "closed", + "archived", + "calculating" + ] + } + } + } + }, + { + "ordinal": 6, + "name": "voting_method", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "voting_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 8, + "name": "voting_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 
10, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 11, + "name": "deliberation_phase: _", + "type_info": { + "Custom": { + "name": "deliberation_phase", + "kind": { + "Enum": [ + "drafting", + "informing", + "discussing", + "voting", + "concluded" + ] + } + } + } + }, + { + "ordinal": 12, + "name": "inform_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 13, + "name": "inform_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 14, + "name": "discuss_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 15, + "name": "discuss_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 16, + "name": "min_read_time_seconds", + "type_info": "Int4" + }, + { + "ordinal": 17, + "name": "facilitator_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "3a395c6e9a23a87fbfa1b3d7a06fbe907b32e3cfb6af327cec1533b7762bd4cd" +} diff --git a/backend/.sqlx/query-3a8154bdd76daa157200feea1cce96d67bdb7b7c824088ca625105e96495938f.json b/backend/.sqlx/query-3a8154bdd76daa157200feea1cce96d67bdb7b7c824088ca625105e96495938f.json new file mode 100644 index 0000000..56c865f --- /dev/null +++ b/backend/.sqlx/query-3a8154bdd76daa157200feea1cce96d67bdb7b7c824088ca625105e96495938f.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id, proposal_id, template_id, current_phase_id, \n status, started_at, completed_at\n FROM workflow_instances\n WHERE proposal_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "template_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "current_phase_id", + "type_info": "Uuid" + }, + { + "ordinal": 4, + "name": "status", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "started_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "completed_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + true, + false, + false, + true + ] + }, + "hash": "3a8154bdd76daa157200feea1cce96d67bdb7b7c824088ca625105e96495938f" +} diff --git a/backend/.sqlx/query-3b034411e6338ddaeae97589f0d0ab13cd674f852ad61b643a2e593d252767f1.json b/backend/.sqlx/query-3b034411e6338ddaeae97589f0d0ab13cd674f852ad61b643a2e593d252767f1.json new file mode 100644 index 0000000..dbd56aa --- /dev/null +++ b/backend/.sqlx/query-3b034411e6338ddaeae97589f0d0ab13cd674f852ad61b643a2e593d252767f1.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT name FROM plugins WHERE is_active = true AND name = ANY($1)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "name", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "TextArray" + ] + }, + "nullable": [ + false + ] + }, + "hash": "3b034411e6338ddaeae97589f0d0ab13cd674f852ad61b643a2e593d252767f1" +} diff --git a/backend/.sqlx/query-3c118e418b94a2d609cde609b53fbbb1a0055fe46397599f7c5eade17b0a5360.json b/backend/.sqlx/query-3c118e418b94a2d609cde609b53fbbb1a0055fe46397599f7c5eade17b0a5360.json new file mode 100644 index 0000000..2780884 --- /dev/null +++ b/backend/.sqlx/query-3c118e418b94a2d609cde609b53fbbb1a0055fe46397599f7c5eade17b0a5360.json @@ -0,0 +1,16 @@ +{ + "db_name": 
"PostgreSQL", + "query": "DELETE FROM user_roles WHERE user_id = $1 AND role_id = $2 AND community_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "3c118e418b94a2d609cde609b53fbbb1a0055fe46397599f7c5eade17b0a5360" +} diff --git a/backend/.sqlx/query-3c323153097726bf967b733fc7cb40173a1c64a4b7535a904445e5d02dbe2f0f.json b/backend/.sqlx/query-3c323153097726bf967b733fc7cb40173a1c64a4b7535a904445e5d02dbe2f0f.json new file mode 100644 index 0000000..d8b7a9e --- /dev/null +++ b/backend/.sqlx/query-3c323153097726bf967b733fc7cb40173a1c64a4b7535a904445e5d02dbe2f0f.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO proposal_options (proposal_id, label, sort_order) VALUES ($1, $2, $3)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "3c323153097726bf967b733fc7cb40173a1c64a4b7535a904445e5d02dbe2f0f" +} diff --git a/backend/.sqlx/query-3c47d27c939cb21fbcc621825d1ef59d5284c3b6db07e2b11d772fefbcf1650b.json b/backend/.sqlx/query-3c47d27c939cb21fbcc621825d1ef59d5284c3b6db07e2b11d772fefbcf1650b.json new file mode 100644 index 0000000..95d3962 --- /dev/null +++ b/backend/.sqlx/query-3c47d27c939cb21fbcc621825d1ef59d5284c3b6db07e2b11d772fefbcf1650b.json @@ -0,0 +1,72 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id, proposal_id, stance::text AS \"stance!\",\n title, content, author_id,\n upvotes, downvotes, quality_score::float8 AS quality_score\n FROM deliberation_arguments\n WHERE proposal_id = $1 AND stance::text = $2 AND NOT is_hidden AND parent_id IS NULL\n ORDER BY quality_score DESC NULLS LAST\n LIMIT $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "stance!", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "content", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "author_id", + "type_info": "Uuid" + }, + { + "ordinal": 6, + "name": "upvotes", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "downvotes", + "type_info": "Int4" + }, + { + "ordinal": 8, + "name": "quality_score", + "type_info": "Float8" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "Int8" + ] + }, + "nullable": [ + false, + false, + null, + false, + false, + false, + false, + false, + null + ] + }, + "hash": "3c47d27c939cb21fbcc621825d1ef59d5284c3b6db07e2b11d772fefbcf1650b" +} diff --git a/backend/.sqlx/query-3c769ff42e6b6e2df033fa05cf6d8ae1e9fba6320943f65c95613f56a8ea2a13.json b/backend/.sqlx/query-3c769ff42e6b6e2df033fa05cf6d8ae1e9fba6320943f65c95613f56a8ea2a13.json new file mode 100644 index 0000000..768f44b --- /dev/null +++ b/backend/.sqlx/query-3c769ff42e6b6e2df033fa05cf6d8ae1e9fba6320943f65c95613f56a8ea2a13.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO proposal_resource_reads (resource_id, user_id)\n VALUES ($1, $2)\n ON CONFLICT (resource_id, user_id) DO NOTHING", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "3c769ff42e6b6e2df033fa05cf6d8ae1e9fba6320943f65c95613f56a8ea2a13" +} diff --git a/backend/.sqlx/query-3d523a0728ea6e9df275520675e3caab1963c61d5d6c3ad468cc51568d4bb5f8.json 
b/backend/.sqlx/query-3d523a0728ea6e9df275520675e3caab1963c61d5d6c3ad468cc51568d4bb5f8.json new file mode 100644 index 0000000..a3b2708 --- /dev/null +++ b/backend/.sqlx/query-3d523a0728ea6e9df275520675e3caab1963c61d5d6c3ad468cc51568d4bb5f8.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT name FROM communities WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "name", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "3d523a0728ea6e9df275520675e3caab1963c61d5d6c3ad468cc51568d4bb5f8" +} diff --git a/backend/.sqlx/query-3d9153f242fa24637d71a4b4f0a76edee15892248acb6b281ffdbab11a4bff0f.json b/backend/.sqlx/query-3d9153f242fa24637d71a4b4f0a76edee15892248acb6b281ffdbab11a4bff0f.json new file mode 100644 index 0000000..2a71331 --- /dev/null +++ b/backend/.sqlx/query-3d9153f242fa24637d71a4b4f0a76edee15892248acb6b281ffdbab11a4bff0f.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT platform_mode FROM instance_settings LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "platform_mode", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "3d9153f242fa24637d71a4b4f0a76edee15892248acb6b281ffdbab11a4bff0f" +} diff --git a/backend/.sqlx/query-3e0e0fe2c4e51b68025965560101643e7e035b782b1cd0d110803664c5831fe3.json b/backend/.sqlx/query-3e0e0fe2c4e51b68025965560101643e7e035b782b1cd0d110803664c5831fe3.json new file mode 100644 index 0000000..a8cb05f --- /dev/null +++ b/backend/.sqlx/query-3e0e0fe2c4e51b68025965560101643e7e035b782b1cd0d110803664c5831fe3.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO public_events (community_id, actor_user_id, plugin_name, event_type, payload)\n VALUES ($1, $2, $3, 'plugin.settings_updated', $4)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Varchar", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "3e0e0fe2c4e51b68025965560101643e7e035b782b1cd0d110803664c5831fe3" +} diff --git a/backend/.sqlx/query-3e7754fe4fe21c7fc50435ad222ed68617f8e8c4f2a21b202ee95d6f76ae0d32.json b/backend/.sqlx/query-3e7754fe4fe21c7fc50435ad222ed68617f8e8c4f2a21b202ee95d6f76ae0d32.json new file mode 100644 index 0000000..64a8f6c --- /dev/null +++ b/backend/.sqlx/query-3e7754fe4fe21c7fc50435ad222ed68617f8e8c4f2a21b202ee95d6f76ae0d32.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COALESCE(\n (SELECT vm.name FROM community_voting_methods cvm \n JOIN voting_method_plugins vm ON vm.id = cvm.voting_method_id\n WHERE cvm.community_id = $1 AND cvm.is_default = true\n LIMIT 1),\n (SELECT name FROM voting_method_plugins WHERE is_default = true LIMIT 1),\n 'approval'\n ) as \"method!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "method!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "3e7754fe4fe21c7fc50435ad222ed68617f8e8c4f2a21b202ee95d6f76ae0d32" +} diff --git a/backend/.sqlx/query-403544e4819bc7bcc4ac51c997fb9ec74c3b3fa2c6f2b170ab5b8c8c1d9c65d4.json b/backend/.sqlx/query-403544e4819bc7bcc4ac51c997fb9ec74c3b3fa2c6f2b170ab5b8c8c1d9c65d4.json new file mode 100644 index 0000000..2d334a5 --- /dev/null +++ b/backend/.sqlx/query-403544e4819bc7bcc4ac51c997fb9ec74c3b3fa2c6f2b170ab5b8c8c1d9c65d4.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE delegate_profiles SET total_delegators = 
GREATEST(0, total_delegators - 1) WHERE user_id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "403544e4819bc7bcc4ac51c997fb9ec74c3b3fa2c6f2b170ab5b8c8c1d9c65d4" +} diff --git a/backend/.sqlx/query-41273682b6b15534294e7f90907c7a516cb658e276a50b44f72a2b739ee9e04b.json b/backend/.sqlx/query-41273682b6b15534294e7f90907c7a516cb658e276a50b44f72a2b739ee9e04b.json new file mode 100644 index 0000000..9856156 --- /dev/null +++ b/backend/.sqlx/query-41273682b6b15534294e7f90907c7a516cb658e276a50b44f72a2b739ee9e04b.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE community_voting_methods SET is_default = FALSE WHERE community_id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "41273682b6b15534294e7f90907c7a516cb658e276a50b44f72a2b739ee9e04b" +} diff --git a/backend/.sqlx/query-42e0fab065d541e407129d44f86f1d1d46387494b992c81f7a334e72c42c98ca.json b/backend/.sqlx/query-42e0fab065d541e407129d44f86f1d1d46387494b992c81f7a334e72c42c98ca.json new file mode 100644 index 0000000..c8a2884 --- /dev/null +++ b/backend/.sqlx/query-42e0fab065d541e407129d44f86f1d1d46387494b992c81f7a334e72c42c98ca.json @@ -0,0 +1,81 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO invitations (code, created_by, email, community_id, max_uses, expires_at)\n VALUES ($1, $2, $3, $4, $5, $6)\n RETURNING id, code, created_by, email, community_id, max_uses, uses_count, \n expires_at, is_active, created_at", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "code", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "created_by", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 5, + "name": "max_uses", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "uses_count", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "expires_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 8, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 9, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Uuid", + "Varchar", + "Uuid", + "Int4", + "Timestamptz" + ] + }, + "nullable": [ + false, + false, + false, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "42e0fab065d541e407129d44f86f1d1d46387494b992c81f7a334e72c42c98ca" +} diff --git a/backend/.sqlx/query-43a864f79078c6891186f1d20600983ee080a7033137c0d715880cf20fc7776a.json b/backend/.sqlx/query-43a864f79078c6891186f1d20600983ee080a7033137c0d715880cf20fc7776a.json new file mode 100644 index 0000000..09265e4 --- /dev/null +++ b/backend/.sqlx/query-43a864f79078c6891186f1d20600983ee080a7033137c0d715880cf20fc7776a.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO conflict_history (conflict_id, action_type, action_description, actor_id)\n VALUES ($1, 'conflict_reported', 'Conflict case created', $2)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "43a864f79078c6891186f1d20600983ee080a7033137c0d715880cf20fc7776a" +} diff --git a/backend/.sqlx/query-4619c7159d4bec14be35a308b0a867e3d5b0687a46a456adf6e8dcd1582d3849.json b/backend/.sqlx/query-4619c7159d4bec14be35a308b0a867e3d5b0687a46a456adf6e8dcd1582d3849.json new file 
mode 100644 index 0000000..805c866 --- /dev/null +++ b/backend/.sqlx/query-4619c7159d4bec14be35a308b0a867e3d5b0687a46a456adf6e8dcd1582d3849.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO comment_reactions (comment_id, user_id, reaction_type) VALUES ($1, $2, $3)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "4619c7159d4bec14be35a308b0a867e3d5b0687a46a456adf6e8dcd1582d3849" +} diff --git a/backend/.sqlx/query-47ea00355af927b41b9c39e55791042049a4bea2d1fab669b4ef6fee3f7a3497.json b/backend/.sqlx/query-47ea00355af927b41b9c39e55791042049a4bea2d1fab669b4ef6fee3f7a3497.json new file mode 100644 index 0000000..b1b4744 --- /dev/null +++ b/backend/.sqlx/query-47ea00355af927b41b9c39e55791042049a4bea2d1fab669b4ef6fee3f7a3497.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT plugin_name, is_core, default_enabled FROM default_plugins", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "plugin_name", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "is_core", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "default_enabled", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "47ea00355af927b41b9c39e55791042049a4bea2d1fab669b4ef6fee3f7a3497" +} diff --git a/backend/.sqlx/query-47ea0156899876339e02f4769e666c24d5d0fee1f18869d7adcb4aeb007076c8.json b/backend/.sqlx/query-47ea0156899876339e02f4769e666c24d5d0fee1f18869d7adcb4aeb007076c8.json new file mode 100644 index 0000000..a418384 --- /dev/null +++ b/backend/.sqlx/query-47ea0156899876339e02f4769e666c24d5d0fee1f18869d7adcb4aeb007076c8.json @@ -0,0 +1,37 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO instance_plugins (plugin_name, is_enabled, config, enabled_by, enabled_at)\n VALUES ($1, $2, $3, $4, NOW())\n ON CONFLICT (plugin_name) DO UPDATE SET\n is_enabled = COALESCE($2, instance_plugins.is_enabled),\n config = COALESCE($3, instance_plugins.config),\n updated_at = NOW()\n RETURNING plugin_name, is_enabled, config", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "plugin_name", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "is_enabled", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "config", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Bool", + "Jsonb", + "Uuid" + ] + }, + "nullable": [ + false, + false, + true + ] + }, + "hash": "47ea0156899876339e02f4769e666c24d5d0fee1f18869d7adcb4aeb007076c8" +} diff --git a/backend/.sqlx/query-484e7ea64028000ccadd135f9806fd190f860d8ac1cd1c56828fa5be279f57c9.json b/backend/.sqlx/query-484e7ea64028000ccadd135f9806fd190f860d8ac1cd1c56828fa5be279f57c9.json new file mode 100644 index 0000000..b1973e3 --- /dev/null +++ b/backend/.sqlx/query-484e7ea64028000ccadd135f9806fd190f860d8ac1cd1c56828fa5be279f57c9.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT accepting_delegations FROM delegate_profiles WHERE user_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "accepting_delegations", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "484e7ea64028000ccadd135f9806fd190f860d8ac1cd1c56828fa5be279f57c9" +} diff --git a/backend/.sqlx/query-4a92221917041b95e9a27e511ca70b404313e7dba1faf19bb1eb1347b2208587.json 
b/backend/.sqlx/query-4a92221917041b95e9a27e511ca70b404313e7dba1faf19bb1eb1347b2208587.json new file mode 100644 index 0000000..51e7313 --- /dev/null +++ b/backend/.sqlx/query-4a92221917041b95e9a27e511ca70b404313e7dba1faf19bb1eb1347b2208587.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO role_permissions (role_id, permission_id, granted)\n SELECT $1, p.id, TRUE FROM permissions p WHERE p.name = $2\n ON CONFLICT (role_id, permission_id) DO UPDATE SET granted = TRUE", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "4a92221917041b95e9a27e511ca70b404313e7dba1faf19bb1eb1347b2208587" +} diff --git a/backend/.sqlx/query-4aa9e85bb2280a67f94914b3de749b262b16223d467464079ce36509b857f306.json b/backend/.sqlx/query-4aa9e85bb2280a67f94914b3de749b262b16223d467464079ce36509b857f306.json new file mode 100644 index 0000000..31c1e56 --- /dev/null +++ b/backend/.sqlx/query-4aa9e85bb2280a67f94914b3de749b262b16223d467464079ce36509b857f306.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO community_plugins (community_id, plugin_id, settings, is_active)\n SELECT $1, p.id, '{}'::jsonb, true\n FROM plugins p\n WHERE p.is_active = true\n AND p.name = ANY($2)\n ON CONFLICT (community_id, plugin_id)\n DO UPDATE SET is_active = EXCLUDED.is_active\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "TextArray" + ] + }, + "nullable": [] + }, + "hash": "4aa9e85bb2280a67f94914b3de749b262b16223d467464079ce36509b857f306" +} diff --git a/backend/.sqlx/query-4cc6b7b3343bf2afda642707ede4a1e1df7904a137b6b3c34bfdda71088516b7.json b/backend/.sqlx/query-4cc6b7b3343bf2afda642707ede4a1e1df7904a137b6b3c34bfdda71088516b7.json new file mode 100644 index 0000000..c17b282 --- /dev/null +++ b/backend/.sqlx/query-4cc6b7b3343bf2afda642707ede4a1e1df7904a137b6b3c34bfdda71088516b7.json @@ -0,0 +1,70 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, proposal_id, title, resource_type, content, url, author_name, sort_order, created_at\n FROM proposal_resources\n WHERE proposal_id = $1\n ORDER BY sort_order", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "resource_type", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "content", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "url", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "author_name", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "sort_order", + "type_info": "Int4" + }, + { + "ordinal": 8, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + true, + true, + false, + false + ] + }, + "hash": "4cc6b7b3343bf2afda642707ede4a1e1df7904a137b6b3c34bfdda71088516b7" +} diff --git a/backend/.sqlx/query-4ce35bd4ce5c2dde738b87eb3aa45c21926d67e156fda5cbdcaa5e1db2ebe785.json b/backend/.sqlx/query-4ce35bd4ce5c2dde738b87eb3aa45c21926d67e156fda5cbdcaa5e1db2ebe785.json new file mode 100644 index 0000000..040676f --- /dev/null +++ b/backend/.sqlx/query-4ce35bd4ce5c2dde738b87eb3aa45c21926d67e156fda5cbdcaa5e1db2ebe785.json @@ -0,0 +1,76 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n wi.id,\n wi.status,\n wt.name AS workflow_name,\n wp.name AS 
current_phase_name,\n wp.phase_type::text AS current_phase_type,\n pi.scheduled_end,\n pi.participant_count,\n pi.quorum_reached,\n (SELECT COUNT(*) FROM workflow_phases WHERE template_id = wi.template_id) AS total_phases,\n (SELECT COUNT(*) FROM phase_instances WHERE workflow_instance_id = wi.id AND status = 'completed') AS completed_phases\n FROM workflow_instances wi\n JOIN workflow_templates wt ON wt.id = wi.template_id\n LEFT JOIN workflow_phases wp ON wp.id = wi.current_phase_id\n LEFT JOIN phase_instances pi ON pi.workflow_instance_id = wi.id AND pi.phase_id = wi.current_phase_id\n WHERE wi.id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "status", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "workflow_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "current_phase_name", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "current_phase_type", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "scheduled_end", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "participant_count", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "quorum_reached", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "total_phases", + "type_info": "Int8" + }, + { + "ordinal": 9, + "name": "completed_phases", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + null, + true, + false, + false, + null, + null + ] + }, + "hash": "4ce35bd4ce5c2dde738b87eb3aa45c21926d67e156fda5cbdcaa5e1db2ebe785" +} diff --git a/backend/.sqlx/query-4e324f0141e12a57df99f729cccf6133014b6669bda8d3b75ca07bb2a47567b2.json b/backend/.sqlx/query-4e324f0141e12a57df99f729cccf6133014b6669bda8d3b75ca07bb2a47567b2.json new file mode 100644 index 0000000..20c36e6 --- /dev/null +++ b/backend/.sqlx/query-4e324f0141e12a57df99f729cccf6133014b6669bda8d3b75ca07bb2a47567b2.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id FROM deliberation_summaries WHERE proposal_id = $1 AND summary_type = $2::summary_type", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + { + "Custom": { + "name": "summary_type", + "kind": { + "Enum": [ + "executive", + "pro_arguments", + "con_arguments", + "consensus", + "contention", + "questions", + "full" + ] + } + } + } + ] + }, + "nullable": [ + false + ] + }, + "hash": "4e324f0141e12a57df99f729cccf6133014b6669bda8d3b75ca07bb2a47567b2" +} diff --git a/backend/.sqlx/query-4ed6eff19ec25d1e627ff27d656934c56ac363260ba540fc0d225b7ce7da48d3.json b/backend/.sqlx/query-4ed6eff19ec25d1e627ff27d656934c56ac363260ba540fc0d225b7ce7da48d3.json new file mode 100644 index 0000000..3f06c81 --- /dev/null +++ b/backend/.sqlx/query-4ed6eff19ec25d1e627ff27d656934c56ac363260ba540fc0d225b7ce7da48d3.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT user_has_permission($1, 'community.roles.manage', $2)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "user_has_permission", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "4ed6eff19ec25d1e627ff27d656934c56ac363260ba540fc0d225b7ce7da48d3" +} diff --git a/backend/.sqlx/query-4f6d0f4d874780e2729f6e6f2e50ba35dddf8cfaa1de0f4213e2ca70aad2d3cc.json b/backend/.sqlx/query-4f6d0f4d874780e2729f6e6f2e50ba35dddf8cfaa1de0f4213e2ca70aad2d3cc.json 
new file mode 100644 index 0000000..b584fe6 --- /dev/null +++ b/backend/.sqlx/query-4f6d0f4d874780e2729f6e6f2e50ba35dddf8cfaa1de0f4213e2ca70aad2d3cc.json @@ -0,0 +1,42 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT author_id, status as \"status: crate::models::ProposalStatus\" FROM proposals WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "author_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "status: crate::models::ProposalStatus", + "type_info": { + "Custom": { + "name": "proposal_status", + "kind": { + "Enum": [ + "draft", + "discussion", + "voting", + "closed", + "archived", + "calculating" + ] + } + } + } + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "4f6d0f4d874780e2729f6e6f2e50ba35dddf8cfaa1de0f4213e2ca70aad2d3cc" +} diff --git a/backend/.sqlx/query-4fbe1a8019228d78f6b2361fd7f363b8f4ee2e63104c7a824e289d13b4814e51.json b/backend/.sqlx/query-4fbe1a8019228d78f6b2361fd7f363b8f4ee2e63104c7a824e289d13b4814e51.json new file mode 100644 index 0000000..d796adf --- /dev/null +++ b/backend/.sqlx/query-4fbe1a8019228d78f6b2361fd7f363b8f4ee2e63104c7a824e289d13b4814e51.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE sanctions \n SET status = 'lifted', lifted_at = NOW(), lifted_by = $2, lift_reason = $3\n WHERE id = $1 AND status = 'active'", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "4fbe1a8019228d78f6b2361fd7f363b8f4ee2e63104c7a824e289d13b4814e51" +} diff --git a/backend/.sqlx/query-502a8e2b5a59105bcc6d1d668d47b04d7978dde7265c76ae37915f8ae8842033.json b/backend/.sqlx/query-502a8e2b5a59105bcc6d1d668d47b04d7978dde7265c76ae37915f8ae8842033.json new file mode 100644 index 0000000..adac7fe --- /dev/null +++ b/backend/.sqlx/query-502a8e2b5a59105bcc6d1d668d47b04d7978dde7265c76ae37915f8ae8842033.json @@ -0,0 +1,33 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO moderation_log (community_id, moderator_id, target_user_id, action_type, reason, details)\n VALUES ($1, $2, $3, $4, $5, $6)\n RETURNING id, created_at\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Uuid", + "Varchar", + "Text", + "Jsonb" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "502a8e2b5a59105bcc6d1d668d47b04d7978dde7265c76ae37915f8ae8842033" +} diff --git a/backend/.sqlx/query-50c17bb1a4d6b249e39d8d13c2ac7d346bfc2476e7c983223e9b4366b2f08abd.json b/backend/.sqlx/query-50c17bb1a4d6b249e39d8d13c2ac7d346bfc2476e7c983223e9b4366b2f08abd.json new file mode 100644 index 0000000..08691e7 --- /dev/null +++ b/backend/.sqlx/query-50c17bb1a4d6b249e39d8d13c2ac7d346bfc2476e7c983223e9b4366b2f08abd.json @@ -0,0 +1,46 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n action_type::text AS action_type,\n decision_type,\n total_actions,\n unique_actors,\n unique_targets\n FROM v_moderation_stats\n WHERE community_id IS NOT DISTINCT FROM $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "action_type", + "type_info": "Text" + }, + { + "ordinal": 1, + "name": "decision_type", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "total_actions", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "unique_actors", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "unique_targets", + "type_info": 
"Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null, + true, + true, + true, + true + ] + }, + "hash": "50c17bb1a4d6b249e39d8d13c2ac7d346bfc2476e7c983223e9b4366b2f08abd" +} diff --git a/backend/.sqlx/query-522c64c31796d1d85b4ce6a6cbac86d0b440c2eb54d14fd700fd005848e4b146.json b/backend/.sqlx/query-522c64c31796d1d85b4ce6a6cbac86d0b440c2eb54d14fd700fd005848e4b146.json new file mode 100644 index 0000000..28ab994 --- /dev/null +++ b/backend/.sqlx/query-522c64c31796d1d85b4ce6a6cbac86d0b440c2eb54d14fd700fd005848e4b146.json @@ -0,0 +1,62 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT dp.user_id, u.username, dp.display_name, dp.bio, \n dp.accepting_delegations, dp.delegation_policy,\n dp.total_delegators, dp.total_votes_cast\n FROM delegate_profiles dp\n JOIN users u ON dp.user_id = u.id\n WHERE dp.accepting_delegations = TRUE\n ORDER BY dp.total_delegators DESC\n LIMIT 50", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "user_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "username", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "bio", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "accepting_delegations", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "delegation_policy", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "total_delegators", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "total_votes_cast", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + true, + true, + false, + true, + false, + false + ] + }, + "hash": "522c64c31796d1d85b4ce6a6cbac86d0b440c2eb54d14fd700fd005848e4b146" +} diff --git a/backend/.sqlx/query-5303a6a8c505e05aca3e158cf4aae36c2ad5a98374032d3eb604363f777def7e.json b/backend/.sqlx/query-5303a6a8c505e05aca3e158cf4aae36c2ad5a98374032d3eb604363f777def7e.json new file mode 100644 index 0000000..a5b3354 --- /dev/null +++ b/backend/.sqlx/query-5303a6a8c505e05aca3e158cf4aae36c2ad5a98374032d3eb604363f777def7e.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO community_rules (\n community_id, code, title, description, severity, created_by\n ) VALUES ($1, $2, $3, $4, $5::rule_severity, $6)\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Varchar", + "Text", + { + "Custom": { + "name": "rule_severity", + "kind": { + "Enum": [ + "info", + "warning", + "minor", + "major", + "critical" + ] + } + } + }, + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "5303a6a8c505e05aca3e158cf4aae36c2ad5a98374032d3eb604363f777def7e" +} diff --git a/backend/.sqlx/query-536866ecf3349598d172c0011a0970a0f7926658d166a4940fca89cabaf3d514.json b/backend/.sqlx/query-536866ecf3349598d172c0011a0970a0f7926658d166a4940fca89cabaf3d514.json new file mode 100644 index 0000000..eff1078 --- /dev/null +++ b/backend/.sqlx/query-536866ecf3349598d172c0011a0970a0f7926658d166a4940fca89cabaf3d514.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT DISTINCT proposal_id FROM deliberation_arguments\n WHERE created_at > NOW() - INTERVAL '1 day'", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "proposal_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "536866ecf3349598d172c0011a0970a0f7926658d166a4940fca89cabaf3d514" +} diff --git 
a/backend/.sqlx/query-53e54a4fe0344eb04f52ac7d737663b824bbfc3c65e3cebd23ff3dfa5a89fbe8.json b/backend/.sqlx/query-53e54a4fe0344eb04f52ac7d737663b824bbfc3c65e3cebd23ff3dfa5a89fbe8.json new file mode 100644 index 0000000..4a270d1 --- /dev/null +++ b/backend/.sqlx/query-53e54a4fe0344eb04f52ac7d737663b824bbfc3c65e3cebd23ff3dfa5a89fbe8.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO public_events (community_id, actor_user_id, plugin_name, event_type, payload)\n VALUES ($1, $2, $3, $4, $5)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Varchar", + "Varchar", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "53e54a4fe0344eb04f52ac7d737663b824bbfc3c65e3cebd23ff3dfa5a89fbe8" +} diff --git a/backend/.sqlx/query-54262c9a793fdbf907f81f6974d4fa57bbd4140b6bd9fc84b27f4abcdc9cefc4.json b/backend/.sqlx/query-54262c9a793fdbf907f81f6974d4fa57bbd4140b6bd9fc84b27f4abcdc9cefc4.json new file mode 100644 index 0000000..e9c1b4a --- /dev/null +++ b/backend/.sqlx/query-54262c9a793fdbf907f81f6974d4fa57bbd4140b6bd9fc84b27f4abcdc9cefc4.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COALESCE(MAX(session_number), 0)::int + 1 FROM mediation_sessions WHERE conflict_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "54262c9a793fdbf907f81f6974d4fa57bbd4140b6bd9fc84b27f4abcdc9cefc4" +} diff --git a/backend/.sqlx/query-563efcefcb6880c075c16d278b11631166fc30678c161bdff1a68b8f491bf335.json b/backend/.sqlx/query-563efcefcb6880c075c16d278b11631166fc30678c161bdff1a68b8f491bf335.json new file mode 100644 index 0000000..c7791de --- /dev/null +++ b/backend/.sqlx/query-563efcefcb6880c075c16d278b11631166fc30678c161bdff1a68b8f491bf335.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO federated_decisions \n (federated_proposal_id, decision_type, outcome, total_votes, is_final)\n VALUES ($1, 'vote', 'pending', $2, false)\n ON CONFLICT DO NOTHING", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "563efcefcb6880c075c16d278b11631166fc30678c161bdff1a68b8f491bf335" +} diff --git a/backend/.sqlx/query-56a3e0906a82b4c7df2d066194300d7c5c704562990cf7d835541083884d192e.json b/backend/.sqlx/query-56a3e0906a82b4c7df2d066194300d7c5c704562990cf7d835541083884d192e.json new file mode 100644 index 0000000..0d8b61a --- /dev/null +++ b/backend/.sqlx/query-56a3e0906a82b4c7df2d066194300d7c5c704562990cf7d835541083884d192e.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id FROM community_members WHERE user_id = $1 AND community_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "56a3e0906a82b4c7df2d066194300d7c5c704562990cf7d835541083884d192e" +} diff --git a/backend/.sqlx/query-56be8018a90784cb6868af7bd4a7dff4e1e8ddf09e6d08b97c2e0cc3a6dec0e5.json b/backend/.sqlx/query-56be8018a90784cb6868af7bd4a7dff4e1e8ddf09e6d08b97c2e0cc3a6dec0e5.json new file mode 100644 index 0000000..15aabb4 --- /dev/null +++ b/backend/.sqlx/query-56be8018a90784cb6868af7bd4a7dff4e1e8ddf09e6d08b97c2e0cc3a6dec0e5.json @@ -0,0 +1,114 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n ml.id,\n ml.sequence_number,\n ml.community_id,\n ml.actor_user_id,\n u.username AS 
\"actor_username?\",\n ml.actor_role,\n ml.action_type::text AS \"action_type!\",\n ml.target_type,\n ml.target_id,\n ml.reason,\n ml.rule_reference,\n ml.evidence,\n ml.duration_hours,\n ml.decision_type,\n ml.entry_hash,\n ml.created_at\n FROM moderation_ledger ml\n LEFT JOIN users u ON u.id = ml.actor_user_id\n WHERE ml.community_id IS NOT DISTINCT FROM $1\n ORDER BY ml.sequence_number DESC\n LIMIT $2 OFFSET $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "sequence_number", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "actor_user_id", + "type_info": "Uuid" + }, + { + "ordinal": 4, + "name": "actor_username?", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "actor_role", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "action_type!", + "type_info": "Text" + }, + { + "ordinal": 7, + "name": "target_type", + "type_info": "Text" + }, + { + "ordinal": 8, + "name": "target_id", + "type_info": "Uuid" + }, + { + "ordinal": 9, + "name": "reason", + "type_info": "Text" + }, + { + "ordinal": 10, + "name": "rule_reference", + "type_info": "Text" + }, + { + "ordinal": 11, + "name": "evidence", + "type_info": "Jsonb" + }, + { + "ordinal": 12, + "name": "duration_hours", + "type_info": "Int4" + }, + { + "ordinal": 13, + "name": "decision_type", + "type_info": "Text" + }, + { + "ordinal": 14, + "name": "entry_hash", + "type_info": "Text" + }, + { + "ordinal": 15, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + false, + true, + false, + false, + false, + null, + false, + false, + false, + true, + true, + true, + false, + false, + false + ] + }, + "hash": "56be8018a90784cb6868af7bd4a7dff4e1e8ddf09e6d08b97c2e0cc3a6dec0e5" +} diff --git a/backend/.sqlx/query-598da730fbfa2531c8882c1456f8e1a13c6a99c177109e6a88b44e66e923e8f0.json b/backend/.sqlx/query-598da730fbfa2531c8882c1456f8e1a13c6a99c177109e6a88b44e66e923e8f0.json new file mode 100644 index 0000000..26760fb --- /dev/null +++ b/backend/.sqlx/query-598da730fbfa2531c8882c1456f8e1a13c6a99c177109e6a88b44e66e923e8f0.json @@ -0,0 +1,44 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, instance_url, instance_name, status::text AS \"status!\", trust_level\n FROM federated_instances ORDER BY instance_name", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "instance_url", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "instance_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "status!", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "trust_level", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + null, + false + ] + }, + "hash": "598da730fbfa2531c8882c1456f8e1a13c6a99c177109e6a88b44e66e923e8f0" +} diff --git a/backend/.sqlx/query-599b54e38d5be790010a42f8db1cdd210506243f533fe02b90974c51d878ef52.json b/backend/.sqlx/query-599b54e38d5be790010a42f8db1cdd210506243f533fe02b90974c51d878ef52.json new file mode 100644 index 0000000..3072ed1 --- /dev/null +++ b/backend/.sqlx/query-599b54e38d5be790010a42f8db1cdd210506243f533fe02b90974c51d878ef52.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE conflict_mediators SET notes_count = notes_count + 1, last_activity_at = NOW() WHERE 
conflict_id = $1 AND user_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "599b54e38d5be790010a42f8db1cdd210506243f533fe02b90974c51d878ef52" +} diff --git a/backend/.sqlx/query-5ae8c8c64a800b7735ab381b7ff73988ec35b8839beb5f108400e1e85295aaa8.json b/backend/.sqlx/query-5ae8c8c64a800b7735ab381b7ff73988ec35b8839beb5f108400e1e85295aaa8.json new file mode 100644 index 0000000..b42c110 --- /dev/null +++ b/backend/.sqlx/query-5ae8c8c64a800b7735ab381b7ff73988ec35b8839beb5f108400e1e85295aaa8.json @@ -0,0 +1,84 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT p.id, p.title, p.description, p.status as \"status: String\", p.created_at,\n c.name as community_name, c.slug as community_slug,\n COALESCE((SELECT COUNT(*) FROM votes v JOIN proposal_options po ON v.option_id = po.id WHERE po.proposal_id = p.id), 0) as vote_count,\n COALESCE((SELECT COUNT(*) FROM comments WHERE proposal_id = p.id), 0) as comment_count\n FROM proposals p\n JOIN communities c ON p.community_id = c.id\n WHERE p.author_id = $1 AND c.is_active = true\n ORDER BY p.created_at DESC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "status: String", + "type_info": { + "Custom": { + "name": "proposal_status", + "kind": { + "Enum": [ + "draft", + "discussion", + "voting", + "closed", + "archived", + "calculating" + ] + } + } + } + }, + { + "ordinal": 4, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 5, + "name": "community_name", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "community_slug", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "vote_count", + "type_info": "Int8" + }, + { + "ordinal": 8, + "name": "comment_count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + null, + null + ] + }, + "hash": "5ae8c8c64a800b7735ab381b7ff73988ec35b8839beb5f108400e1e85295aaa8" +} diff --git a/backend/.sqlx/query-5b3990eadd408f017c3f97f9eec54ebf9d4b17e8f52b7be7e8d4f7a4d4303045.json b/backend/.sqlx/query-5b3990eadd408f017c3f97f9eec54ebf9d4b17e8f52b7be7e8d4f7a4d4303045.json new file mode 100644 index 0000000..a7685b3 --- /dev/null +++ b/backend/.sqlx/query-5b3990eadd408f017c3f97f9eec54ebf9d4b17e8f52b7be7e8d4f7a4d4303045.json @@ -0,0 +1,52 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO delegate_profiles (user_id)\n VALUES ($1)\n ON CONFLICT (user_id) DO UPDATE SET user_id = $1\n RETURNING display_name, bio, accepting_delegations, delegation_policy, \n total_delegators, total_votes_cast", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "bio", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "accepting_delegations", + "type_info": "Bool" + }, + { + "ordinal": 3, + "name": "delegation_policy", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "total_delegators", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "total_votes_cast", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true, + true, + false, + true, + false, + false + ] + }, + "hash": "5b3990eadd408f017c3f97f9eec54ebf9d4b17e8f52b7be7e8d4f7a4d4303045" +} 
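Each of these `.sqlx/query-*.json` files is SQLx's offline query cache: it records the exact SQL text plus the parameter types, column types, and nullability that the database reported, so the `sqlx::query!` family of macros can type-check the backend without a live PostgreSQL connection. As a minimal sketch of how a cached entry like the `delegate_profiles` upsert directly above would be consumed, the snippet below shows one plausible call site; the function and struct names (`ensure_delegate_profile`, `DelegateProfile`) are illustrative and not taken from the repository, and the exact handler shape in the backend may differ.

```rust
// Sketch only: how a handler might invoke the cached delegate_profiles upsert.
// SQLx keys the offline cache on a hash of the SQL string passed to the macro;
// if the query text changes, the cache is regenerated with `cargo sqlx prepare`.
use sqlx::PgPool;
use uuid::Uuid;

// Field optionality mirrors the "nullable" array in the cached metadata:
// display_name, bio and delegation_policy are nullable, the rest are NOT NULL.
pub struct DelegateProfile {
    pub display_name: Option<String>,
    pub bio: Option<String>,
    pub accepting_delegations: bool,
    pub delegation_policy: Option<String>,
    pub total_delegators: i32,
    pub total_votes_cast: i32,
}

pub async fn ensure_delegate_profile(
    pool: &PgPool,
    user_id: Uuid,
) -> Result<DelegateProfile, sqlx::Error> {
    // The macro checks the parameter ($1: Uuid) and the RETURNING columns
    // against the cached describe data at compile time.
    let row = sqlx::query!(
        r#"INSERT INTO delegate_profiles (user_id)
           VALUES ($1)
           ON CONFLICT (user_id) DO UPDATE SET user_id = $1
           RETURNING display_name, bio, accepting_delegations, delegation_policy,
                     total_delegators, total_votes_cast"#,
        user_id
    )
    .fetch_one(pool)
    .await?;

    Ok(DelegateProfile {
        display_name: row.display_name,
        bio: row.bio,
        accepting_delegations: row.accepting_delegations,
        delegation_policy: row.delegation_policy,
        total_delegators: row.total_delegators,
        total_votes_cast: row.total_votes_cast,
    })
}
```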
diff --git a/backend/.sqlx/query-5bf7ea39ce22f5466a08746b98091b897f35a72561e668a1643d63c9427a96ca.json b/backend/.sqlx/query-5bf7ea39ce22f5466a08746b98091b897f35a72561e668a1643d63c9427a96ca.json new file mode 100644 index 0000000..96777e3 --- /dev/null +++ b/backend/.sqlx/query-5bf7ea39ce22f5466a08746b98091b897f35a72561e668a1643d63c9427a96ca.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COALESCE(\n (SELECT (cp.settings->>'public_ledger')::boolean\n FROM community_plugins cp\n JOIN plugins p ON p.id = cp.plugin_id\n WHERE cp.community_id = $1 AND p.name = 'moderation_ledger'),\n true\n ) AS \"is_public!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "is_public!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "5bf7ea39ce22f5466a08746b98091b897f35a72561e668a1643d63c9427a96ca" +} diff --git a/backend/.sqlx/query-5c54f1d4567d6c7baf6969687c7f19f71dfdcacebdcbcb5284b0243cc12895e2.json b/backend/.sqlx/query-5c54f1d4567d6c7baf6969687c7f19f71dfdcacebdcbcb5284b0243cc12895e2.json new file mode 100644 index 0000000..9357c96 --- /dev/null +++ b/backend/.sqlx/query-5c54f1d4567d6c7baf6969687c7f19f71dfdcacebdcbcb5284b0243cc12895e2.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO mediation_sessions (\n conflict_id, session_number, scheduled_at, duration_minutes, agenda\n ) VALUES ($1, $2, $3, $4, $5)\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Int4", + "Timestamptz", + "Int4", + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "5c54f1d4567d6c7baf6969687c7f19f71dfdcacebdcbcb5284b0243cc12895e2" +} diff --git a/backend/.sqlx/query-5d369c593b73b9c9835ef79206367824f5cc656cb8d93d3a4af3262c73cf88ad.json b/backend/.sqlx/query-5d369c593b73b9c9835ef79206367824f5cc656cb8d93d3a4af3262c73cf88ad.json new file mode 100644 index 0000000..b140ab3 --- /dev/null +++ b/backend/.sqlx/query-5d369c593b73b9c9835ef79206367824f5cc656cb8d93d3a4af3262c73cf88ad.json @@ -0,0 +1,70 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id, community_id, code, title, description,\n scope::text AS \"scope!\", severity::text AS \"severity!\",\n is_active, allow_community_vote\n FROM community_rules\n WHERE community_id = $1\n ORDER BY \n CASE severity \n WHEN 'critical' THEN 1 \n WHEN 'major' THEN 2 \n WHEN 'minor' THEN 3 \n ELSE 4 \n END", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "code", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "scope!", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "severity!", + "type_info": "Text" + }, + { + "ordinal": 7, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "allow_community_vote", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + null, + null, + false, + false + ] + }, + "hash": "5d369c593b73b9c9835ef79206367824f5cc656cb8d93d3a4af3262c73cf88ad" +} diff --git a/backend/.sqlx/query-5ea4e4b2dafd654c306e4507faaa1d7112461cedba992f8d2ddc923c42cd7b6b.json 
b/backend/.sqlx/query-5ea4e4b2dafd654c306e4507faaa1d7112461cedba992f8d2ddc923c42cd7b6b.json new file mode 100644 index 0000000..cdf1f28 --- /dev/null +++ b/backend/.sqlx/query-5ea4e4b2dafd654c306e4507faaa1d7112461cedba992f8d2ddc923c42cd7b6b.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO pending_communities (name, slug, description, requested_by)\n VALUES ($1, $2, $3, $4)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Text", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "5ea4e4b2dafd654c306e4507faaa1d7112461cedba992f8d2ddc923c42cd7b6b" +} diff --git a/backend/.sqlx/query-5fd1b28bc068dc4592e1c6a58bd348f4d9feae00d29ea677ae643593dda852a4.json b/backend/.sqlx/query-5fd1b28bc068dc4592e1c6a58bd348f4d9feae00d29ea677ae643593dda852a4.json new file mode 100644 index 0000000..36a1d45 --- /dev/null +++ b/backend/.sqlx/query-5fd1b28bc068dc4592e1c6a58bd348f4d9feae00d29ea677ae643593dda852a4.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO pending_registrations (username, email, password_hash, display_name, invitation_id)\n VALUES ($1, $2, $3, $4, $5)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "5fd1b28bc068dc4592e1c6a58bd348f4d9feae00d29ea677ae643593dda852a4" +} diff --git a/backend/.sqlx/query-5ff99236c732a045c53789220aa54f9f494aa49b1b55b207ba777e9cbc2e8ddd.json b/backend/.sqlx/query-5ff99236c732a045c53789220aa54f9f494aa49b1b55b207ba777e9cbc2e8ddd.json new file mode 100644 index 0000000..e6a9f3b --- /dev/null +++ b/backend/.sqlx/query-5ff99236c732a045c53789220aa54f9f494aa49b1b55b207ba777e9cbc2e8ddd.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT pi.id FROM phase_instances pi WHERE pi.status = 'active'", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "5ff99236c732a045c53789220aa54f9f494aa49b1b55b207ba777e9cbc2e8ddd" +} diff --git a/backend/.sqlx/query-6098a745505990aea4405d2ab48fd8e4839518f714261bc8fc1cc01fe298ffd1.json b/backend/.sqlx/query-6098a745505990aea4405d2ab48fd8e4839518f714261bc8fc1cc01fe298ffd1.json new file mode 100644 index 0000000..0308857 --- /dev/null +++ b/backend/.sqlx/query-6098a745505990aea4405d2ab48fd8e4839518f714261bc8fc1cc01fe298ffd1.json @@ -0,0 +1,64 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT pc.id, pc.name, pc.slug, pc.description, pc.requested_by, \n pc.status, pc.created_at, u.username as requester_username\n FROM pending_communities pc\n LEFT JOIN users u ON u.id = pc.requested_by\n WHERE pc.status = $1\n ORDER BY pc.created_at DESC\n LIMIT 100", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "slug", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "requested_by", + "type_info": "Uuid" + }, + { + "ordinal": 5, + "name": "status", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 7, + "name": "requester_username", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false, + false, + true, + false, + true, + true, + false + ] + }, + "hash": 
"6098a745505990aea4405d2ab48fd8e4839518f714261bc8fc1cc01fe298ffd1" +} diff --git a/backend/.sqlx/query-6252da4b5dd1636fb1aa75dc10bd8ede8f9b12fdb80b0edaa999c321993233ae.json b/backend/.sqlx/query-6252da4b5dd1636fb1aa75dc10bd8ede8f9b12fdb80b0edaa999c321993233ae.json new file mode 100644 index 0000000..9775d8b --- /dev/null +++ b/backend/.sqlx/query-6252da4b5dd1636fb1aa75dc10bd8ede8f9b12fdb80b0edaa999c321993233ae.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE rule_violations \n SET status = 'confirmed', \n reviewed_by = $2, \n reviewed_at = NOW(),\n review_notes = $3,\n escalation_level = $4\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "6252da4b5dd1636fb1aa75dc10bd8ede8f9b12fdb80b0edaa999c321993233ae" +} diff --git a/backend/.sqlx/query-633a36b68dd39b0e443b169ac7bcfeebbd2e8b76a09791f8f3bd5190bdc569e3.json b/backend/.sqlx/query-633a36b68dd39b0e443b169ac7bcfeebbd2e8b76a09791f8f3bd5190bdc569e3.json new file mode 100644 index 0000000..8cf7a67 --- /dev/null +++ b/backend/.sqlx/query-633a36b68dd39b0e443b169ac7bcfeebbd2e8b76a09791f8f3bd5190bdc569e3.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO workflow_templates (community_id, name, description, created_by)\n VALUES ($1, $2, $3, $4)\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Text", + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "633a36b68dd39b0e443b169ac7bcfeebbd2e8b76a09791f8f3bd5190bdc569e3" +} diff --git a/backend/.sqlx/query-63426c52f6016b030b23a562fe2aa08793ecb5e3a3a24f82bc7666cd8cf5cd68.json b/backend/.sqlx/query-63426c52f6016b030b23a562fe2aa08793ecb5e3a3a24f82bc7666cd8cf5cd68.json new file mode 100644 index 0000000..36b8009 --- /dev/null +++ b/backend/.sqlx/query-63426c52f6016b030b23a562fe2aa08793ecb5e3a3a24f82bc7666cd8cf5cd68.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT name, slug, created_at FROM communities WHERE is_active = true ORDER BY created_at DESC LIMIT 5", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "slug", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "63426c52f6016b030b23a562fe2aa08793ecb5e3a3a24f82bc7666cd8cf5cd68" +} diff --git a/backend/.sqlx/query-641be20c2834a02a49f6c658663d73fb5779eb14e36cf13a9ecbfa61440c5332.json b/backend/.sqlx/query-641be20c2834a02a49f6c658663d73fb5779eb14e36cf13a9ecbfa61440c5332.json new file mode 100644 index 0000000..73d0ffc --- /dev/null +++ b/backend/.sqlx/query-641be20c2834a02a49f6c658663d73fb5779eb14e36cf13a9ecbfa61440c5332.json @@ -0,0 +1,52 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT community_id, membership_mode, moderation_mode,\n governance_model, plugin_policy, features_enabled\n FROM community_settings WHERE community_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "membership_mode", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "moderation_mode", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "governance_model", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": 
"plugin_policy", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "features_enabled", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false + ] + }, + "hash": "641be20c2834a02a49f6c658663d73fb5779eb14e36cf13a9ecbfa61440c5332" +} diff --git a/backend/.sqlx/query-65a8781dff58442082a16c731be5acfdb3c61ad599b2031433be6c554821df05.json b/backend/.sqlx/query-65a8781dff58442082a16c731be5acfdb3c61ad599b2031433be6c554821df05.json new file mode 100644 index 0000000..d00a5ce --- /dev/null +++ b/backend/.sqlx/query-65a8781dff58442082a16c731be5acfdb3c61ad599b2031433be6c554821df05.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE notifications SET is_read = true WHERE user_id = $1 AND is_read = false", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "65a8781dff58442082a16c731be5acfdb3c61ad599b2031433be6c554821df05" +} diff --git a/backend/.sqlx/query-660253bf0225d065ae50eab8625e1f9662779997acef345182ce23e96efde2c3.json b/backend/.sqlx/query-660253bf0225d065ae50eab8625e1f9662779997acef345182ce23e96efde2c3.json new file mode 100644 index 0000000..566354a --- /dev/null +++ b/backend/.sqlx/query-660253bf0225d065ae50eab8625e1f9662779997acef345182ce23e96efde2c3.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n total_arguments, pro_arguments, con_arguments, neutral_arguments,\n unique_participants, substantive_ratio::float8 AS substantive_ratio,\n balance_score::float8 AS balance_score\n FROM deliberation_metrics\n WHERE proposal_id = $1\n ORDER BY calculated_at DESC\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "total_arguments", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "pro_arguments", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "con_arguments", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "neutral_arguments", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "unique_participants", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "substantive_ratio", + "type_info": "Float8" + }, + { + "ordinal": 6, + "name": "balance_score", + "type_info": "Float8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + null, + null + ] + }, + "hash": "660253bf0225d065ae50eab8625e1f9662779997acef345182ce23e96efde2c3" +} diff --git a/backend/.sqlx/query-68f175d74997d4cfc1ce98756bb32d8f7d3466d5ea71f137ac101ebd07e0073e.json b/backend/.sqlx/query-68f175d74997d4cfc1ce98756bb32d8f7d3466d5ea71f137ac101ebd07e0073e.json new file mode 100644 index 0000000..b8ec933 --- /dev/null +++ b/backend/.sqlx/query-68f175d74997d4cfc1ce98756bb32d8f7d3466d5ea71f137ac101ebd07e0073e.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, label FROM proposal_options WHERE proposal_id = $1 ORDER BY sort_order", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "label", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "68f175d74997d4cfc1ce98756bb32d8f7d3466d5ea71f137ac101ebd07e0073e" +} diff --git a/backend/.sqlx/query-6a6bec68b35012df41e6bb99b5afc11a90e3404fa29698fb04fa3ad18ad2025b.json b/backend/.sqlx/query-6a6bec68b35012df41e6bb99b5afc11a90e3404fa29698fb04fa3ad18ad2025b.json new file mode 100644 index 0000000..562d336 
--- /dev/null +++ b/backend/.sqlx/query-6a6bec68b35012df41e6bb99b5afc11a90e3404fa29698fb04fa3ad18ad2025b.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE users SET display_name = $1 WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "6a6bec68b35012df41e6bb99b5afc11a90e3404fa29698fb04fa3ad18ad2025b" +} diff --git a/backend/.sqlx/query-6b3fef1781daf9672ad11cbc1e2d84e536ff7ee9b3cef4d5f517a2ed27c586c4.json b/backend/.sqlx/query-6b3fef1781daf9672ad11cbc1e2d84e536ff7ee9b3cef4d5f517a2ed27c586c4.json new file mode 100644 index 0000000..3256e5a --- /dev/null +++ b/backend/.sqlx/query-6b3fef1781daf9672ad11cbc1e2d84e536ff7ee9b3cef4d5f517a2ed27c586c4.json @@ -0,0 +1,31 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO community_plugins (community_id, plugin_id, settings, is_active)\n VALUES ($1, $2, COALESCE($3, '{}'::jsonb), COALESCE($4, true))\n ON CONFLICT (community_id, plugin_id)\n DO UPDATE SET\n settings = COALESCE($3, community_plugins.settings),\n is_active = COALESCE($4, community_plugins.is_active),\n activated_at = CASE\n WHEN COALESCE($4, community_plugins.is_active) = true AND community_plugins.is_active = false THEN NOW()\n ELSE community_plugins.activated_at\n END\n RETURNING is_active as community_is_active,\n settings as \"settings!: serde_json::Value\"\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "community_is_active", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "settings!: serde_json::Value", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Jsonb", + "Bool" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "6b3fef1781daf9672ad11cbc1e2d84e536ff7ee9b3cef4d5f517a2ed27c586c4" +} diff --git a/backend/.sqlx/query-6d1c9e2cb972dbc9df0a72034cf0390e72bb8baecd2b09d8ae305ab3844439a0.json b/backend/.sqlx/query-6d1c9e2cb972dbc9df0a72034cf0390e72bb8baecd2b09d8ae305ab3844439a0.json new file mode 100644 index 0000000..127bc26 --- /dev/null +++ b/backend/.sqlx/query-6d1c9e2cb972dbc9df0a72034cf0390e72bb8baecd2b09d8ae305ab3844439a0.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT settings as \"settings!: serde_json::Value\" FROM communities WHERE id = $1 AND is_active = true", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "settings!: serde_json::Value", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "6d1c9e2cb972dbc9df0a72034cf0390e72bb8baecd2b09d8ae305ab3844439a0" +} diff --git a/backend/.sqlx/query-6dc1dc04d2084400be320aa33507746a38814325f3525dd0f38e05c6a64f1f7a.json b/backend/.sqlx/query-6dc1dc04d2084400be320aa33507746a38814325f3525dd0f38e05c6a64f1f7a.json new file mode 100644 index 0000000..fe59ee8 --- /dev/null +++ b/backend/.sqlx/query-6dc1dc04d2084400be320aa33507746a38814325f3525dd0f38e05c6a64f1f7a.json @@ -0,0 +1,46 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT cm.user_id, cm.role, cm.joined_at, u.username, u.display_name\n FROM community_members cm\n JOIN users u ON cm.user_id = u.id\n WHERE cm.community_id = $1\n ORDER BY cm.joined_at\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "user_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "role", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "joined_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 3, + "name": "username", + "type_info": "Varchar" + }, + { + "ordinal": 4, + 
"name": "display_name", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + true + ] + }, + "hash": "6dc1dc04d2084400be320aa33507746a38814325f3525dd0f38e05c6a64f1f7a" +} diff --git a/backend/.sqlx/query-6e44a5f3076d976fdad560f5c4ccb1802df408602aecb3025b851969ff480708.json b/backend/.sqlx/query-6e44a5f3076d976fdad560f5c4ccb1802df408602aecb3025b851969ff480708.json new file mode 100644 index 0000000..a523509 --- /dev/null +++ b/backend/.sqlx/query-6e44a5f3076d976fdad560f5c4ccb1802df408602aecb3025b851969ff480708.json @@ -0,0 +1,80 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, name, display_name, description, icon, is_active, is_default,\n config_schema, default_config, complexity_level, supports_delegation\n FROM voting_method_plugins ORDER BY name", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "icon", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "is_default", + "type_info": "Bool" + }, + { + "ordinal": 7, + "name": "config_schema", + "type_info": "Jsonb" + }, + { + "ordinal": 8, + "name": "default_config", + "type_info": "Jsonb" + }, + { + "ordinal": 9, + "name": "complexity_level", + "type_info": "Varchar" + }, + { + "ordinal": 10, + "name": "supports_delegation", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + true, + true, + false, + false, + true, + true, + true, + false + ] + }, + "hash": "6e44a5f3076d976fdad560f5c4ccb1802df408602aecb3025b851969ff480708" +} diff --git a/backend/.sqlx/query-6e60bcb9d941716127a4ed3cead4b9a375deb0e4238137d3b095c43bae316702.json b/backend/.sqlx/query-6e60bcb9d941716127a4ed3cead4b9a375deb0e4238137d3b095c43bae316702.json new file mode 100644 index 0000000..f87c94d --- /dev/null +++ b/backend/.sqlx/query-6e60bcb9d941716127a4ed3cead4b9a375deb0e4238137d3b095c43bae316702.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT can_comment, can_vote FROM deliberation_reading_log\n WHERE proposal_id = $1 AND user_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "can_comment", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "can_vote", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "6e60bcb9d941716127a4ed3cead4b9a375deb0e4238137d3b095c43bae316702" +} diff --git a/backend/.sqlx/query-6e71eb3247a4b255a2187731a9d54c4fe2aac97e458c1247b8b89364ff590b9f.json b/backend/.sqlx/query-6e71eb3247a4b255a2187731a9d54c4fe2aac97e458c1247b8b89364ff590b9f.json new file mode 100644 index 0000000..d37b880 --- /dev/null +++ b/backend/.sqlx/query-6e71eb3247a4b255a2187731a9d54c4fe2aac97e458c1247b8b89364ff590b9f.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT role FROM community_members WHERE community_id = $1 AND user_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "role", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "6e71eb3247a4b255a2187731a9d54c4fe2aac97e458c1247b8b89364ff590b9f" +} diff 
--git a/backend/.sqlx/query-6f89b8deda1078080ad8cfc2db507325e19bb0ffd02f1aa4f933045633c4914a.json b/backend/.sqlx/query-6f89b8deda1078080ad8cfc2db507325e19bb0ffd02f1aa4f933045633c4914a.json new file mode 100644 index 0000000..1a0947d --- /dev/null +++ b/backend/.sqlx/query-6f89b8deda1078080ad8cfc2db507325e19bb0ffd02f1aa4f933045633c4914a.json @@ -0,0 +1,80 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO users (username, email, password_hash, display_name, is_admin)\n VALUES ($1, $2, $3, $4, $5)\n RETURNING *\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "username", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "password_hash", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 7, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "is_admin", + "type_info": "Bool" + }, + { + "ordinal": 9, + "name": "invited_by", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Bool" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + false, + false, + false, + false, + true + ] + }, + "hash": "6f89b8deda1078080ad8cfc2db507325e19bb0ffd02f1aa4f933045633c4914a" +} diff --git a/backend/.sqlx/query-6f92eb099a3182a7f862df97a6f9cf340bef74f512099000e03450f69b796ef1.json b/backend/.sqlx/query-6f92eb099a3182a7f862df97a6f9cf340bef74f512099000e03450f69b796ef1.json new file mode 100644 index 0000000..41b0ecb --- /dev/null +++ b/backend/.sqlx/query-6f92eb099a3182a7f862df97a6f9cf340bef74f512099000e03450f69b796ef1.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE conflict_cases SET \n status = 'resolved', \n resolved_at = NOW(),\n resolution_type = 'compromise_accepted',\n updated_at = NOW()\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "6f92eb099a3182a7f862df97a6f9cf340bef74f512099000e03450f69b796ef1" +} diff --git a/backend/.sqlx/query-706d61211ccde2d45e1fc60480b37719f90ce364bf3a118743bfe82eca3783ae.json b/backend/.sqlx/query-706d61211ccde2d45e1fc60480b37719f90ce364bf3a118743bfe82eca3783ae.json new file mode 100644 index 0000000..748ea72 --- /dev/null +++ b/backend/.sqlx/query-706d61211ccde2d45e1fc60480b37719f90ce364bf3a118743bfe82eca3783ae.json @@ -0,0 +1,65 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id, community_id, snapshot_date,\n total_members, active_members,\n votes_cast, unique_voters,\n engagement_score::float8 AS engagement_score\n FROM participation_snapshots\n WHERE community_id = $1\n AND snapshot_date > CURRENT_DATE - make_interval(days => $2)\n ORDER BY snapshot_date DESC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "snapshot_date", + "type_info": "Date" + }, + { + "ordinal": 3, + "name": "total_members", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "active_members", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "votes_cast", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "unique_voters", + 
"type_info": "Int4" + }, + { + "ordinal": 7, + "name": "engagement_score", + "type_info": "Float8" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Int4" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + null + ] + }, + "hash": "706d61211ccde2d45e1fc60480b37719f90ce364bf3a118743bfe82eca3783ae" +} diff --git a/backend/.sqlx/query-7148baad9c5cba418a5d7818761e3ac52fe166fbfa0bfc5f6bbe749fa0a6889e.json b/backend/.sqlx/query-7148baad9c5cba418a5d7818761e3ac52fe166fbfa0bfc5f6bbe749fa0a6889e.json new file mode 100644 index 0000000..f6791fc --- /dev/null +++ b/backend/.sqlx/query-7148baad9c5cba418a5d7818761e3ac52fe166fbfa0bfc5f6bbe749fa0a6889e.json @@ -0,0 +1,52 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, local_community_id, remote_instance_id, remote_community_id,\n remote_community_name, status::text AS \"status!\"\n FROM community_federations\n WHERE local_community_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "local_community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "remote_instance_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "remote_community_id", + "type_info": "Uuid" + }, + { + "ordinal": 4, + "name": "remote_community_name", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "status!", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + null + ] + }, + "hash": "7148baad9c5cba418a5d7818761e3ac52fe166fbfa0bfc5f6bbe749fa0a6889e" +} diff --git a/backend/.sqlx/query-72132fbc4c6da619b3055bd473b5928c8a1ff7886a613d9d6be3f2ab2a7a7f4d.json b/backend/.sqlx/query-72132fbc4c6da619b3055bd473b5928c8a1ff7886a613d9d6be3f2ab2a7a7f4d.json new file mode 100644 index 0000000..6243174 --- /dev/null +++ b/backend/.sqlx/query-72132fbc4c6da619b3055bd473b5928c8a1ff7886a613d9d6be3f2ab2a7a7f4d.json @@ -0,0 +1,70 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT * FROM communities WHERE id = $1 AND is_active = true", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "slug", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "settings", + "type_info": "Jsonb" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 7, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "created_by", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + true, + false, + false, + false, + false, + true + ] + }, + "hash": "72132fbc4c6da619b3055bd473b5928c8a1ff7886a613d9d6be3f2ab2a7a7f4d" +} diff --git a/backend/.sqlx/query-733ab1cda0fdc0cecf46928410fefb3b4e96eb2390dfdb93b95ce4c13f0d9e59.json b/backend/.sqlx/query-733ab1cda0fdc0cecf46928410fefb3b4e96eb2390dfdb93b95ce4c13f0d9e59.json new file mode 100644 index 0000000..de72d48 --- /dev/null +++ b/backend/.sqlx/query-733ab1cda0fdc0cecf46928410fefb3b4e96eb2390dfdb93b95ce4c13f0d9e59.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO proposal_options (proposal_id, label, sort_order) VALUES ($1, 'Approve', 1), ($1, 'Reject', 2)", + "describe": { + "columns": [], 
+ "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "733ab1cda0fdc0cecf46928410fefb3b4e96eb2390dfdb93b95ce4c13f0d9e59" +} diff --git a/backend/.sqlx/query-733faae97d10baca47579a4a56ce281a6f36a60c73739f9ad692fc8b3825964c.json b/backend/.sqlx/query-733faae97d10baca47579a4a56ce281a6f36a60c73739f9ad692fc8b3825964c.json new file mode 100644 index 0000000..9827832 --- /dev/null +++ b/backend/.sqlx/query-733faae97d10baca47579a4a56ce281a6f36a60c73739f9ad692fc8b3825964c.json @@ -0,0 +1,82 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT gmr.id, gmr.gitlab_iid, gmr.title, gmr.description, gmr.state,\n gmr.author_username, gmr.source_branch, gmr.target_branch,\n gmr.labels, gmr.proposal_id, gmr.gitlab_created_at\n FROM gitlab_merge_requests gmr\n JOIN gitlab_connections gc ON gmr.connection_id = gc.id\n WHERE gc.community_id = $1\n ORDER BY gmr.gitlab_iid DESC\n LIMIT 100", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "gitlab_iid", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "state", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "author_username", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "source_branch", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "target_branch", + "type_info": "Varchar" + }, + { + "ordinal": 8, + "name": "labels", + "type_info": "TextArray" + }, + { + "ordinal": 9, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 10, + "name": "gitlab_created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + true, + false, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "733faae97d10baca47579a4a56ce281a6f36a60c73739f9ad692fc8b3825964c" +} diff --git a/backend/.sqlx/query-73720cd6ae56d7e210c173ca90f6c982f68150b76dc2d4cd578965c62ddb11be.json b/backend/.sqlx/query-73720cd6ae56d7e210c173ca90f6c982f68150b76dc2d4cd578965c62ddb11be.json new file mode 100644 index 0000000..1d09725 --- /dev/null +++ b/backend/.sqlx/query-73720cd6ae56d7e210c173ca90f6c982f68150b76dc2d4cd578965c62ddb11be.json @@ -0,0 +1,27 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO proposal_amendments (\n proposal_id, target_version, title, description,\n suggested_changes, proposed_by\n ) VALUES ($1, $2, $3, $4, $5, $6)\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Int4", + "Varchar", + "Text", + "Jsonb", + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "73720cd6ae56d7e210c173ca90f6c982f68150b76dc2d4cd578965c62ddb11be" +} diff --git a/backend/.sqlx/query-73e84e079e38064cbf573149243395913f6a14bbaceedeff7bf41fdcaf63f01c.json b/backend/.sqlx/query-73e84e079e38064cbf573149243395913f6a14bbaceedeff7bf41fdcaf63f01c.json new file mode 100644 index 0000000..f8e1691 --- /dev/null +++ b/backend/.sqlx/query-73e84e079e38064cbf573149243395913f6a14bbaceedeff7bf41fdcaf63f01c.json @@ -0,0 +1,106 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id,\n sequence_number,\n community_id,\n actor_user_id,\n actor_role,\n action_type::text AS \"action_type!\",\n target_type,\n target_id,\n reason,\n rule_reference,\n evidence,\n duration_hours,\n decision_type,\n entry_hash,\n created_at\n FROM 
moderation_ledger\n WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "sequence_number", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "actor_user_id", + "type_info": "Uuid" + }, + { + "ordinal": 4, + "name": "actor_role", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "action_type!", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "target_type", + "type_info": "Text" + }, + { + "ordinal": 7, + "name": "target_id", + "type_info": "Uuid" + }, + { + "ordinal": 8, + "name": "reason", + "type_info": "Text" + }, + { + "ordinal": 9, + "name": "rule_reference", + "type_info": "Text" + }, + { + "ordinal": 10, + "name": "evidence", + "type_info": "Jsonb" + }, + { + "ordinal": 11, + "name": "duration_hours", + "type_info": "Int4" + }, + { + "ordinal": 12, + "name": "decision_type", + "type_info": "Text" + }, + { + "ordinal": 13, + "name": "entry_hash", + "type_info": "Text" + }, + { + "ordinal": 14, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + true, + false, + false, + null, + false, + false, + false, + true, + true, + true, + false, + false, + false + ] + }, + "hash": "73e84e079e38064cbf573149243395913f6a14bbaceedeff7bf41fdcaf63f01c" +} diff --git a/backend/.sqlx/query-740f728d9932a5da47b6b9454ed77304f5e93dc836dc21b3994eb4e7c8bc8219.json b/backend/.sqlx/query-740f728d9932a5da47b6b9454ed77304f5e93dc836dc21b3994eb4e7c8bc8219.json new file mode 100644 index 0000000..b1252e9 --- /dev/null +++ b/backend/.sqlx/query-740f728d9932a5da47b6b9454ed77304f5e93dc836dc21b3994eb4e7c8bc8219.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE invitations SET is_active = FALSE WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "740f728d9932a5da47b6b9454ed77304f5e93dc836dc21b3994eb4e7c8bc8219" +} diff --git a/backend/.sqlx/query-74fdd241eb54952015786937b057d437b60267d664cd4b57a9c394486f6001b2.json b/backend/.sqlx/query-74fdd241eb54952015786937b057d437b60267d664cd4b57a9c394486f6001b2.json new file mode 100644 index 0000000..6991826 --- /dev/null +++ b/backend/.sqlx/query-74fdd241eb54952015786937b057d437b60267d664cd4b57a9c394486f6001b2.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT name FROM plugins WHERE is_active = true AND is_core = true", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "name", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "74fdd241eb54952015786937b057d437b60267d664cd4b57a9c394486f6001b2" +} diff --git a/backend/.sqlx/query-75a8631bc9e4f89b9e1b6d1899711e148228dfa0a6760fbba7ddf2711fb16e6e.json b/backend/.sqlx/query-75a8631bc9e4f89b9e1b6d1899711e148228dfa0a6760fbba7ddf2711fb16e6e.json new file mode 100644 index 0000000..dda7c6d --- /dev/null +++ b/backend/.sqlx/query-75a8631bc9e4f89b9e1b6d1899711e148228dfa0a6760fbba7ddf2711fb16e6e.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n pf.fork_proposal_id,\n p.title AS fork_title,\n u.username AS forked_by_username,\n pf.forked_at,\n pf.fork_reason,\n pf.is_competing,\n pf.is_merged\n FROM proposal_forks pf\n JOIN proposals p ON p.id = pf.fork_proposal_id\n JOIN users u ON u.id = pf.forked_by\n WHERE pf.source_proposal_id = $1\n ORDER BY 
pf.forked_at DESC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "fork_proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "fork_title", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "forked_by_username", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "forked_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 4, + "name": "fork_reason", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "is_competing", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "is_merged", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + false, + false + ] + }, + "hash": "75a8631bc9e4f89b9e1b6d1899711e148228dfa0a6760fbba7ddf2711fb16e6e" +} diff --git a/backend/.sqlx/query-7635e7f06daac3a495a2439bc893e345effd96be4a8760d4ca11538334d875c4.json b/backend/.sqlx/query-7635e7f06daac3a495a2439bc893e345effd96be4a8760d4ca11538334d875c4.json new file mode 100644 index 0000000..fce33c2 --- /dev/null +++ b/backend/.sqlx/query-7635e7f06daac3a495a2439bc893e345effd96be4a8760d4ca11538334d875c4.json @@ -0,0 +1,64 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n p.name,\n p.version,\n p.description,\n p.is_core,\n p.is_active as global_is_active,\n COALESCE(cp.is_active, false) as \"community_is_active!\",\n COALESCE(cp.settings, '{}'::jsonb) as \"settings!: serde_json::Value\",\n p.settings_schema as \"settings_schema: serde_json::Value\"\n FROM plugins p\n LEFT JOIN community_plugins cp\n ON cp.plugin_id = p.id AND cp.community_id = $1\n ORDER BY p.is_core DESC, p.name ASC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "version", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "is_core", + "type_info": "Bool" + }, + { + "ordinal": 4, + "name": "global_is_active", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "community_is_active!", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "settings!: serde_json::Value", + "type_info": "Jsonb" + }, + { + "ordinal": 7, + "name": "settings_schema: serde_json::Value", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + true, + false, + false, + null, + null, + true + ] + }, + "hash": "7635e7f06daac3a495a2439bc893e345effd96be4a8760d4ca11538334d875c4" +} diff --git a/backend/.sqlx/query-768935f0dff24d092ebaff3512026b9e6a0344c9d10ee0cf852d299c80bba6c8.json b/backend/.sqlx/query-768935f0dff24d092ebaff3512026b9e6a0344c9d10ee0cf852d299c80bba6c8.json new file mode 100644 index 0000000..9699cfb --- /dev/null +++ b/backend/.sqlx/query-768935f0dff24d092ebaff3512026b9e6a0344c9d10ee0cf852d299c80bba6c8.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE proposal_amendments SET\n support_count = (SELECT COUNT(*) FROM amendment_support WHERE amendment_id = $1 AND support_type = 'support'),\n oppose_count = (SELECT COUNT(*) FROM amendment_support WHERE amendment_id = $1 AND support_type = 'oppose')\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "768935f0dff24d092ebaff3512026b9e6a0344c9d10ee0cf852d299c80bba6c8" +} diff --git a/backend/.sqlx/query-778a5cc33f4b205914711921c98653f826d2adc8175c9297efb485caf4c3d96d.json 
b/backend/.sqlx/query-778a5cc33f4b205914711921c98653f826d2adc8175c9297efb485caf4c3d96d.json new file mode 100644 index 0000000..2a7995e --- /dev/null +++ b/backend/.sqlx/query-778a5cc33f4b205914711921c98653f826d2adc8175c9297efb485caf4c3d96d.json @@ -0,0 +1,35 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n p.name,\n COALESCE(cp.is_active, false) as \"is_active!\",\n COALESCE(cp.settings, '{}'::jsonb) as \"settings!: serde_json::Value\"\n FROM plugins p\n LEFT JOIN community_plugins cp\n ON cp.plugin_id = p.id AND cp.community_id = $1\n WHERE p.is_active = true\n AND p.name = ANY($2)\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "is_active!", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "settings!: serde_json::Value", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "TextArray" + ] + }, + "nullable": [ + false, + null, + null + ] + }, + "hash": "778a5cc33f4b205914711921c98653f826d2adc8175c9297efb485caf4c3d96d" +} diff --git a/backend/.sqlx/query-78418e5f9824bceb1937348a77d50ced7a3e2f21ec72940fe9a70e77cb3923a0.json b/backend/.sqlx/query-78418e5f9824bceb1937348a77d50ced7a3e2f21ec72940fe9a70e77cb3923a0.json new file mode 100644 index 0000000..ba086d1 --- /dev/null +++ b/backend/.sqlx/query-78418e5f9824bceb1937348a77d50ced7a3e2f21ec72940fe9a70e77cb3923a0.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO plugins (name, version, description, is_core, is_active, settings_schema)\n VALUES ($1, $2, $3, $4, true, $5)\n ON CONFLICT (name) DO UPDATE\n SET version = EXCLUDED.version,\n description = EXCLUDED.description,\n is_core = EXCLUDED.is_core,\n settings_schema = EXCLUDED.settings_schema", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Text", + "Bool", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "78418e5f9824bceb1937348a77d50ced7a3e2f21ec72940fe9a70e77cb3923a0" +} diff --git a/backend/.sqlx/query-7a3f67d8cafc3611f94a8cb2406ab4c0e90a3619f7b9e4b6ad67fe11a46b91d9.json b/backend/.sqlx/query-7a3f67d8cafc3611f94a8cb2406ab4c0e90a3619f7b9e4b6ad67fe11a46b91d9.json new file mode 100644 index 0000000..2d2d5eb --- /dev/null +++ b/backend/.sqlx/query-7a3f67d8cafc3611f94a8cb2406ab4c0e90a3619f7b9e4b6ad67fe11a46b91d9.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO export_audit_log (job_id, action_type, actor_id)\n SELECT $1, 'downloaded', $2\n FROM export_jobs WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "7a3f67d8cafc3611f94a8cb2406ab4c0e90a3619f7b9e4b6ad67fe11a46b91d9" +} diff --git a/backend/.sqlx/query-7b06af69ee03943c64083e5bc94cb6dbd6f5017ad44546dff7582c31345edc51.json b/backend/.sqlx/query-7b06af69ee03943c64083e5bc94cb6dbd6f5017ad44546dff7582c31345edc51.json new file mode 100644 index 0000000..16e35e2 --- /dev/null +++ b/backend/.sqlx/query-7b06af69ee03943c64083e5bc94cb6dbd6f5017ad44546dff7582c31345edc51.json @@ -0,0 +1,99 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO delegations (delegator_id, delegate_id, scope, community_id, topic_id, proposal_id)\n VALUES ($1, $2, $3::delegation_scope, $4, $5, $6)\n RETURNING id, delegator_id, delegate_id, scope as \"scope: DelegationScope\", \n community_id, topic_id, proposal_id, is_active, created_at", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": 
"delegator_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "delegate_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "scope: DelegationScope", + "type_info": { + "Custom": { + "name": "delegation_scope", + "kind": { + "Enum": [ + "global", + "community", + "topic", + "proposal" + ] + } + } + } + }, + { + "ordinal": 4, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 5, + "name": "topic_id", + "type_info": "Uuid" + }, + { + "ordinal": 6, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 7, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + { + "Custom": { + "name": "delegation_scope", + "kind": { + "Enum": [ + "global", + "community", + "topic", + "proposal" + ] + } + } + }, + "Uuid", + "Uuid", + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + true, + true, + false, + false + ] + }, + "hash": "7b06af69ee03943c64083e5bc94cb6dbd6f5017ad44546dff7582c31345edc51" +} diff --git a/backend/.sqlx/query-7c8ec9e26f553fc211b5bd819e31722a054bb9414950d16a8ebd451dc18d4f3b.json b/backend/.sqlx/query-7c8ec9e26f553fc211b5bd819e31722a054bb9414950d16a8ebd451dc18d4f3b.json new file mode 100644 index 0000000..e311f1f --- /dev/null +++ b/backend/.sqlx/query-7c8ec9e26f553fc211b5bd819e31722a054bb9414950d16a8ebd451dc18d4f3b.json @@ -0,0 +1,76 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO proposal_resources (proposal_id, title, resource_type, content, url, author_name, created_by)\n VALUES ($1, $2, $3, $4, $5, $6, $7)\n RETURNING id, proposal_id, title, resource_type, content, url, author_name, sort_order, created_at", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "resource_type", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "content", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "url", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "author_name", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "sort_order", + "type_info": "Int4" + }, + { + "ordinal": 8, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Varchar", + "Text", + "Varchar", + "Varchar", + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + true, + true, + false, + false + ] + }, + "hash": "7c8ec9e26f553fc211b5bd819e31722a054bb9414950d16a8ebd451dc18d4f3b" +} diff --git a/backend/.sqlx/query-7cc78deb0836ac802b017122ac50fe71bbe8af86c7aa5c8b90b10d1672a9d31a.json b/backend/.sqlx/query-7cc78deb0836ac802b017122ac50fe71bbe8af86c7aa5c8b90b10d1672a9d31a.json new file mode 100644 index 0000000..43e3545 --- /dev/null +++ b/backend/.sqlx/query-7cc78deb0836ac802b017122ac50fe71bbe8af86c7aa5c8b90b10d1672a9d31a.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT created_by FROM invitations WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "created_by", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "7cc78deb0836ac802b017122ac50fe71bbe8af86c7aa5c8b90b10d1672a9d31a" +} diff --git 
a/backend/.sqlx/query-7cf8e3b8f83a16c49ce2f7e66673d69e773f71abded50c5bb29884f9dff628bd.json b/backend/.sqlx/query-7cf8e3b8f83a16c49ce2f7e66673d69e773f71abded50c5bb29884f9dff628bd.json new file mode 100644 index 0000000..30c5149 --- /dev/null +++ b/backend/.sqlx/query-7cf8e3b8f83a16c49ce2f7e66673d69e773f71abded50c5bb29884f9dff628bd.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id FROM roles WHERE id = $1 AND (community_id = $2 OR community_id IS NULL)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "7cf8e3b8f83a16c49ce2f7e66673d69e773f71abded50c5bb29884f9dff628bd" +} diff --git a/backend/.sqlx/query-7f0faa7ca48a6a56274f7d05c1fb4ad2d8dd384fdb7bc178220f07481236c8b2.json b/backend/.sqlx/query-7f0faa7ca48a6a56274f7d05c1fb4ad2d8dd384fdb7bc178220f07481236c8b2.json new file mode 100644 index 0000000..f1f1f96 --- /dev/null +++ b/backend/.sqlx/query-7f0faa7ca48a6a56274f7d05c1fb4ad2d8dd384fdb7bc178220f07481236c8b2.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE gitlab_issues SET proposal_id = $1 WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "7f0faa7ca48a6a56274f7d05c1fb4ad2d8dd384fdb7bc178220f07481236c8b2" +} diff --git a/backend/.sqlx/query-7f497e0097bf329922d709282092b588975f7c2d8c508fbe49ab3d9d4c99c396.json b/backend/.sqlx/query-7f497e0097bf329922d709282092b588975f7c2d8c508fbe49ab3d9d4c99c396.json new file mode 100644 index 0000000..a35c6c1 --- /dev/null +++ b/backend/.sqlx/query-7f497e0097bf329922d709282092b588975f7c2d8c508fbe49ab3d9d4c99c396.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM community_members WHERE community_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "7f497e0097bf329922d709282092b588975f7c2d8c508fbe49ab3d9d4c99c396" +} diff --git a/backend/.sqlx/query-7fdca4a1f7c40789d44de54a739eca0bb898c9fbf3e89784e2f784630c56dba5.json b/backend/.sqlx/query-7fdca4a1f7c40789d44de54a739eca0bb898c9fbf3e89784e2f784630c56dba5.json new file mode 100644 index 0000000..14c70fe --- /dev/null +++ b/backend/.sqlx/query-7fdca4a1f7c40789d44de54a739eca0bb898c9fbf3e89784e2f784630c56dba5.json @@ -0,0 +1,71 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id, proposal_id, stance::text AS \"stance!\",\n title, content, author_id,\n upvotes, downvotes, quality_score::float8 AS quality_score\n FROM deliberation_arguments\n WHERE proposal_id = $1 AND NOT is_hidden AND parent_id IS NULL\n ORDER BY quality_score DESC NULLS LAST\n LIMIT $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "stance!", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "content", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "author_id", + "type_info": "Uuid" + }, + { + "ordinal": 6, + "name": "upvotes", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "downvotes", + "type_info": "Int4" + }, + { + "ordinal": 8, + "name": "quality_score", + "type_info": "Float8" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Int8" 
+ ] + }, + "nullable": [ + false, + false, + null, + false, + false, + false, + false, + false, + null + ] + }, + "hash": "7fdca4a1f7c40789d44de54a739eca0bb898c9fbf3e89784e2f784630c56dba5" +} diff --git a/backend/.sqlx/query-82aa3e3878c98cbdcc5bbefd11acd7b19521314a38ab9085516eb81e39354d2a.json b/backend/.sqlx/query-82aa3e3878c98cbdcc5bbefd11acd7b19521314a38ab9085516eb81e39354d2a.json new file mode 100644 index 0000000..e52edbc --- /dev/null +++ b/backend/.sqlx/query-82aa3e3878c98cbdcc5bbefd11acd7b19521314a38ab9085516eb81e39354d2a.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT voter_id, option_id, rank FROM ranked_votes \n WHERE proposal_id = $1 ORDER BY voter_id, rank", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "voter_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "option_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "rank", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "82aa3e3878c98cbdcc5bbefd11acd7b19521314a38ab9085516eb81e39354d2a" +} diff --git a/backend/.sqlx/query-860d54897714862bf6312fc00b440d414b9083efd695f04c31f26510d0254208.json b/backend/.sqlx/query-860d54897714862bf6312fc00b440d414b9083efd695f04c31f26510d0254208.json new file mode 100644 index 0000000..7fb8c48 --- /dev/null +++ b/backend/.sqlx/query-860d54897714862bf6312fc00b440d414b9083efd695f04c31f26510d0254208.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT c.id, c.proposal_id, c.author_id, c.content, c.parent_id, c.created_at,\n u.username as author_name\n FROM comments c\n JOIN users u ON c.author_id = u.id\n WHERE c.proposal_id = $1\n ORDER BY c.created_at ASC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "author_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "content", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "parent_id", + "type_info": "Uuid" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "author_name", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + false, + false + ] + }, + "hash": "860d54897714862bf6312fc00b440d414b9083efd695f04c31f26510d0254208" +} diff --git a/backend/.sqlx/query-865245efea368b2c0b937356889a07b9a04954165e700f4c7c35687c13f2be27.json b/backend/.sqlx/query-865245efea368b2c0b937356889a07b9a04954165e700f4c7c35687c13f2be27.json new file mode 100644 index 0000000..8f89b62 --- /dev/null +++ b/backend/.sqlx/query-865245efea368b2c0b937356889a07b9a04954165e700f4c7c35687c13f2be27.json @@ -0,0 +1,71 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n total_violations, confirmed_violations, dismissed_violations,\n total_sanctions, active_sanctions, warnings_count,\n current_escalation_level, last_violation_at, is_in_good_standing\n FROM user_violation_summary\n WHERE user_id = $1 AND community_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "total_violations", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "confirmed_violations", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "dismissed_violations", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "total_sanctions", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": 
"active_sanctions", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "warnings_count", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "current_escalation_level", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "last_violation_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 8, + "name": "is_in_good_standing", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + false + ] + }, + "hash": "865245efea368b2c0b937356889a07b9a04954165e700f4c7c35687c13f2be27" +} diff --git a/backend/.sqlx/query-86645359fb0da9b7441326acb5ac4d8be2c0eda7f3ef0d97d0b4d661aa0d62eb.json b/backend/.sqlx/query-86645359fb0da9b7441326acb5ac4d8be2c0eda7f3ef0d97d0b4d661aa0d62eb.json new file mode 100644 index 0000000..5f56ce5 --- /dev/null +++ b/backend/.sqlx/query-86645359fb0da9b7441326acb5ac4d8be2c0eda7f3ef0d97d0b4d661aa0d62eb.json @@ -0,0 +1,70 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO roles (name, display_name, description, color, community_id, is_default, priority)\n VALUES ($1, $2, $3, $4, $5, $6, $7)\n RETURNING id, name, display_name, description, color, is_system, is_default, priority", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "color", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "is_system", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "is_default", + "type_info": "Bool" + }, + { + "ordinal": 7, + "name": "priority", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Text", + "Varchar", + "Uuid", + "Bool", + "Int4" + ] + }, + "nullable": [ + false, + false, + false, + true, + true, + false, + false, + false + ] + }, + "hash": "86645359fb0da9b7441326acb5ac4d8be2c0eda7f3ef0d97d0b4d661aa0d62eb" +} diff --git a/backend/.sqlx/query-87426464ee1ed01c4e1940b86ed461af98bcfe50c40df7c869c1c03395a79e07.json b/backend/.sqlx/query-87426464ee1ed01c4e1940b86ed461af98bcfe50c40df7c869c1c03395a79e07.json new file mode 100644 index 0000000..d4ec549 --- /dev/null +++ b/backend/.sqlx/query-87426464ee1ed01c4e1940b86ed461af98bcfe50c40df7c869c1c03395a79e07.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE conflict_cases SET status = 'proposal_phase', updated_at = NOW() WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "87426464ee1ed01c4e1940b86ed461af98bcfe50c40df7c869c1c03395a79e07" +} diff --git a/backend/.sqlx/query-87f52f0262716fe497a6e8b42722cc11f88dacaa0bca746ebd5b474a734d9bb3.json b/backend/.sqlx/query-87f52f0262716fe497a6e8b42722cc11f88dacaa0bca746ebd5b474a734d9bb3.json new file mode 100644 index 0000000..53db0df --- /dev/null +++ b/backend/.sqlx/query-87f52f0262716fe497a6e8b42722cc11f88dacaa0bca746ebd5b474a734d9bb3.json @@ -0,0 +1,46 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n total_conflicts::bigint,\n resolved_conflicts::bigint,\n avg_resolution_days::float8,\n mediation_success_rate::float8,\n active_conflicts::bigint\n FROM get_conflict_statistics($1)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "total_conflicts", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": 
"resolved_conflicts", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "avg_resolution_days", + "type_info": "Float8" + }, + { + "ordinal": 3, + "name": "mediation_success_rate", + "type_info": "Float8" + }, + { + "ordinal": 4, + "name": "active_conflicts", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null, + null, + null, + null, + null + ] + }, + "hash": "87f52f0262716fe497a6e8b42722cc11f88dacaa0bca746ebd5b474a734d9bb3" +} diff --git a/backend/.sqlx/query-88ca29971ada21e0ff2b92315e39c7aefbd81215862c5c7149e2dbca93c5d1fe.json b/backend/.sqlx/query-88ca29971ada21e0ff2b92315e39c7aefbd81215862c5c7149e2dbca93c5d1fe.json new file mode 100644 index 0000000..0b89268 --- /dev/null +++ b/backend/.sqlx/query-88ca29971ada21e0ff2b92315e39c7aefbd81215862c5c7149e2dbca93c5d1fe.json @@ -0,0 +1,64 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n proposals_in_pipeline,\n proposals_needing_review,\n proposals_in_voting,\n decisions_made,\n avg_decision_time_hours::float8 AS avg_time,\n quorum_achievement_rate::float8 AS quorum_rate,\n stalled_proposals,\n bottleneck_phase\n FROM decision_load_metrics\n WHERE community_id = $1\n ORDER BY period_end DESC\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "proposals_in_pipeline", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "proposals_needing_review", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "proposals_in_voting", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "decisions_made", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "avg_time", + "type_info": "Float8" + }, + { + "ordinal": 5, + "name": "quorum_rate", + "type_info": "Float8" + }, + { + "ordinal": 6, + "name": "stalled_proposals", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "bottleneck_phase", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + null, + null, + false, + true + ] + }, + "hash": "88ca29971ada21e0ff2b92315e39c7aefbd81215862c5c7149e2dbca93c5d1fe" +} diff --git a/backend/.sqlx/query-8932b373514ebf8056f5934c5b02dd675f6fe1b1317688676d5c32e1f01065a7.json b/backend/.sqlx/query-8932b373514ebf8056f5934c5b02dd675f6fe1b1317688676d5c32e1f01065a7.json new file mode 100644 index 0000000..3b5d332 --- /dev/null +++ b/backend/.sqlx/query-8932b373514ebf8056f5934c5b02dd675f6fe1b1317688676d5c32e1f01065a7.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, username, email, display_name, status, created_at, expires_at\n FROM pending_registrations\n WHERE status = $1\n ORDER BY created_at DESC\n LIMIT 100", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "username", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "status", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "expires_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false, + false, + true, + true, + true, + true + ] + }, + "hash": "8932b373514ebf8056f5934c5b02dd675f6fe1b1317688676d5c32e1f01065a7" +} diff --git a/backend/.sqlx/query-89804ccea9075b137f7c7e47917f3477f770dfd7550ab97060b981849618a201.json 
b/backend/.sqlx/query-89804ccea9075b137f7c7e47917f3477f770dfd7550ab97060b981849618a201.json new file mode 100644 index 0000000..7eca8a6 --- /dev/null +++ b/backend/.sqlx/query-89804ccea9075b137f7c7e47917f3477f770dfd7550ab97060b981849618a201.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT approve_registration($1, $2)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "approve_registration", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "89804ccea9075b137f7c7e47917f3477f770dfd7550ab97060b981849618a201" +} diff --git a/backend/.sqlx/query-8b2ab82dd4fe83655e074b1edc9e80e2ca276a327e044ad5578fb15eba3ac3ee.json b/backend/.sqlx/query-8b2ab82dd4fe83655e074b1edc9e80e2ca276a327e044ad5578fb15eba3ac3ee.json new file mode 100644 index 0000000..67bdd14 --- /dev/null +++ b/backend/.sqlx/query-8b2ab82dd4fe83655e074b1edc9e80e2ca276a327e044ad5578fb15eba3ac3ee.json @@ -0,0 +1,60 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, name, category as \"category: PermissionCategory\", description, is_system\n FROM permissions ORDER BY category, name", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "category: PermissionCategory", + "type_info": { + "Custom": { + "name": "permission_category", + "kind": { + "Enum": [ + "platform", + "community", + "proposals", + "voting", + "moderation", + "plugins", + "users", + "integrations" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "is_system", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + true, + false + ] + }, + "hash": "8b2ab82dd4fe83655e074b1edc9e80e2ca276a327e044ad5578fb15eba3ac3ee" +} diff --git a/backend/.sqlx/query-8d3042b1d0415262c8729e52a4d8933bf52b484798b7e3530219b0117a60f5d1.json b/backend/.sqlx/query-8d3042b1d0415262c8729e52a4d8933bf52b484798b7e3530219b0117a60f5d1.json new file mode 100644 index 0000000..ace0d56 --- /dev/null +++ b/backend/.sqlx/query-8d3042b1d0415262c8729e52a4d8933bf52b484798b7e3530219b0117a60f5d1.json @@ -0,0 +1,91 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT i.*, u.username as creator_username, c.name as community_name\n FROM invitations i\n LEFT JOIN users u ON u.id = i.created_by\n LEFT JOIN communities c ON c.id = i.community_id\n WHERE ($1::boolean = true OR i.created_by = $2)\n AND ($3::uuid IS NULL OR i.community_id = $3)\n AND ($4::boolean IS NULL OR i.is_active = $4)\n ORDER BY i.created_at DESC\n LIMIT 100", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "code", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "created_by", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 5, + "name": "max_uses", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "uses_count", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "expires_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 8, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 9, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 10, + "name": "creator_username", + "type_info": "Varchar" + }, + { + 
"ordinal": 11, + "name": "community_name", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Bool", + "Uuid", + "Uuid", + "Bool" + ] + }, + "nullable": [ + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + false, + false + ] + }, + "hash": "8d3042b1d0415262c8729e52a4d8933bf52b484798b7e3530219b0117a60f5d1" +} diff --git a/backend/.sqlx/query-8d94635535054f82b5185372543c7347a8fe98d8402d99130a29520eab53bc73.json b/backend/.sqlx/query-8d94635535054f82b5185372543c7347a8fe98d8402d99130a29520eab53bc73.json new file mode 100644 index 0000000..6e5ee07 --- /dev/null +++ b/backend/.sqlx/query-8d94635535054f82b5185372543c7347a8fe98d8402d99130a29520eab53bc73.json @@ -0,0 +1,108 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id,\n sequence_number,\n community_id,\n actor_user_id,\n actor_role,\n action_type::text AS \"action_type!\",\n target_type,\n target_id,\n reason,\n rule_reference,\n evidence,\n duration_hours,\n decision_type,\n entry_hash,\n created_at\n FROM moderation_ledger\n WHERE community_id IS NOT DISTINCT FROM $1\n ORDER BY sequence_number DESC\n LIMIT $2 OFFSET $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "sequence_number", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "actor_user_id", + "type_info": "Uuid" + }, + { + "ordinal": 4, + "name": "actor_role", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "action_type!", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "target_type", + "type_info": "Text" + }, + { + "ordinal": 7, + "name": "target_id", + "type_info": "Uuid" + }, + { + "ordinal": 8, + "name": "reason", + "type_info": "Text" + }, + { + "ordinal": 9, + "name": "rule_reference", + "type_info": "Text" + }, + { + "ordinal": 10, + "name": "evidence", + "type_info": "Jsonb" + }, + { + "ordinal": 11, + "name": "duration_hours", + "type_info": "Int4" + }, + { + "ordinal": 12, + "name": "decision_type", + "type_info": "Text" + }, + { + "ordinal": 13, + "name": "entry_hash", + "type_info": "Text" + }, + { + "ordinal": 14, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + false, + true, + false, + false, + null, + false, + false, + false, + true, + true, + true, + false, + false, + false + ] + }, + "hash": "8d94635535054f82b5185372543c7347a8fe98d8402d99130a29520eab53bc73" +} diff --git a/backend/.sqlx/query-8db75a6ddf63baa3ee89defbedaefb53f61892395025de17dce21451f81ba8b1.json b/backend/.sqlx/query-8db75a6ddf63baa3ee89defbedaefb53f61892395025de17dce21451f81ba8b1.json new file mode 100644 index 0000000..e9b0a9a --- /dev/null +++ b/backend/.sqlx/query-8db75a6ddf63baa3ee89defbedaefb53f61892395025de17dce21451f81ba8b1.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO voting_identities (user_id, community_id, pseudonym)\n VALUES ($1, $2, $3)\n ON CONFLICT (user_id, community_id) DO UPDATE SET user_id = $1\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Varchar" + ] + }, + "nullable": [ + false + ] + }, + "hash": "8db75a6ddf63baa3ee89defbedaefb53f61892395025de17dce21451f81ba8b1" +} diff --git a/backend/.sqlx/query-8e856e6e4081d2ca26e5e6097848fab90e055cbbc318956a3869750a0855fe3f.json 
b/backend/.sqlx/query-8e856e6e4081d2ca26e5e6097848fab90e055cbbc318956a3869750a0855fe3f.json new file mode 100644 index 0000000..962c88a --- /dev/null +++ b/backend/.sqlx/query-8e856e6e4081d2ca26e5e6097848fab90e055cbbc318956a3869750a0855fe3f.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO phase_participation (phase_instance_id, user_id, participation_type)\n SELECT pi.id, $2, $3\n FROM phase_instances pi\n JOIN workflow_instances wi ON wi.id = pi.workflow_instance_id\n WHERE wi.proposal_id = $1 AND pi.status = 'active'\n ON CONFLICT (phase_instance_id, user_id, participation_type) DO NOTHING", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "8e856e6e4081d2ca26e5e6097848fab90e055cbbc318956a3869750a0855fe3f" +} diff --git a/backend/.sqlx/query-917a021a7c54c19e1257d558ec4e40c0001c975d00af6255ab2707d3ebb045ac.json b/backend/.sqlx/query-917a021a7c54c19e1257d558ec4e40c0001c975d00af6255ab2707d3ebb045ac.json new file mode 100644 index 0000000..a7035a2 --- /dev/null +++ b/backend/.sqlx/query-917a021a7c54c19e1257d558ec4e40c0001c975d00af6255ab2707d3ebb045ac.json @@ -0,0 +1,82 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, name, display_name, description, icon, is_active, is_default,\n config_schema, default_config, complexity_level, supports_delegation\n FROM voting_method_plugins WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "icon", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "is_default", + "type_info": "Bool" + }, + { + "ordinal": 7, + "name": "config_schema", + "type_info": "Jsonb" + }, + { + "ordinal": 8, + "name": "default_config", + "type_info": "Jsonb" + }, + { + "ordinal": 9, + "name": "complexity_level", + "type_info": "Varchar" + }, + { + "ordinal": 10, + "name": "supports_delegation", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + true, + true, + false, + false, + true, + true, + true, + false + ] + }, + "hash": "917a021a7c54c19e1257d558ec4e40c0001c975d00af6255ab2707d3ebb045ac" +} diff --git a/backend/.sqlx/query-91b64087ce497ce4426c00ccdd8cbfde639f18d24df8362c11a211a9acf2fc92.json b/backend/.sqlx/query-91b64087ce497ce4426c00ccdd8cbfde639f18d24df8362c11a211a9acf2fc92.json new file mode 100644 index 0000000..cbecea7 --- /dev/null +++ b/backend/.sqlx/query-91b64087ce497ce4426c00ccdd8cbfde639f18d24df8362c11a211a9acf2fc92.json @@ -0,0 +1,107 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id,\n sequence_number,\n community_id,\n actor_user_id,\n actor_role,\n action_type::text AS \"action_type!\",\n target_type,\n target_id,\n reason,\n rule_reference,\n evidence,\n duration_hours,\n decision_type,\n entry_hash,\n created_at\n FROM moderation_ledger\n WHERE target_type = $1 AND target_id = $2\n ORDER BY sequence_number DESC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "sequence_number", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": 
"actor_user_id", + "type_info": "Uuid" + }, + { + "ordinal": 4, + "name": "actor_role", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "action_type!", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "target_type", + "type_info": "Text" + }, + { + "ordinal": 7, + "name": "target_id", + "type_info": "Uuid" + }, + { + "ordinal": 8, + "name": "reason", + "type_info": "Text" + }, + { + "ordinal": 9, + "name": "rule_reference", + "type_info": "Text" + }, + { + "ordinal": 10, + "name": "evidence", + "type_info": "Jsonb" + }, + { + "ordinal": 11, + "name": "duration_hours", + "type_info": "Int4" + }, + { + "ordinal": 12, + "name": "decision_type", + "type_info": "Text" + }, + { + "ordinal": 13, + "name": "entry_hash", + "type_info": "Text" + }, + { + "ordinal": 14, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text", + "Uuid" + ] + }, + "nullable": [ + false, + false, + true, + false, + false, + null, + false, + false, + false, + true, + true, + true, + false, + false, + false + ] + }, + "hash": "91b64087ce497ce4426c00ccdd8cbfde639f18d24df8362c11a211a9acf2fc92" +} diff --git a/backend/.sqlx/query-92461256ad7b62764b2bd75674ccbfc11df6648d6d856e3e68fc80801457c555.json b/backend/.sqlx/query-92461256ad7b62764b2bd75674ccbfc11df6648d6d856e3e68fc80801457c555.json new file mode 100644 index 0000000..70fb272 --- /dev/null +++ b/backend/.sqlx/query-92461256ad7b62764b2bd75674ccbfc11df6648d6d856e3e68fc80801457c555.json @@ -0,0 +1,72 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO communities (name, slug, description)\n VALUES ($1, $2, $3)\n RETURNING *\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "slug", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "settings", + "type_info": "Jsonb" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 7, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "created_by", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Text" + ] + }, + "nullable": [ + false, + false, + false, + true, + false, + false, + false, + false, + true + ] + }, + "hash": "92461256ad7b62764b2bd75674ccbfc11df6648d6d856e3e68fc80801457c555" +} diff --git a/backend/.sqlx/query-93883e88af152aca23a39dadd95669f025294cb532a892e58891a0e756e9c784.json b/backend/.sqlx/query-93883e88af152aca23a39dadd95669f025294cb532a892e58891a0e756e9c784.json new file mode 100644 index 0000000..bfc7f44 --- /dev/null +++ b/backend/.sqlx/query-93883e88af152aca23a39dadd95669f025294cb532a892e58891a0e756e9c784.json @@ -0,0 +1,70 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id, proposal_id, version_number, title, content,\n change_type, change_summary, created_by, created_at\n FROM proposal_versions\n WHERE proposal_id = $1\n ORDER BY version_number DESC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "version_number", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "content", + "type_info": "Text" + }, 
+ { + "ordinal": 5, + "name": "change_type", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "change_summary", + "type_info": "Text" + }, + { + "ordinal": 7, + "name": "created_by", + "type_info": "Uuid" + }, + { + "ordinal": 8, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + false, + false + ] + }, + "hash": "93883e88af152aca23a39dadd95669f025294cb532a892e58891a0e756e9c784" +} diff --git a/backend/.sqlx/query-93c4fe897770127740a52c43b14f2007bfa6faf8743dc07611ac22e9140e89f6.json b/backend/.sqlx/query-93c4fe897770127740a52c43b14f2007bfa6faf8743dc07611ac22e9140e89f6.json new file mode 100644 index 0000000..c3313f1 --- /dev/null +++ b/backend/.sqlx/query-93c4fe897770127740a52c43b14f2007bfa6faf8743dc07611ac22e9140e89f6.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT reject_community($1, $2, $3)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "reject_community", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "93c4fe897770127740a52c43b14f2007bfa6faf8743dc07611ac22e9140e89f6" +} diff --git a/backend/.sqlx/query-942753e2a11713b9d4b46a52f4a6426629f95c7a91598f351741f9bc100e243b.json b/backend/.sqlx/query-942753e2a11713b9d4b46a52f4a6426629f95c7a91598f351741f9bc100e243b.json new file mode 100644 index 0000000..79aac9c --- /dev/null +++ b/backend/.sqlx/query-942753e2a11713b9d4b46a52f4a6426629f95c7a91598f351741f9bc100e243b.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE compromise_proposals SET\n party_a_response = $2,\n party_a_response_at = NOW(),\n party_a_feedback = $3,\n updated_at = NOW()\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Text" + ] + }, + "nullable": [] + }, + "hash": "942753e2a11713b9d4b46a52f4a6426629f95c7a91598f351741f9bc100e243b" +} diff --git a/backend/.sqlx/query-94499cff482a1533a32723bc9924bf62af0a9ec7cfe338b1c97336e6a1ea5dee.json b/backend/.sqlx/query-94499cff482a1533a32723bc9924bf62af0a9ec7cfe338b1c97336e6a1ea5dee.json new file mode 100644 index 0000000..83e8da6 --- /dev/null +++ b/backend/.sqlx/query-94499cff482a1533a32723bc9924bf62af0a9ec7cfe338b1c97336e6a1ea5dee.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, remote_instance_id, remote_community_id\n FROM community_federations\n WHERE local_community_id = $1 \n AND status = 'active' \n AND sync_proposals = true\n AND sync_direction IN ('push', 'bidirectional')", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "remote_instance_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "remote_community_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "94499cff482a1533a32723bc9924bf62af0a9ec7cfe338b1c97336e6a1ea5dee" +} diff --git a/backend/.sqlx/query-94b98a4fccec6a48b27cdf8d3ea8740a81baac9e53a5c458acb7f9692893f069.json b/backend/.sqlx/query-94b98a4fccec6a48b27cdf8d3ea8740a81baac9e53a5c458acb7f9692893f069.json new file mode 100644 index 0000000..bd237cb --- /dev/null +++ b/backend/.sqlx/query-94b98a4fccec6a48b27cdf8d3ea8740a81baac9e53a5c458acb7f9692893f069.json @@ -0,0 +1,31 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO comments (proposal_id, 
author_id, content, parent_id)\n VALUES ($1, $2, $3, $4)\n RETURNING id, created_at\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text", + "Uuid" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "94b98a4fccec6a48b27cdf8d3ea8740a81baac9e53a5c458acb7f9692893f069" +} diff --git a/backend/.sqlx/query-957b131c5ae23e306fe4634db068c611122ae61057c805c82413fb69ed015c58.json b/backend/.sqlx/query-957b131c5ae23e306fe4634db068c611122ae61057c805c82413fb69ed015c58.json new file mode 100644 index 0000000..73896ad --- /dev/null +++ b/backend/.sqlx/query-957b131c5ae23e306fe4634db068c611122ae61057c805c82413fb69ed015c58.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO community_settings (community_id) VALUES ($1) ON CONFLICT DO NOTHING", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "957b131c5ae23e306fe4634db068c611122ae61057c805c82413fb69ed015c58" +} diff --git a/backend/.sqlx/query-95f70efea97448688250d1647387c7d3dc1e4eec0dc2e6ba88f09e9746132d09.json b/backend/.sqlx/query-95f70efea97448688250d1647387c7d3dc1e4eec0dc2e6ba88f09e9746132d09.json new file mode 100644 index 0000000..7743cd3 --- /dev/null +++ b/backend/.sqlx/query-95f70efea97448688250d1647387c7d3dc1e4eec0dc2e6ba88f09e9746132d09.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE phase_instances pi\n SET participant_count = (\n SELECT COUNT(DISTINCT user_id) \n FROM phase_participation \n WHERE phase_instance_id = pi.id\n )\n FROM workflow_instances wi\n WHERE wi.id = pi.workflow_instance_id \n AND wi.proposal_id = $1 \n AND pi.status = 'active'", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "95f70efea97448688250d1647387c7d3dc1e4eec0dc2e6ba88f09e9746132d09" +} diff --git a/backend/.sqlx/query-965e42468c325a893d39c2eeefbea254b37e04f0d067d88988cf4ece97b0e818.json b/backend/.sqlx/query-965e42468c325a893d39c2eeefbea254b37e04f0d067d88988cf4ece97b0e818.json new file mode 100644 index 0000000..8d33eb0 --- /dev/null +++ b/backend/.sqlx/query-965e42468c325a893d39c2eeefbea254b37e04f0d067d88988cf4ece97b0e818.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO ranked_votes (proposal_id, voter_id, option_id, rank) VALUES ($1, $2, $3, $4)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Uuid", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "965e42468c325a893d39c2eeefbea254b37e04f0d067d88988cf4ece97b0e818" +} diff --git a/backend/.sqlx/query-9888b553ae9f5cd32aa1eb6cfee1abecc08787a0f7fd7526956d30d62c1801ff.json b/backend/.sqlx/query-9888b553ae9f5cd32aa1eb6cfee1abecc08787a0f7fd7526956d30d62c1801ff.json new file mode 100644 index 0000000..77be73c --- /dev/null +++ b/backend/.sqlx/query-9888b553ae9f5cd32aa1eb6cfee1abecc08787a0f7fd7526956d30d62c1801ff.json @@ -0,0 +1,70 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT gi.id, gi.gitlab_iid, gi.title, gi.description, gi.state,\n gi.author_username, gi.labels, gi.proposal_id, gi.gitlab_created_at\n FROM gitlab_issues gi\n JOIN gitlab_connections gc ON gi.connection_id = gc.id\n WHERE gc.community_id = $1\n ORDER BY gi.gitlab_iid DESC\n LIMIT 100", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": 
"gitlab_iid", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "state", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "author_username", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "labels", + "type_info": "TextArray" + }, + { + "ordinal": 7, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 8, + "name": "gitlab_created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + true, + false, + true, + true, + true, + true + ] + }, + "hash": "9888b553ae9f5cd32aa1eb6cfee1abecc08787a0f7fd7526956d30d62c1801ff" +} diff --git a/backend/.sqlx/query-9b9965ea0437e0f8e3467ba2b237d6a84c74bbc03c389117d8f3f6e71273b587.json b/backend/.sqlx/query-9b9965ea0437e0f8e3467ba2b237d6a84c74bbc03c389117d8f3f6e71273b587.json new file mode 100644 index 0000000..d5568d2 --- /dev/null +++ b/backend/.sqlx/query-9b9965ea0437e0f8e3467ba2b237d6a84c74bbc03c389117d8f3f6e71273b587.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id FROM invitations WHERE code = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "9b9965ea0437e0f8e3467ba2b237d6a84c74bbc03c389117d8f3f6e71273b587" +} diff --git a/backend/.sqlx/query-9beff9c1f4daa7453bd6f13f3692b320f1f82ca87bed75f8a85e87882ac4bfc1.json b/backend/.sqlx/query-9beff9c1f4daa7453bd6f13f3692b320f1f82ca87bed75f8a85e87882ac4bfc1.json new file mode 100644 index 0000000..5535bf2 --- /dev/null +++ b/backend/.sqlx/query-9beff9c1f4daa7453bd6f13f3692b320f1f82ca87bed75f8a85e87882ac4bfc1.json @@ -0,0 +1,64 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id, proposal_id, title, description,\n status::text AS \"status!\", proposed_by,\n support_count, oppose_count\n FROM proposal_amendments\n WHERE proposal_id = $1\n ORDER BY proposed_at DESC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "status!", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "proposed_by", + "type_info": "Uuid" + }, + { + "ordinal": 6, + "name": "support_count", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "oppose_count", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + null, + false, + false, + false + ] + }, + "hash": "9beff9c1f4daa7453bd6f13f3692b320f1f82ca87bed75f8a85e87882ac4bfc1" +} diff --git a/backend/.sqlx/query-9c3b9cd35a4c58e67a15ad0372c18b6c5fc0a969b9aec88508cea8388c2fbe55.json b/backend/.sqlx/query-9c3b9cd35a4c58e67a15ad0372c18b6c5fc0a969b9aec88508cea8388c2fbe55.json new file mode 100644 index 0000000..2ffcc0f --- /dev/null +++ b/backend/.sqlx/query-9c3b9cd35a4c58e67a15ad0372c18b6c5fc0a969b9aec88508cea8388c2fbe55.json @@ -0,0 +1,55 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO proposal_positions (proposal_id, user_id, position, reasoning)\n VALUES ($1, $2, $3, $4)\n ON CONFLICT (proposal_id, user_id) \n DO UPDATE SET position = $3, reasoning = $4, updated_at = NOW()\n 
RETURNING id, proposal_id, user_id, position, reasoning, created_at", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "user_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "position", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "reasoning", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Varchar", + "Text" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + false + ] + }, + "hash": "9c3b9cd35a4c58e67a15ad0372c18b6c5fc0a969b9aec88508cea8388c2fbe55" +} diff --git a/backend/.sqlx/query-9c4016766b52e727103f5d09c7572fa90d793c5b391cfe99809d2d8825b9c9d6.json b/backend/.sqlx/query-9c4016766b52e727103f5d09c7572fa90d793c5b391cfe99809d2d8825b9c9d6.json new file mode 100644 index 0000000..40e1039 --- /dev/null +++ b/backend/.sqlx/query-9c4016766b52e727103f5d09c7572fa90d793c5b391cfe99809d2d8825b9c9d6.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT c.id, c.name, c.slug, cm.role\n FROM communities c\n JOIN community_members cm ON c.id = cm.community_id\n WHERE cm.user_id = $1 AND c.is_active = true\n ORDER BY cm.joined_at DESC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "slug", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "role", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false + ] + }, + "hash": "9c4016766b52e727103f5d09c7572fa90d793c5b391cfe99809d2d8825b9c9d6" +} diff --git a/backend/.sqlx/query-9e78a21e3bae2928e4ce353423030c59deb97bdbf242e8d4b8e2af11d65596f1.json b/backend/.sqlx/query-9e78a21e3bae2928e4ce353423030c59deb97bdbf242e8d4b8e2af11d65596f1.json new file mode 100644 index 0000000..446b0b0 --- /dev/null +++ b/backend/.sqlx/query-9e78a21e3bae2928e4ce353423030c59deb97bdbf242e8d4b8e2af11d65596f1.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n v.id, v.proposal_id,\n encode(sha256(v.voter_id::text::bytea), 'hex') AS voter_hash,\n v.created_at\n FROM votes v\n JOIN proposals p ON p.id = v.proposal_id\n WHERE p.community_id = $1\n ORDER BY v.created_at DESC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "voter_hash", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + null, + false + ] + }, + "hash": "9e78a21e3bae2928e4ce353423030c59deb97bdbf242e8d4b8e2af11d65596f1" +} diff --git a/backend/.sqlx/query-9f011d836f16bc90ba585c43424fbb960ee5cadd5f50088e252655f5f79612fe.json b/backend/.sqlx/query-9f011d836f16bc90ba585c43424fbb960ee5cadd5f50088e252655f5f79612fe.json new file mode 100644 index 0000000..ad9a83a --- /dev/null +++ b/backend/.sqlx/query-9f011d836f16bc90ba585c43424fbb960ee5cadd5f50088e252655f5f79612fe.json @@ -0,0 +1,76 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n pl.current_status::text AS \"status!\",\n pl.current_version,\n pl.submitted_at,\n pl.activated_at,\n pl.voting_started_at,\n 
pl.resolved_at,\n pl.revision_count,\n pl.fork_count,\n pl.amendment_count,\n pl.forked_from_id\n FROM proposal_lifecycle pl\n WHERE pl.proposal_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "status!", + "type_info": "Text" + }, + { + "ordinal": 1, + "name": "current_version", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "submitted_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 3, + "name": "activated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 4, + "name": "voting_started_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 5, + "name": "resolved_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "revision_count", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "fork_count", + "type_info": "Int4" + }, + { + "ordinal": 8, + "name": "amendment_count", + "type_info": "Int4" + }, + { + "ordinal": 9, + "name": "forked_from_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null, + false, + true, + true, + true, + true, + false, + false, + false, + true + ] + }, + "hash": "9f011d836f16bc90ba585c43424fbb960ee5cadd5f50088e252655f5f79612fe" +} diff --git a/backend/.sqlx/query-a1df48d19975013b68c2e57f90a4c0691d7f31a83ffc92628d918959daba3e36.json b/backend/.sqlx/query-a1df48d19975013b68c2e57f90a4c0691d7f31a83ffc92628d918959daba3e36.json new file mode 100644 index 0000000..5a615e6 --- /dev/null +++ b/backend/.sqlx/query-a1df48d19975013b68c2e57f90a4c0691d7f31a83ffc92628d918959daba3e36.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(DISTINCT voter_id) FROM votes WHERE proposal_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "a1df48d19975013b68c2e57f90a4c0691d7f31a83ffc92628d918959daba3e36" +} diff --git a/backend/.sqlx/query-a2d2cf6227a9e713d29610e2c0f0e94146f1798257a9dd68325487329eb6e24c.json b/backend/.sqlx/query-a2d2cf6227a9e713d29610e2c0f0e94146f1798257a9dd68325487329eb6e24c.json new file mode 100644 index 0000000..269f807 --- /dev/null +++ b/backend/.sqlx/query-a2d2cf6227a9e713d29610e2c0f0e94146f1798257a9dd68325487329eb6e24c.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE federated_instances SET last_sync_at = NOW(), total_syncs = total_syncs + 1 WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "a2d2cf6227a9e713d29610e2c0f0e94146f1798257a9dd68325487329eb6e24c" +} diff --git a/backend/.sqlx/query-a57268c9e448ddc42290fce152a7d81d332eccd8c90b962b7ef0cdaba6d8a3b0.json b/backend/.sqlx/query-a57268c9e448ddc42290fce152a7d81d332eccd8c90b962b7ef0cdaba6d8a3b0.json new file mode 100644 index 0000000..56c509d --- /dev/null +++ b/backend/.sqlx/query-a57268c9e448ddc42290fce152a7d81d332eccd8c90b962b7ef0cdaba6d8a3b0.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT * FROM get_exportable_proposals($1, true, NULL, NULL)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "content", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "author_id", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "status", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 
6, + "name": "vote_count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null, + null, + null, + null, + null, + null, + null + ] + }, + "hash": "a57268c9e448ddc42290fce152a7d81d332eccd8c90b962b7ef0cdaba6d8a3b0" +} diff --git a/backend/.sqlx/query-a65fb2ba103206d6ad4c761ec1fb62638fc50474679c751183013efc7cb0edbd.json b/backend/.sqlx/query-a65fb2ba103206d6ad4c761ec1fb62638fc50474679c751183013efc7cb0edbd.json new file mode 100644 index 0000000..57ac15a --- /dev/null +++ b/backend/.sqlx/query-a65fb2ba103206d6ad4c761ec1fb62638fc50474679c751183013efc7cb0edbd.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO user_roles (user_id, role_id, community_id, granted_by, expires_at)\n VALUES ($1, $2, $3, $4, $5)\n ON CONFLICT (user_id, role_id, community_id) DO UPDATE SET\n granted_by = $4, expires_at = $5, granted_at = NOW()", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Uuid", + "Uuid", + "Timestamptz" + ] + }, + "nullable": [] + }, + "hash": "a65fb2ba103206d6ad4c761ec1fb62638fc50474679c751183013efc7cb0edbd" +} diff --git a/backend/.sqlx/query-a8414c7b067366f56d56b3a465b248209e3800c25fcea8dadc2f63fdca72e82f.json b/backend/.sqlx/query-a8414c7b067366f56d56b3a465b248209e3800c25fcea8dadc2f63fdca72e82f.json new file mode 100644 index 0000000..da3dcd4 --- /dev/null +++ b/backend/.sqlx/query-a8414c7b067366f56d56b3a465b248209e3800c25fcea8dadc2f63fdca72e82f.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, name, version, description, is_core, is_active, settings_schema FROM plugins WHERE name = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "version", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "is_core", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "settings_schema", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false, + false, + true, + false, + false, + true + ] + }, + "hash": "a8414c7b067366f56d56b3a465b248209e3800c25fcea8dadc2f63fdca72e82f" +} diff --git a/backend/.sqlx/query-a9d4611cf695893da6a962355b974077340a56497e7bc9100f288cb1195267ad.json b/backend/.sqlx/query-a9d4611cf695893da6a962355b974077340a56497e7bc9100f288cb1195267ad.json new file mode 100644 index 0000000..4a34a03 --- /dev/null +++ b/backend/.sqlx/query-a9d4611cf695893da6a962355b974077340a56497e7bc9100f288cb1195267ad.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE export_jobs SET \n status = 'completed', completed_at = NOW(),\n record_count = $2, file_size_bytes = $3,\n download_expires_at = NOW() + INTERVAL '7 days'\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Int4", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "a9d4611cf695893da6a962355b974077340a56497e7bc9100f288cb1195267ad" +} diff --git a/backend/.sqlx/query-aa35c7c6645c9a7637f083650a2d9c898e15c49cc49a7df001fa6861ab47995e.json b/backend/.sqlx/query-aa35c7c6645c9a7637f083650a2d9c898e15c49cc49a7df001fa6861ab47995e.json new file mode 100644 index 0000000..5603e47 --- /dev/null +++ b/backend/.sqlx/query-aa35c7c6645c9a7637f083650a2d9c898e15c49cc49a7df001fa6861ab47995e.json @@ -0,0 +1,75 @@ +{ + 
"db_name": "PostgreSQL", + "query": "INSERT INTO gitlab_connections \n (community_id, gitlab_url, project_path, sync_issues, sync_merge_requests, auto_create_proposals)\n VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (community_id) DO UPDATE SET\n gitlab_url = $2,\n project_path = $3,\n sync_issues = COALESCE($4, gitlab_connections.sync_issues),\n sync_merge_requests = COALESCE($5, gitlab_connections.sync_merge_requests),\n auto_create_proposals = COALESCE($6, gitlab_connections.auto_create_proposals),\n updated_at = NOW()\n RETURNING id, community_id, gitlab_url, project_path, is_active,\n sync_issues, sync_merge_requests, auto_create_proposals, last_synced_at", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "gitlab_url", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "project_path", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "sync_issues", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "sync_merge_requests", + "type_info": "Bool" + }, + { + "ordinal": 7, + "name": "auto_create_proposals", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "last_synced_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Varchar", + "Bool", + "Bool", + "Bool" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + true + ] + }, + "hash": "aa35c7c6645c9a7637f083650a2d9c898e15c49cc49a7df001fa6861ab47995e" +} diff --git a/backend/.sqlx/query-aa69a761eb48721491a2275dc2040ac0d58ce45701dba74214e2af3685db59f1.json b/backend/.sqlx/query-aa69a761eb48721491a2275dc2040ac0d58ce45701dba74214e2af3685db59f1.json new file mode 100644 index 0000000..33bb440 --- /dev/null +++ b/backend/.sqlx/query-aa69a761eb48721491a2275dc2040ac0d58ce45701dba74214e2af3685db59f1.json @@ -0,0 +1,153 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE proposals \n SET title = COALESCE($1, title), \n description = COALESCE($2, description),\n updated_at = NOW()\n WHERE id = $3\n RETURNING id, community_id, author_id, title, description,\n status as \"status: _\", voting_method, voting_starts_at, voting_ends_at,\n created_at, updated_at, deliberation_phase as \"deliberation_phase: _\",\n inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at,\n min_read_time_seconds, facilitator_id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "author_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "status: _", + "type_info": { + "Custom": { + "name": "proposal_status", + "kind": { + "Enum": [ + "draft", + "discussion", + "voting", + "closed", + "archived", + "calculating" + ] + } + } + } + }, + { + "ordinal": 6, + "name": "voting_method", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "voting_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 8, + "name": "voting_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 10, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + 
"ordinal": 11, + "name": "deliberation_phase: _", + "type_info": { + "Custom": { + "name": "deliberation_phase", + "kind": { + "Enum": [ + "drafting", + "informing", + "discussing", + "voting", + "concluded" + ] + } + } + } + }, + { + "ordinal": 12, + "name": "inform_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 13, + "name": "inform_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 14, + "name": "discuss_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 15, + "name": "discuss_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 16, + "name": "min_read_time_seconds", + "type_info": "Int4" + }, + { + "ordinal": 17, + "name": "facilitator_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "aa69a761eb48721491a2275dc2040ac0d58ce45701dba74214e2af3685db59f1" +} diff --git a/backend/.sqlx/query-aadb91fe11c7e3106062a2055b0196ca12ed20e40513bc14f11f4a8bd5623a3f.json b/backend/.sqlx/query-aadb91fe11c7e3106062a2055b0196ca12ed20e40513bc14f11f4a8bd5623a3f.json new file mode 100644 index 0000000..e659c24 --- /dev/null +++ b/backend/.sqlx/query-aadb91fe11c7e3106062a2055b0196ca12ed20e40513bc14f11f4a8bd5623a3f.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO delegate_profiles (user_id, total_delegators)\n VALUES ($1, 1)\n ON CONFLICT (user_id) DO UPDATE SET total_delegators = delegate_profiles.total_delegators + 1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "aadb91fe11c7e3106062a2055b0196ca12ed20e40513bc14f11f4a8bd5623a3f" +} diff --git a/backend/.sqlx/query-ad438c18ca77e186c4e33a8079374181a47fbbec6a5f0ae5fe2144e342910d40.json b/backend/.sqlx/query-ad438c18ca77e186c4e33a8079374181a47fbbec6a5f0ae5fe2144e342910d40.json new file mode 100644 index 0000000..8998467 --- /dev/null +++ b/backend/.sqlx/query-ad438c18ca77e186c4e33a8079374181a47fbbec6a5f0ae5fe2144e342910d40.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM star_votes WHERE proposal_id = $1 AND voter_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "ad438c18ca77e186c4e33a8079374181a47fbbec6a5f0ae5fe2144e342910d40" +} diff --git a/backend/.sqlx/query-ad82dc4820ab69556cba650ae422084d57dec2f89ae99c977be1b36261c8d0b4.json b/backend/.sqlx/query-ad82dc4820ab69556cba650ae422084d57dec2f89ae99c977be1b36261c8d0b4.json new file mode 100644 index 0000000..9523f66 --- /dev/null +++ b/backend/.sqlx/query-ad82dc4820ab69556cba650ae422084d57dec2f89ae99c977be1b36261c8d0b4.json @@ -0,0 +1,151 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE proposals \n SET status = 'closed', voting_ends_at = NOW()\n WHERE id = $1\n RETURNING id, community_id, author_id, title, description,\n status as \"status: _\", voting_method, voting_starts_at, voting_ends_at,\n created_at, updated_at, deliberation_phase as \"deliberation_phase: _\",\n inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at,\n min_read_time_seconds, facilitator_id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "author_id", + "type_info": "Uuid" + }, + { + 
"ordinal": 3, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "status: _", + "type_info": { + "Custom": { + "name": "proposal_status", + "kind": { + "Enum": [ + "draft", + "discussion", + "voting", + "closed", + "archived", + "calculating" + ] + } + } + } + }, + { + "ordinal": 6, + "name": "voting_method", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "voting_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 8, + "name": "voting_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 10, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 11, + "name": "deliberation_phase: _", + "type_info": { + "Custom": { + "name": "deliberation_phase", + "kind": { + "Enum": [ + "drafting", + "informing", + "discussing", + "voting", + "concluded" + ] + } + } + } + }, + { + "ordinal": 12, + "name": "inform_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 13, + "name": "inform_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 14, + "name": "discuss_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 15, + "name": "discuss_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 16, + "name": "min_read_time_seconds", + "type_info": "Int4" + }, + { + "ordinal": 17, + "name": "facilitator_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "ad82dc4820ab69556cba650ae422084d57dec2f89ae99c977be1b36261c8d0b4" +} diff --git a/backend/.sqlx/query-aeee6c4f9115efe386351097cb8c3512550eeecac893289d40b8e01c04f9f167.json b/backend/.sqlx/query-aeee6c4f9115efe386351097cb8c3512550eeecac893289d40b8e01c04f9f167.json new file mode 100644 index 0000000..65f3bf5 --- /dev/null +++ b/backend/.sqlx/query-aeee6c4f9115efe386351097cb8c3512550eeecac893289d40b8e01c04f9f167.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE community_federations SET \n approved_locally = true,\n status = CASE WHEN approved_remotely THEN 'active'::federation_status ELSE status END,\n updated_at = NOW()\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "aeee6c4f9115efe386351097cb8c3512550eeecac893289d40b8e01c04f9f167" +} diff --git a/backend/.sqlx/query-aefd6f17ea2becd41283f5b07fca49481cbe221437fed57283b0f52c03bf85c7.json b/backend/.sqlx/query-aefd6f17ea2becd41283f5b07fca49481cbe221437fed57283b0f52c03bf85c7.json new file mode 100644 index 0000000..4278ba8 --- /dev/null +++ b/backend/.sqlx/query-aefd6f17ea2becd41283f5b07fca49481cbe221437fed57283b0f52c03bf85c7.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE notifications SET is_read = true WHERE id = $1 AND user_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "aefd6f17ea2becd41283f5b07fca49481cbe221437fed57283b0f52c03bf85c7" +} diff --git a/backend/.sqlx/query-b038e92708c16243759184994795f864db1cabbdf8f0d93128d41850888e531c.json b/backend/.sqlx/query-b038e92708c16243759184994795f864db1cabbdf8f0d93128d41850888e531c.json new file mode 100644 index 0000000..af971ca --- /dev/null +++ 
b/backend/.sqlx/query-b038e92708c16243759184994795f864db1cabbdf8f0d93128d41850888e531c.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO summary_edit_history (summary_id, version, content, key_points, editor_id)\n SELECT id, version, content, key_points, last_editor_id\n FROM deliberation_summaries WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "b038e92708c16243759184994795f864db1cabbdf8f0d93128d41850888e531c" +} diff --git a/backend/.sqlx/query-b09e737c4925f4ebb8fc57919c50542f125f4fc7d2ba49c3cd368682aba9a8b6.json b/backend/.sqlx/query-b09e737c4925f4ebb8fc57919c50542f125f4fc7d2ba49c3cd368682aba9a8b6.json new file mode 100644 index 0000000..b4b4198 --- /dev/null +++ b/backend/.sqlx/query-b09e737c4925f4ebb8fc57919c50542f125f4fc7d2ba49c3cd368682aba9a8b6.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE deliberation_summaries SET\n is_approved = true, approved_by = $2, approved_at = NOW()\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "b09e737c4925f4ebb8fc57919c50542f125f4fc7d2ba49c3cd368682aba9a8b6" +} diff --git a/backend/.sqlx/query-b3c77106c16c2b75c51a36e6e56f9cff657fce6f0472d905af97ffe5aba8d3c7.json b/backend/.sqlx/query-b3c77106c16c2b75c51a36e6e56f9cff657fce6f0472d905af97ffe5aba8d3c7.json new file mode 100644 index 0000000..1b312a0 --- /dev/null +++ b/backend/.sqlx/query-b3c77106c16c2b75c51a36e6e56f9cff657fce6f0472d905af97ffe5aba8d3c7.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*)::int AS total_votes\n FROM votes WHERE proposal_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "total_votes", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "b3c77106c16c2b75c51a36e6e56f9cff657fce6f0472d905af97ffe5aba8d3c7" +} diff --git a/backend/.sqlx/query-b42cab53e091470ad8237271def4b42146a75d30b77aecb4c1ae7ab7a7cc32d3.json b/backend/.sqlx/query-b42cab53e091470ad8237271def4b42146a75d30b77aecb4c1ae7ab7a7cc32d3.json new file mode 100644 index 0000000..9474337 --- /dev/null +++ b/backend/.sqlx/query-b42cab53e091470ad8237271def4b42146a75d30b77aecb4c1ae7ab7a7cc32d3.json @@ -0,0 +1,48 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, voting_method, status as \"status: crate::models::ProposalStatus\" FROM proposals WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "voting_method", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "status: crate::models::ProposalStatus", + "type_info": { + "Custom": { + "name": "proposal_status", + "kind": { + "Enum": [ + "draft", + "discussion", + "voting", + "closed", + "archived", + "calculating" + ] + } + } + } + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "b42cab53e091470ad8237271def4b42146a75d30b77aecb4c1ae7ab7a7cc32d3" +} diff --git a/backend/.sqlx/query-b54b3e3f031e9a09d1252bcc9e27ad26959157f77064a4e4d30ed73c3a01e293.json b/backend/.sqlx/query-b54b3e3f031e9a09d1252bcc9e27ad26959157f77064a4e4d30ed73c3a01e293.json new file mode 100644 index 0000000..1c6fc0f --- /dev/null +++ b/backend/.sqlx/query-b54b3e3f031e9a09d1252bcc9e27ad26959157f77064a4e4d30ed73c3a01e293.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM proposals WHERE id 
= $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "b54b3e3f031e9a09d1252bcc9e27ad26959157f77064a4e4d30ed73c3a01e293" +} diff --git a/backend/.sqlx/query-b5bada0e6df57aa0243e0234b72c3b39cf6f1ee0ef69a4e232ae06f70b1b1c0c.json b/backend/.sqlx/query-b5bada0e6df57aa0243e0234b72c3b39cf6f1ee0ef69a4e232ae06f70b1b1c0c.json new file mode 100644 index 0000000..892cee5 --- /dev/null +++ b/backend/.sqlx/query-b5bada0e6df57aa0243e0234b72c3b39cf6f1ee0ef69a4e232ae06f70b1b1c0c.json @@ -0,0 +1,46 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, community_id, name, export_type, public_access\n FROM export_configurations\n WHERE community_id = $1 AND is_active = true", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "export_type", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "public_access", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + true, + false, + false, + false + ] + }, + "hash": "b5bada0e6df57aa0243e0234b72c3b39cf6f1ee0ef69a4e232ae06f70b1b1c0c" +} diff --git a/backend/.sqlx/query-b5d49d78ba8674954210df5728d43dca77d453a7aae14fb8b0e57e0f7a44e03a.json b/backend/.sqlx/query-b5d49d78ba8674954210df5728d43dca77d453a7aae14fb8b0e57e0f7a44e03a.json new file mode 100644 index 0000000..5eafddb --- /dev/null +++ b/backend/.sqlx/query-b5d49d78ba8674954210df5728d43dca77d453a7aae14fb8b0e57e0f7a44e03a.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n is_active,\n settings as \"settings!: serde_json::Value\"\n FROM community_plugins\n WHERE community_id = $1 AND plugin_id = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "settings!: serde_json::Value", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "b5d49d78ba8674954210df5728d43dca77d453a7aae14fb8b0e57e0f7a44e03a" +} diff --git a/backend/.sqlx/query-b6bdb63000935a47301993972237254f9b11915a860d6e84ad639f579bb6ac62.json b/backend/.sqlx/query-b6bdb63000935a47301993972237254f9b11915a860d6e84ad639f579bb6ac62.json new file mode 100644 index 0000000..625d57f --- /dev/null +++ b/backend/.sqlx/query-b6bdb63000935a47301993972237254f9b11915a860d6e84ad639f579bb6ac62.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COALESCE(\n (SELECT (cp.settings->>'auto_start_workflow')::boolean\n FROM community_plugins cp\n JOIN plugins p ON p.id = cp.plugin_id\n WHERE cp.community_id = $1 AND p.name = 'decision_workflows'),\n true\n ) AS \"auto_start!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "auto_start!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "b6bdb63000935a47301993972237254f9b11915a860d6e84ad639f579bb6ac62" +} diff --git a/backend/.sqlx/query-b6d2983946f27ac29c85e72685d389d29c8ce1b3c9e3202a272f12f2e6dfbf06.json b/backend/.sqlx/query-b6d2983946f27ac29c85e72685d389d29c8ce1b3c9e3202a272f12f2e6dfbf06.json new file mode 100644 index 0000000..397fc3d --- /dev/null +++ b/backend/.sqlx/query-b6d2983946f27ac29c85e72685d389d29c8ce1b3c9e3202a272f12f2e6dfbf06.json @@ -0,0 +1,15 @@ +{ + "db_name": 
"PostgreSQL", + "query": "DELETE FROM quadratic_votes WHERE proposal_id = $1 AND voter_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "b6d2983946f27ac29c85e72685d389d29c8ce1b3c9e3202a272f12f2e6dfbf06" +} diff --git a/backend/.sqlx/query-b7fbd0f031db0653546e51da4237160abe2909f3dcf622f9ab1b91537c9169fa.json b/backend/.sqlx/query-b7fbd0f031db0653546e51da4237160abe2909f3dcf622f9ab1b91537c9169fa.json new file mode 100644 index 0000000..ac7509f --- /dev/null +++ b/backend/.sqlx/query-b7fbd0f031db0653546e51da4237160abe2909f3dcf622f9ab1b91537c9169fa.json @@ -0,0 +1,151 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE proposals \n SET status = 'discussion'\n WHERE id = $1\n RETURNING id, community_id, author_id, title, description,\n status as \"status: _\", voting_method, voting_starts_at, voting_ends_at,\n created_at, updated_at, deliberation_phase as \"deliberation_phase: _\",\n inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at,\n min_read_time_seconds, facilitator_id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "author_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "status: _", + "type_info": { + "Custom": { + "name": "proposal_status", + "kind": { + "Enum": [ + "draft", + "discussion", + "voting", + "closed", + "archived", + "calculating" + ] + } + } + } + }, + { + "ordinal": 6, + "name": "voting_method", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "voting_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 8, + "name": "voting_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 10, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 11, + "name": "deliberation_phase: _", + "type_info": { + "Custom": { + "name": "deliberation_phase", + "kind": { + "Enum": [ + "drafting", + "informing", + "discussing", + "voting", + "concluded" + ] + } + } + } + }, + { + "ordinal": 12, + "name": "inform_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 13, + "name": "inform_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 14, + "name": "discuss_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 15, + "name": "discuss_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 16, + "name": "min_read_time_seconds", + "type_info": "Int4" + }, + { + "ordinal": 17, + "name": "facilitator_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "b7fbd0f031db0653546e51da4237160abe2909f3dcf622f9ab1b91537c9169fa" +} diff --git a/backend/.sqlx/query-b8ef84a72cfa6cea3a1b1d8cb5916d7e3290de0a05143432151308f32312f94a.json b/backend/.sqlx/query-b8ef84a72cfa6cea3a1b1d8cb5916d7e3290de0a05143432151308f32312f94a.json new file mode 100644 index 0000000..5660d7c --- /dev/null +++ b/backend/.sqlx/query-b8ef84a72cfa6cea3a1b1d8cb5916d7e3290de0a05143432151308f32312f94a.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT 
read_at FROM proposal_resource_reads WHERE resource_id = $1 AND user_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "read_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "b8ef84a72cfa6cea3a1b1d8cb5916d7e3290de0a05143432151308f32312f94a" +} diff --git a/backend/.sqlx/query-b8f80a210fcb0474a124ad93e43a163431366c50dce137863b283121ce70604b.json b/backend/.sqlx/query-b8f80a210fcb0474a124ad93e43a163431366c50dce137863b283121ce70604b.json new file mode 100644 index 0000000..6571799 --- /dev/null +++ b/backend/.sqlx/query-b8f80a210fcb0474a124ad93e43a163431366c50dce137863b283121ce70604b.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT option_id, COUNT(*) as count \n FROM votes WHERE proposal_id = $1 \n GROUP BY option_id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "option_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + null + ] + }, + "hash": "b8f80a210fcb0474a124ad93e43a163431366c50dce137863b283121ce70604b" +} diff --git a/backend/.sqlx/query-b94c2ca7181b28660e9a9b256e6ebe98b6cf0c450969b6b953434b8d2775ee4b.json b/backend/.sqlx/query-b94c2ca7181b28660e9a9b256e6ebe98b6cf0c450969b6b953434b8d2775ee4b.json new file mode 100644 index 0000000..699cd31 --- /dev/null +++ b/backend/.sqlx/query-b94c2ca7181b28660e9a9b256e6ebe98b6cf0c450969b6b953434b8d2775ee4b.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO federation_requests \n (from_instance_url, from_community_name, to_community_id, request_message)\n VALUES ($1, $2, $3, $4)\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Uuid", + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "b94c2ca7181b28660e9a9b256e6ebe98b6cf0c450969b6b953434b8d2775ee4b" +} diff --git a/backend/.sqlx/query-b9586185e84644f0bd936d7bf5e9bec6ebeaba77ab354d0b7096d9334656497f.json b/backend/.sqlx/query-b9586185e84644f0bd936d7bf5e9bec6ebeaba77ab354d0b7096d9334656497f.json new file mode 100644 index 0000000..30a0879 --- /dev/null +++ b/backend/.sqlx/query-b9586185e84644f0bd936d7bf5e9bec6ebeaba77ab354d0b7096d9334656497f.json @@ -0,0 +1,79 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE instance_settings SET\n setup_completed = true,\n setup_completed_at = NOW(),\n setup_completed_by = $1,\n instance_name = $2,\n platform_mode = $3,\n single_community_id = $4\n RETURNING id, setup_completed, instance_name, platform_mode,\n registration_enabled, registration_mode,\n default_community_visibility, allow_private_communities,\n default_plugin_policy, default_moderation_mode", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "setup_completed", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "instance_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "platform_mode", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "registration_enabled", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "registration_mode", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "default_community_visibility", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "allow_private_communities", + "type_info": "Bool" + }, + { + "ordinal": 8, + 
"name": "default_plugin_policy", + "type_info": "Varchar" + }, + { + "ordinal": 9, + "name": "default_moderation_mode", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Varchar", + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + false, + false + ] + }, + "hash": "b9586185e84644f0bd936d7bf5e9bec6ebeaba77ab354d0b7096d9334656497f" +} diff --git a/backend/.sqlx/query-b9e59dc4e2e37b0c59ebbe794b03e64647af6b6025b3c74c73324b5735e32802.json b/backend/.sqlx/query-b9e59dc4e2e37b0c59ebbe794b03e64647af6b6025b3c74c73324b5735e32802.json new file mode 100644 index 0000000..12e1f56 --- /dev/null +++ b/backend/.sqlx/query-b9e59dc4e2e37b0c59ebbe794b03e64647af6b6025b3c74c73324b5735e32802.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT target_user_id, community_id, rule_id FROM rule_violations WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "target_user_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "rule_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "b9e59dc4e2e37b0c59ebbe794b03e64647af6b6025b3c74c73324b5735e32802" +} diff --git a/backend/.sqlx/query-bb0cf75500c2358d78ae8028e5d587b9e7492ce6082340ec84f7706d668beb45.json b/backend/.sqlx/query-bb0cf75500c2358d78ae8028e5d587b9e7492ce6082340ec84f7706d668beb45.json new file mode 100644 index 0000000..c231346 --- /dev/null +++ b/backend/.sqlx/query-bb0cf75500c2358d78ae8028e5d587b9e7492ce6082340ec84f7706d668beb45.json @@ -0,0 +1,151 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE proposals \n SET status = 'voting', voting_starts_at = NOW()\n WHERE id = $1\n RETURNING id, community_id, author_id, title, description,\n status as \"status: _\", voting_method, voting_starts_at, voting_ends_at,\n created_at, updated_at, deliberation_phase as \"deliberation_phase: _\",\n inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at,\n min_read_time_seconds, facilitator_id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "author_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "status: _", + "type_info": { + "Custom": { + "name": "proposal_status", + "kind": { + "Enum": [ + "draft", + "discussion", + "voting", + "closed", + "archived", + "calculating" + ] + } + } + } + }, + { + "ordinal": 6, + "name": "voting_method", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "voting_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 8, + "name": "voting_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 10, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 11, + "name": "deliberation_phase: _", + "type_info": { + "Custom": { + "name": "deliberation_phase", + "kind": { + "Enum": [ + "drafting", + "informing", + "discussing", + "voting", + "concluded" + ] + } + } + } + }, + { + "ordinal": 12, + "name": "inform_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 13, + "name": "inform_ends_at", + 
"type_info": "Timestamptz" + }, + { + "ordinal": 14, + "name": "discuss_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 15, + "name": "discuss_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 16, + "name": "min_read_time_seconds", + "type_info": "Int4" + }, + { + "ordinal": 17, + "name": "facilitator_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "bb0cf75500c2358d78ae8028e5d587b9e7492ce6082340ec84f7706d668beb45" +} diff --git a/backend/.sqlx/query-bbcdcddcd2f31a7e308016f325e9914a10c18cbc05d4b724bb87533cd3851fea.json b/backend/.sqlx/query-bbcdcddcd2f31a7e308016f325e9914a10c18cbc05d4b724bb87533cd3851fea.json new file mode 100644 index 0000000..9c92256 --- /dev/null +++ b/backend/.sqlx/query-bbcdcddcd2f31a7e308016f325e9914a10c18cbc05d4b724bb87533cd3851fea.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT sanction_type::text AS \"sanction_type!\", duration_hours\n FROM get_escalated_sanction($1, $2, $3)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "sanction_type!", + "type_info": "Text" + }, + { + "ordinal": 1, + "name": "duration_hours", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Int4" + ] + }, + "nullable": [ + null, + null + ] + }, + "hash": "bbcdcddcd2f31a7e308016f325e9914a10c18cbc05d4b724bb87533cd3851fea" +} diff --git a/backend/.sqlx/query-bd76cc54f3ccd774a62f6d2b5ab27e5432bc2202b3e8f030a24a1733c96867af.json b/backend/.sqlx/query-bd76cc54f3ccd774a62f6d2b5ab27e5432bc2202b3e8f030a24a1733c96867af.json new file mode 100644 index 0000000..455c756 --- /dev/null +++ b/backend/.sqlx/query-bd76cc54f3ccd774a62f6d2b5ab27e5432bc2202b3e8f030a24a1733c96867af.json @@ -0,0 +1,64 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id, community_id, title, description,\n conflict_type::text AS \"conflict_type!\",\n status::text AS \"status!\",\n severity_level, is_urgent\n FROM conflict_cases \n WHERE community_id = $1 AND status NOT IN ('resolved', 'closed')\n ORDER BY is_urgent DESC, severity_level DESC, reported_at", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "conflict_type!", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "status!", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "severity_level", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "is_urgent", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + null, + null, + false, + false + ] + }, + "hash": "bd76cc54f3ccd774a62f6d2b5ab27e5432bc2202b3e8f030a24a1733c96867af" +} diff --git a/backend/.sqlx/query-bfdd9a0dcf20053b8d10c946955530253502e9c624b6586b358660df9d1ec8cc.json b/backend/.sqlx/query-bfdd9a0dcf20053b8d10c946955530253502e9c624b6586b358660df9d1ec8cc.json new file mode 100644 index 0000000..b46104e --- /dev/null +++ b/backend/.sqlx/query-bfdd9a0dcf20053b8d10c946955530253502e9c624b6586b358660df9d1ec8cc.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n voting_method,\n proposals_using_method,\n 
total_votes_cast,\n avg_turnout::float8 AS turnout,\n avg_time_to_decide_hours::float8 AS avg_time,\n decisive_results,\n close_results\n FROM voting_method_analytics\n WHERE community_id = $1\n ORDER BY proposals_using_method DESC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "voting_method", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "proposals_using_method", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "total_votes_cast", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "turnout", + "type_info": "Float8" + }, + { + "ordinal": 4, + "name": "avg_time", + "type_info": "Float8" + }, + { + "ordinal": 5, + "name": "decisive_results", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "close_results", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + null, + null, + false, + false + ] + }, + "hash": "bfdd9a0dcf20053b8d10c946955530253502e9c624b6586b358660df9d1ec8cc" +} diff --git a/backend/.sqlx/query-c2a2d3c0016e6d4df3fd04f9e24c5c4fde0acbe42a195eaf74cf5299dacb846c.json b/backend/.sqlx/query-c2a2d3c0016e6d4df3fd04f9e24c5c4fde0acbe42a195eaf74cf5299dacb846c.json new file mode 100644 index 0000000..3e64500 --- /dev/null +++ b/backend/.sqlx/query-c2a2d3c0016e6d4df3fd04f9e24c5c4fde0acbe42a195eaf74cf5299dacb846c.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id, community_id, export_type, \n format::text AS \"format!\", status::text AS \"status!\",\n record_count, download_url\n FROM export_jobs WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "export_type", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "format!", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "status!", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "record_count", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "download_url", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + true, + false, + null, + null, + true, + true + ] + }, + "hash": "c2a2d3c0016e6d4df3fd04f9e24c5c4fde0acbe42a195eaf74cf5299dacb846c" +} diff --git a/backend/.sqlx/query-c2ca1cf0be6dc893f3f6938953436c69408bd8ca37d45398a1faf450684aec08.json b/backend/.sqlx/query-c2ca1cf0be6dc893f3f6938953436c69408bd8ca37d45398a1faf450684aec08.json new file mode 100644 index 0000000..8a35ddb --- /dev/null +++ b/backend/.sqlx/query-c2ca1cf0be6dc893f3f6938953436c69408bd8ca37d45398a1faf450684aec08.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO delegation_log (delegation_id, delegator_id, delegate_id, action, scope, community_id, topic_id, proposal_id)\n VALUES ($1, $2, $3, 'created', $4::delegation_scope, $5, $6, $7)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Uuid", + { + "Custom": { + "name": "delegation_scope", + "kind": { + "Enum": [ + "global", + "community", + "topic", + "proposal" + ] + } + } + }, + "Uuid", + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "c2ca1cf0be6dc893f3f6938953436c69408bd8ca37d45398a1faf450684aec08" +} diff --git a/backend/.sqlx/query-c31ebddef69f304403bfb2d816d42bd9731482ceec258e0de1bb61d5ba20d413.json b/backend/.sqlx/query-c31ebddef69f304403bfb2d816d42bd9731482ceec258e0de1bb61d5ba20d413.json new file mode 100644 index 0000000..6dd1c7f --- /dev/null +++ 
b/backend/.sqlx/query-c31ebddef69f304403bfb2d816d42bd9731482ceec258e0de1bb61d5ba20d413.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT current_version FROM proposal_lifecycle WHERE proposal_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "current_version", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "c31ebddef69f304403bfb2d816d42bd9731482ceec258e0de1bb61d5ba20d413" +} diff --git a/backend/.sqlx/query-c33217c0ec06f08e9b89ac876cb22bf002aadb6cd6391af968d6a08725792228.json b/backend/.sqlx/query-c33217c0ec06f08e9b89ac876cb22bf002aadb6cd6391af968d6a08725792228.json new file mode 100644 index 0000000..c01da87 --- /dev/null +++ b/backend/.sqlx/query-c33217c0ec06f08e9b89ac876cb22bf002aadb6cd6391af968d6a08725792228.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM ranked_votes WHERE proposal_id = $1 AND voter_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "c33217c0ec06f08e9b89ac876cb22bf002aadb6cd6391af968d6a08725792228" +} diff --git a/backend/.sqlx/query-c35608b0d7569f739dda24b3da59b7b500ff26f5e79433b3f7e3625d91177d26.json b/backend/.sqlx/query-c35608b0d7569f739dda24b3da59b7b500ff26f5e79433b3f7e3625d91177d26.json new file mode 100644 index 0000000..cb0e812 --- /dev/null +++ b/backend/.sqlx/query-c35608b0d7569f739dda24b3da59b7b500ff26f5e79433b3f7e3625d91177d26.json @@ -0,0 +1,79 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE instance_settings SET\n instance_name = COALESCE($1, instance_name),\n platform_mode = COALESCE($2, platform_mode),\n registration_enabled = COALESCE($3, registration_enabled),\n registration_mode = COALESCE($4, registration_mode)\n RETURNING id, setup_completed, instance_name, platform_mode,\n registration_enabled, registration_mode,\n default_community_visibility, allow_private_communities,\n default_plugin_policy, default_moderation_mode", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "setup_completed", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "instance_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "platform_mode", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "registration_enabled", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "registration_mode", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "default_community_visibility", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "allow_private_communities", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "default_plugin_policy", + "type_info": "Varchar" + }, + { + "ordinal": 9, + "name": "default_moderation_mode", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Bool", + "Varchar" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + false, + false + ] + }, + "hash": "c35608b0d7569f739dda24b3da59b7b500ff26f5e79433b3f7e3625d91177d26" +} diff --git a/backend/.sqlx/query-c42152b0a4b91fd58afbdb06da1a9b7b73f414bdd6607343df01dc0fa3d4b5fd.json b/backend/.sqlx/query-c42152b0a4b91fd58afbdb06da1a9b7b73f414bdd6607343df01dc0fa3d4b5fd.json new file mode 100644 index 0000000..fe7151a --- /dev/null +++ b/backend/.sqlx/query-c42152b0a4b91fd58afbdb06da1a9b7b73f414bdd6607343df01dc0fa3d4b5fd.json @@ -0,0 +1,46 @@ +{ + "db_name": "PostgreSQL", + "query": 
"SELECT * FROM get_federation_stats($1)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "total_federations", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "active_federations", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "federated_proposals", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "total_syncs", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "last_sync", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null, + null, + null, + null, + null + ] + }, + "hash": "c42152b0a4b91fd58afbdb06da1a9b7b73f414bdd6607343df01dc0fa3d4b5fd" +} diff --git a/backend/.sqlx/query-c4d21db839f7e0b45fc1fc1291840e78d1e92440e003c5ad3cb45c3d9f5042fc.json b/backend/.sqlx/query-c4d21db839f7e0b45fc1fc1291840e78d1e92440e003c5ad3cb45c3d9f5042fc.json new file mode 100644 index 0000000..c4f2bec --- /dev/null +++ b/backend/.sqlx/query-c4d21db839f7e0b45fc1fc1291840e78d1e92440e003c5ad3cb45c3d9f5042fc.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, community_id, name, description, is_default, is_system, config\n FROM workflow_templates\n WHERE community_id IS NULL OR community_id = $1\n ORDER BY is_system DESC, name", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "is_default", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "is_system", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "config", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + true, + false, + true, + false, + false, + false + ] + }, + "hash": "c4d21db839f7e0b45fc1fc1291840e78d1e92440e003c5ad3cb45c3d9f5042fc" +} diff --git a/backend/.sqlx/query-c5f66036fe85ebb4597f564dcf40b7547b409195b65ea3130454f96265c9bd82.json b/backend/.sqlx/query-c5f66036fe85ebb4597f564dcf40b7547b409195b65ea3130454f96265c9bd82.json new file mode 100644 index 0000000..760a9ab --- /dev/null +++ b/backend/.sqlx/query-c5f66036fe85ebb4597f564dcf40b7547b409195b65ea3130454f96265c9bd82.json @@ -0,0 +1,155 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO proposals (community_id, author_id, title, description, voting_method)\n VALUES ($1, $2, $3, $4, $5)\n RETURNING id, community_id, author_id, title, description,\n status as \"status: _\", voting_method, voting_starts_at, voting_ends_at,\n created_at, updated_at, deliberation_phase as \"deliberation_phase: _\",\n inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at,\n min_read_time_seconds, facilitator_id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "author_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "status: _", + "type_info": { + "Custom": { + "name": "proposal_status", + "kind": { + "Enum": [ + "draft", + "discussion", + "voting", + "closed", + "archived", + "calculating" + ] + } + } + } + }, + { + "ordinal": 6, + "name": "voting_method", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": 
"voting_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 8, + "name": "voting_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 10, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 11, + "name": "deliberation_phase: _", + "type_info": { + "Custom": { + "name": "deliberation_phase", + "kind": { + "Enum": [ + "drafting", + "informing", + "discussing", + "voting", + "concluded" + ] + } + } + } + }, + { + "ordinal": 12, + "name": "inform_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 13, + "name": "inform_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 14, + "name": "discuss_starts_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 15, + "name": "discuss_ends_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 16, + "name": "min_read_time_seconds", + "type_info": "Int4" + }, + { + "ordinal": 17, + "name": "facilitator_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Varchar", + "Text", + "Varchar" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "c5f66036fe85ebb4597f564dcf40b7547b409195b65ea3130454f96265c9bd82" +} diff --git a/backend/.sqlx/query-c64fd6da083e54e1719580125eb72a544081ed4c91c02295c6f1d799fb3b4f5a.json b/backend/.sqlx/query-c64fd6da083e54e1719580125eb72a544081ed4c91c02295c6f1d799fb3b4f5a.json new file mode 100644 index 0000000..b05c926 --- /dev/null +++ b/backend/.sqlx/query-c64fd6da083e54e1719580125eb72a544081ed4c91c02295c6f1d799fb3b4f5a.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT voter_id, option_id, stars FROM star_votes WHERE proposal_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "voter_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "option_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "stars", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "c64fd6da083e54e1719580125eb72a544081ed4c91c02295c6f1d799fb3b4f5a" +} diff --git a/backend/.sqlx/query-c7525a8aa940491736dd45cd876dab2e4cd0d05f0104e2d2b08980bcee557212.json b/backend/.sqlx/query-c7525a8aa940491736dd45cd876dab2e4cd0d05f0104e2d2b08980bcee557212.json new file mode 100644 index 0000000..e482507 --- /dev/null +++ b/backend/.sqlx/query-c7525a8aa940491736dd45cd876dab2e4cd0d05f0104e2d2b08980bcee557212.json @@ -0,0 +1,72 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE communities \n SET name = COALESCE($1, name), \n description = COALESCE($2, description)\n WHERE id = $3 AND is_active = true\n RETURNING *", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "slug", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "settings", + "type_info": "Jsonb" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 7, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "created_by", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", 
+ "Text", + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + true, + false, + false, + false, + false, + true + ] + }, + "hash": "c7525a8aa940491736dd45cd876dab2e4cd0d05f0104e2d2b08980bcee557212" +} diff --git a/backend/.sqlx/query-cbb4acc6eb730ec886b1af23e6389a0e68d7a672d8643101f6680e42df944711.json b/backend/.sqlx/query-cbb4acc6eb730ec886b1af23e6389a0e68d7a672d8643101f6680e42df944711.json new file mode 100644 index 0000000..b9e735d --- /dev/null +++ b/backend/.sqlx/query-cbb4acc6eb730ec886b1af23e6389a0e68d7a672d8643101f6680e42df944711.json @@ -0,0 +1,76 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n rv.id, cr.code AS rule_code, cr.title AS rule_title,\n cr.severity::text AS \"severity!\",\n rv.target_user_id, tu.username AS target_username,\n rv.reported_by, rv.status::text AS \"status!\",\n rv.reported_at, rv.report_reason\n FROM rule_violations rv\n JOIN community_rules cr ON cr.id = rv.rule_id\n JOIN users tu ON tu.id = rv.target_user_id\n WHERE rv.community_id = $1 \n AND rv.status IN ('reported', 'under_review')\n ORDER BY \n CASE cr.severity \n WHEN 'critical' THEN 1 \n WHEN 'major' THEN 2 \n ELSE 3 \n END,\n rv.reported_at", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "rule_code", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "rule_title", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "severity!", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "target_user_id", + "type_info": "Uuid" + }, + { + "ordinal": 5, + "name": "target_username", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "reported_by", + "type_info": "Uuid" + }, + { + "ordinal": 7, + "name": "status!", + "type_info": "Text" + }, + { + "ordinal": 8, + "name": "reported_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "report_reason", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + null, + false, + false, + true, + null, + false, + true + ] + }, + "hash": "cbb4acc6eb730ec886b1af23e6389a0e68d7a672d8643101f6680e42df944711" +} diff --git a/backend/.sqlx/query-cc77b8a12eb03938d34d391c8b1397dd2fe99748315a95299563c3a2f5c989bf.json b/backend/.sqlx/query-cc77b8a12eb03938d34d391c8b1397dd2fe99748315a95299563c3a2f5c989bf.json new file mode 100644 index 0000000..7d6a310 --- /dev/null +++ b/backend/.sqlx/query-cc77b8a12eb03938d34d391c8b1397dd2fe99748315a95299563c3a2f5c989bf.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id FROM comment_reactions WHERE comment_id = $1 AND user_id = $2 AND reaction_type = $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "cc77b8a12eb03938d34d391c8b1397dd2fe99748315a95299563c3a2f5c989bf" +} diff --git a/backend/.sqlx/query-cc97b910b8afcfd348d5fe69f7e75862ddd7e31680e46a61170a467b64cdf547.json b/backend/.sqlx/query-cc97b910b8afcfd348d5fe69f7e75862ddd7e31680e46a61170a467b64cdf547.json new file mode 100644 index 0000000..ccbe8bf --- /dev/null +++ b/backend/.sqlx/query-cc97b910b8afcfd348d5fe69f7e75862ddd7e31680e46a61170a467b64cdf547.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO communities (name, slug, description, is_active, created_by)\n VALUES ($1, $2, $3, true, $4)\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + 
"type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Text", + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "cc97b910b8afcfd348d5fe69f7e75862ddd7e31680e46a61170a467b64cdf547" +} diff --git a/backend/.sqlx/query-cca54c0742b36cc4b7c3c2bf0bb3d987d16771d42334f11c4e9d7598651d21e6.json b/backend/.sqlx/query-cca54c0742b36cc4b7c3c2bf0bb3d987d16771d42334f11c4e9d7598651d21e6.json new file mode 100644 index 0000000..4a2b371 --- /dev/null +++ b/backend/.sqlx/query-cca54c0742b36cc4b7c3c2bf0bb3d987d16771d42334f11c4e9d7598651d21e6.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE voting_method_plugins SET is_default = FALSE", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "cca54c0742b36cc4b7c3c2bf0bb3d987d16771d42334f11c4e9d7598651d21e6" +} diff --git a/backend/.sqlx/query-ccf5a42b62f74eeb3d424fc4573ec6b01b1ed46b56518c0ccc5121e524d2f3c5.json b/backend/.sqlx/query-ccf5a42b62f74eeb3d424fc4573ec6b01b1ed46b56518c0ccc5121e524d2f3c5.json new file mode 100644 index 0000000..8276fc6 --- /dev/null +++ b/backend/.sqlx/query-ccf5a42b62f74eeb3d424fc4573ec6b01b1ed46b56518c0ccc5121e524d2f3c5.json @@ -0,0 +1,74 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT * FROM users WHERE is_active = true ORDER BY created_at DESC LIMIT 100", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "username", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "password_hash", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 7, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "is_admin", + "type_info": "Bool" + }, + { + "ordinal": 9, + "name": "invited_by", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + true, + false, + false, + false, + false, + true + ] + }, + "hash": "ccf5a42b62f74eeb3d424fc4573ec6b01b1ed46b56518c0ccc5121e524d2f3c5" +} diff --git a/backend/.sqlx/query-cd002400a4b0c6b071bb949d0c716ab2691caa661ccc407af85da42599e1101b.json b/backend/.sqlx/query-cd002400a4b0c6b071bb949d0c716ab2691caa661ccc407af85da42599e1101b.json new file mode 100644 index 0000000..c7dbe5c --- /dev/null +++ b/backend/.sqlx/query-cd002400a4b0c6b071bb949d0c716ab2691caa661ccc407af85da42599e1101b.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE proposal_lifecycle SET amendment_count = amendment_count + 1 WHERE proposal_id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "cd002400a4b0c6b071bb949d0c716ab2691caa661ccc407af85da42599e1101b" +} diff --git a/backend/.sqlx/query-cdb2b8bb16c0e2e04fa6df8e22e060adfc81c38cf67121ada9f6c721a9398588.json b/backend/.sqlx/query-cdb2b8bb16c0e2e04fa6df8e22e060adfc81c38cf67121ada9f6c721a9398588.json new file mode 100644 index 0000000..c49fdc9 --- /dev/null +++ b/backend/.sqlx/query-cdb2b8bb16c0e2e04fa6df8e22e060adfc81c38cf67121ada9f6c721a9398588.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT party_a_response, party_b_response, conflict_id\n FROM compromise_proposals WHERE id = $1", + "describe": { + "columns": [ 
+ { + "ordinal": 0, + "name": "party_a_response", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "party_b_response", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "conflict_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true, + true, + false + ] + }, + "hash": "cdb2b8bb16c0e2e04fa6df8e22e060adfc81c38cf67121ada9f6c721a9398588" +} diff --git a/backend/.sqlx/query-cde649035ebc12f0bbce749e9a5347b8073659b42c9e65a4ac74400b17dc38f7.json b/backend/.sqlx/query-cde649035ebc12f0bbce749e9a5347b8073659b42c9e65a4ac74400b17dc38f7.json new file mode 100644 index 0000000..51c398c --- /dev/null +++ b/backend/.sqlx/query-cde649035ebc12f0bbce749e9a5347b8073659b42c9e65a4ac74400b17dc38f7.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT r.name\n FROM user_roles ur\n JOIN roles r ON r.id = ur.role_id\n WHERE ur.user_id = $1\n ORDER BY CASE r.name\n WHEN 'platform_admin' THEN 1\n WHEN 'platform_moderator' THEN 2\n ELSE 3\n END\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "name", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "cde649035ebc12f0bbce749e9a5347b8073659b42c9e65a4ac74400b17dc38f7" +} diff --git a/backend/.sqlx/query-ce7f978d16864a568b49c47eaa68edcaafb0d293f6d08f5a945bd456c91fd417.json b/backend/.sqlx/query-ce7f978d16864a568b49c47eaa68edcaafb0d293f6d08f5a945bd456c91fd417.json new file mode 100644 index 0000000..358b50c --- /dev/null +++ b/backend/.sqlx/query-ce7f978d16864a568b49c47eaa68edcaafb0d293f6d08f5a945bd456c91fd417.json @@ -0,0 +1,90 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT d.id, d.delegator_id, d.delegate_id, u.username as delegate_username,\n d.scope as \"scope: DelegationScope\", d.community_id, d.topic_id, \n d.proposal_id, d.is_active, d.created_at\n FROM delegations d\n JOIN users u ON d.delegate_id = u.id\n WHERE d.delegator_id = $1\n AND ($2 = FALSE OR d.is_active = TRUE)\n AND ($3::uuid IS NULL OR d.community_id = $3)\n ORDER BY d.created_at DESC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "delegator_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "delegate_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "delegate_username", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "scope: DelegationScope", + "type_info": { + "Custom": { + "name": "delegation_scope", + "kind": { + "Enum": [ + "global", + "community", + "topic", + "proposal" + ] + } + } + } + }, + { + "ordinal": 5, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 6, + "name": "topic_id", + "type_info": "Uuid" + }, + { + "ordinal": 7, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 8, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 9, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Bool", + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + true, + true, + true, + false, + false + ] + }, + "hash": "ce7f978d16864a568b49c47eaa68edcaafb0d293f6d08f5a945bd456c91fd417" +} diff --git a/backend/.sqlx/query-cf9a5a22ac9e4ab44a56d2aa4e110a63ba212d392a353850a344f69a0a13811f.json b/backend/.sqlx/query-cf9a5a22ac9e4ab44a56d2aa4e110a63ba212d392a353850a344f69a0a13811f.json new file mode 100644 index 0000000..9eb7e22 --- /dev/null +++ 
b/backend/.sqlx/query-cf9a5a22ac9e4ab44a56d2aa4e110a63ba212d392a353850a344f69a0a13811f.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO community_members (user_id, community_id, role) VALUES ($1, $2, $3)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "cf9a5a22ac9e4ab44a56d2aa4e110a63ba212d392a353850a344f69a0a13811f" +} diff --git a/backend/.sqlx/query-d00ba1f23877ca15a8ac8b09d55fc86a37d40c9a0ea63afdbfa2f725fa39d255.json b/backend/.sqlx/query-d00ba1f23877ca15a8ac8b09d55fc86a37d40c9a0ea63afdbfa2f725fa39d255.json new file mode 100644 index 0000000..4efc24a --- /dev/null +++ b/backend/.sqlx/query-d00ba1f23877ca15a8ac8b09d55fc86a37d40c9a0ea63afdbfa2f725fa39d255.json @@ -0,0 +1,68 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT r.id, r.name, r.display_name, r.description, r.color,\n r.is_system, r.is_default, r.priority,\n ARRAY_AGG(p.name) FILTER (WHERE p.name IS NOT NULL) as permissions\n FROM roles r\n LEFT JOIN role_permissions rp ON r.id = rp.role_id AND rp.granted = TRUE\n LEFT JOIN permissions p ON rp.permission_id = p.id\n WHERE r.community_id IS NULL\n GROUP BY r.id\n ORDER BY r.priority DESC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "color", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "is_system", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "is_default", + "type_info": "Bool" + }, + { + "ordinal": 7, + "name": "priority", + "type_info": "Int4" + }, + { + "ordinal": 8, + "name": "permissions", + "type_info": "VarcharArray" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + true, + true, + false, + false, + false, + null + ] + }, + "hash": "d00ba1f23877ca15a8ac8b09d55fc86a37d40c9a0ea63afdbfa2f725fa39d255" +} diff --git a/backend/.sqlx/query-d09299b064ef5fefd5f5ac8a7f1ae1a549cfffd9081fa72b4427fa4ea21cbcb5.json b/backend/.sqlx/query-d09299b064ef5fefd5f5ac8a7f1ae1a549cfffd9081fa72b4427fa4ea21cbcb5.json new file mode 100644 index 0000000..7db47b3 --- /dev/null +++ b/backend/.sqlx/query-d09299b064ef5fefd5f5ac8a7f1ae1a549cfffd9081fa72b4427fa4ea21cbcb5.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM proposal_options WHERE proposal_id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "d09299b064ef5fefd5f5ac8a7f1ae1a549cfffd9081fa72b4427fa4ea21cbcb5" +} diff --git a/backend/.sqlx/query-d0c599e01228531f2ab7f571658c71563fd1ad88cfa5e13f1adfaad3f00eaa75.json b/backend/.sqlx/query-d0c599e01228531f2ab7f571658c71563fd1ad88cfa5e13f1adfaad3f00eaa75.json new file mode 100644 index 0000000..6d06818 --- /dev/null +++ b/backend/.sqlx/query-d0c599e01228531f2ab7f571658c71563fd1ad88cfa5e13f1adfaad3f00eaa75.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM proposals WHERE community_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "d0c599e01228531f2ab7f571658c71563fd1ad88cfa5e13f1adfaad3f00eaa75" +} diff --git 
a/backend/.sqlx/query-d2de711bc12629a1141d11db45496d22c0ff03555c983205b512a318290948a0.json b/backend/.sqlx/query-d2de711bc12629a1141d11db45496d22c0ff03555c983205b512a318290948a0.json new file mode 100644 index 0000000..aa9616b --- /dev/null +++ b/backend/.sqlx/query-d2de711bc12629a1141d11db45496d22c0ff03555c983205b512a318290948a0.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM comments WHERE author_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "d2de711bc12629a1141d11db45496d22c0ff03555c983205b512a318290948a0" +} diff --git a/backend/.sqlx/query-d32e84b6ce21f4b4553aead45ef53dce4362399a75c39f7203cbd770e997abdf.json b/backend/.sqlx/query-d32e84b6ce21f4b4553aead45ef53dce4362399a75c39f7203cbd770e997abdf.json new file mode 100644 index 0000000..a4f22b6 --- /dev/null +++ b/backend/.sqlx/query-d32e84b6ce21f4b4553aead45ef53dce4362399a75c39f7203cbd770e997abdf.json @@ -0,0 +1,71 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id, proposal_id, version_number, title, content,\n change_type, change_summary, created_by, created_at\n FROM proposal_versions\n WHERE proposal_id = $1 AND version_number = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "proposal_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "version_number", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "content", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "change_type", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "change_summary", + "type_info": "Text" + }, + { + "ordinal": 7, + "name": "created_by", + "type_info": "Uuid" + }, + { + "ordinal": 8, + "name": "created_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Int4" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + false, + false + ] + }, + "hash": "d32e84b6ce21f4b4553aead45ef53dce4362399a75c39f7203cbd770e997abdf" +} diff --git a/backend/.sqlx/query-d48c21b6a8d67db4b37ee43d6273e3d41d1cf2a2b89fe434edd009a29e69b94c.json b/backend/.sqlx/query-d48c21b6a8d67db4b37ee43d6273e3d41d1cf2a2b89fe434edd009a29e69b94c.json new file mode 100644 index 0000000..c379aa8 --- /dev/null +++ b/backend/.sqlx/query-d48c21b6a8d67db4b37ee43d6273e3d41d1cf2a2b89fe434edd009a29e69b94c.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, community_id, export_type, format::text AS \"format!\"\n FROM export_jobs WHERE status = 'pending'\n ORDER BY requested_at LIMIT 5", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "export_type", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "format!", + "type_info": "Text" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + true, + false, + null + ] + }, + "hash": "d48c21b6a8d67db4b37ee43d6273e3d41d1cf2a2b89fe434edd009a29e69b94c" +} diff --git a/backend/.sqlx/query-d705da303937bc5e1a2e929b9cf3f93e4e24b55e1dd14c4a83bca29f0d3a0341.json b/backend/.sqlx/query-d705da303937bc5e1a2e929b9cf3f93e4e24b55e1dd14c4a83bca29f0d3a0341.json new file mode 100644 index 0000000..e532790 --- /dev/null +++ 
b/backend/.sqlx/query-d705da303937bc5e1a2e929b9cf3f93e4e24b55e1dd14c4a83bca29f0d3a0341.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO delegation_log (delegation_id, delegator_id, delegate_id, action, scope, community_id, topic_id, proposal_id)\n VALUES ($1, $2, $3, 'revoked', $4::delegation_scope, $5, $6, $7)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Uuid", + { + "Custom": { + "name": "delegation_scope", + "kind": { + "Enum": [ + "global", + "community", + "topic", + "proposal" + ] + } + } + }, + "Uuid", + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "d705da303937bc5e1a2e929b9cf3f93e4e24b55e1dd14c4a83bca29f0d3a0341" +} diff --git a/backend/.sqlx/query-d83b8a3f417cd420d44aa3f7f8c462b28c2f7f2a4804ba00bb51a63759a7f8b2.json b/backend/.sqlx/query-d83b8a3f417cd420d44aa3f7f8c462b28c2f7f2a4804ba00bb51a63759a7f8b2.json new file mode 100644 index 0000000..8f70ff1 --- /dev/null +++ b/backend/.sqlx/query-d83b8a3f417cd420d44aa3f7f8c462b28c2f7f2a4804ba00bb51a63759a7f8b2.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id FROM workflow_templates \n WHERE community_id IS NULL AND is_system = true AND name = 'Standard Governance'\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "d83b8a3f417cd420d44aa3f7f8c462b28c2f7f2a4804ba00bb51a63759a7f8b2" +} diff --git a/backend/.sqlx/query-d928c488809305d28f877c359c93614550307868213d312ea8ae24bcbb927db5.json b/backend/.sqlx/query-d928c488809305d28f877c359c93614550307868213d312ea8ae24bcbb927db5.json new file mode 100644 index 0000000..1729407 --- /dev/null +++ b/backend/.sqlx/query-d928c488809305d28f877c359c93614550307868213d312ea8ae24bcbb927db5.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE export_jobs SET status = 'failed', error_message = $2 WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "d928c488809305d28f877c359c93614550307868213d312ea8ae24bcbb927db5" +} diff --git a/backend/.sqlx/query-da70570e2492dcc5b21ff51069db1a366ff94803b0cfa5821c497b6ba06ed2fc.json b/backend/.sqlx/query-da70570e2492dcc5b21ff51069db1a366ff94803b0cfa5821c497b6ba06ed2fc.json new file mode 100644 index 0000000..6925480 --- /dev/null +++ b/backend/.sqlx/query-da70570e2492dcc5b21ff51069db1a366ff94803b0cfa5821c497b6ba06ed2fc.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE compromise_proposals SET status = 'accepted', updated_at = NOW() WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "da70570e2492dcc5b21ff51069db1a366ff94803b0cfa5821c497b6ba06ed2fc" +} diff --git a/backend/.sqlx/query-db38e6387cbf510bf0c13ecee0f42afc7c100aebd0b043fd23424cfe61110e3a.json b/backend/.sqlx/query-db38e6387cbf510bf0c13ecee0f42afc7c100aebd0b043fd23424cfe61110e3a.json new file mode 100644 index 0000000..d0fd961 --- /dev/null +++ b/backend/.sqlx/query-db38e6387cbf510bf0c13ecee0f42afc7c100aebd0b043fd23424cfe61110e3a.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM votes WHERE proposal_id = $1 AND voter_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "db38e6387cbf510bf0c13ecee0f42afc7c100aebd0b043fd23424cfe61110e3a" +} diff --git 
a/backend/.sqlx/query-dbf52d447a84d9f1a7f36f3c57c05b1c547bd8515706b2be877762edd1aff254.json b/backend/.sqlx/query-dbf52d447a84d9f1a7f36f3c57c05b1c547bd8515706b2be877762edd1aff254.json new file mode 100644 index 0000000..efc9b83 --- /dev/null +++ b/backend/.sqlx/query-dbf52d447a84d9f1a7f36f3c57c05b1c547bd8515706b2be877762edd1aff254.json @@ -0,0 +1,56 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO topics (community_id, name, slug, description, parent_id)\n VALUES ($1, $2, $3, $4, $5)\n RETURNING id, community_id, name, slug, description, parent_id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "slug", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "parent_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Varchar", + "Text", + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + true + ] + }, + "hash": "dbf52d447a84d9f1a7f36f3c57c05b1c547bd8515706b2be877762edd1aff254" +} diff --git a/backend/.sqlx/query-dc64e1d25d9ced3a49130cee99f6edc3f70a4917910cf3b76faefc24ac32159d.json b/backend/.sqlx/query-dc64e1d25d9ced3a49130cee99f6edc3f70a4917910cf3b76faefc24ac32159d.json new file mode 100644 index 0000000..c958e8d --- /dev/null +++ b/backend/.sqlx/query-dc64e1d25d9ced3a49130cee99f6edc3f70a4917910cf3b76faefc24ac32159d.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM users", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "dc64e1d25d9ced3a49130cee99f6edc3f70a4917910cf3b76faefc24ac32159d" +} diff --git a/backend/.sqlx/query-dccbd661ee975b7e9b74d175c452fbd3ba2c847c24c6f401fd18736aeaed3ad3.json b/backend/.sqlx/query-dccbd661ee975b7e9b74d175c452fbd3ba2c847c24c6f401fd18736aeaed3ad3.json new file mode 100644 index 0000000..7cee659 --- /dev/null +++ b/backend/.sqlx/query-dccbd661ee975b7e9b74d175c452fbd3ba2c847c24c6f401fd18736aeaed3ad3.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO proposals (community_id, author_id, title, description, status, voting_method)\n VALUES ($1, $2, $3, $4, 'draft', 'approval')\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Varchar", + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "dccbd661ee975b7e9b74d175c452fbd3ba2c847c24c6f401fd18736aeaed3ad3" +} diff --git a/backend/.sqlx/query-dd458a825f2295d6650263f39ed6f22d1da12443b80bd53a4b6c61e870b4cf07.json b/backend/.sqlx/query-dd458a825f2295d6650263f39ed6f22d1da12443b80bd53a4b6c61e870b4cf07.json new file mode 100644 index 0000000..b19e12f --- /dev/null +++ b/backend/.sqlx/query-dd458a825f2295d6650263f39ed6f22d1da12443b80bd53a4b6c61e870b4cf07.json @@ -0,0 +1,62 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT plugin_name, plugin_type, display_name, description,\n is_core, is_recommended, default_enabled, category\n FROM default_plugins ORDER BY sort_order, plugin_name", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "plugin_name", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "plugin_type", + "type_info": 
"Varchar" + }, + { + "ordinal": 2, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "is_core", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "is_recommended", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "default_enabled", + "type_info": "Bool" + }, + { + "ordinal": 7, + "name": "category", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + true, + false, + false, + false, + true + ] + }, + "hash": "dd458a825f2295d6650263f39ed6f22d1da12443b80bd53a4b6c61e870b4cf07" +} diff --git a/backend/.sqlx/query-dd4e31b27b005a68baeacf0ada1645ebe2277a8c437d6846db1776b53611aa2e.json b/backend/.sqlx/query-dd4e31b27b005a68baeacf0ada1645ebe2277a8c437d6846db1776b53611aa2e.json new file mode 100644 index 0000000..66f6d57 --- /dev/null +++ b/backend/.sqlx/query-dd4e31b27b005a68baeacf0ada1645ebe2277a8c437d6846db1776b53611aa2e.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT setup_completed, instance_name FROM instance_settings LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "setup_completed", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "instance_name", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false + ] + }, + "hash": "dd4e31b27b005a68baeacf0ada1645ebe2277a8c437d6846db1776b53611aa2e" +} diff --git a/backend/.sqlx/query-dd99e48b1572e25db38f03da95984fda1072913b29bb6b3753a0d351583dfff6.json b/backend/.sqlx/query-dd99e48b1572e25db38f03da95984fda1072913b29bb6b3753a0d351583dfff6.json new file mode 100644 index 0000000..74b2594 --- /dev/null +++ b/backend/.sqlx/query-dd99e48b1572e25db38f03da95984fda1072913b29bb6b3753a0d351583dfff6.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id FROM users WHERE username = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "dd99e48b1572e25db38f03da95984fda1072913b29bb6b3753a0d351583dfff6" +} diff --git a/backend/.sqlx/query-de3230de507ca1e11d2ca40bef8a5b8470628ddbaa454af4f49f6fe6953f9014.json b/backend/.sqlx/query-de3230de507ca1e11d2ca40bef8a5b8470628ddbaa454af4f49f6fe6953f9014.json new file mode 100644 index 0000000..6d3b104 --- /dev/null +++ b/backend/.sqlx/query-de3230de507ca1e11d2ca40bef8a5b8470628ddbaa454af4f49f6fe6953f9014.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT username FROM users WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "username", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "de3230de507ca1e11d2ca40bef8a5b8470628ddbaa454af4f49f6fe6953f9014" +} diff --git a/backend/.sqlx/query-de6d39c5473742a38ff437b95613ae32a366f4339c8165f58d192151024b6caf.json b/backend/.sqlx/query-de6d39c5473742a38ff437b95613ae32a366f4339c8165f58d192151024b6caf.json new file mode 100644 index 0000000..2421a7a --- /dev/null +++ b/backend/.sqlx/query-de6d39c5473742a38ff437b95613ae32a366f4339c8165f58d192151024b6caf.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO community_members (user_id, community_id, role) VALUES ($1, $2, 'admin')", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": 
"de6d39c5473742a38ff437b95613ae32a366f4339c8165f58d192151024b6caf" +} diff --git a/backend/.sqlx/query-dee9b81f66d0a8a8ec6543e2bda43b68604377280236f83eec9bb710c4a8b957.json b/backend/.sqlx/query-dee9b81f66d0a8a8ec6543e2bda43b68604377280236f83eec9bb710c4a8b957.json new file mode 100644 index 0000000..b8cf4ce --- /dev/null +++ b/backend/.sqlx/query-dee9b81f66d0a8a8ec6543e2bda43b68604377280236f83eec9bb710c4a8b957.json @@ -0,0 +1,66 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n snapshot_date, total_members, active_members,\n proposals_created, votes_cast, unique_voters,\n voter_turnout_rate::float8 AS turnout,\n engagement_score::float8 AS engagement\n FROM participation_snapshots\n WHERE community_id = $1\n AND snapshot_date BETWEEN $2 AND $3\n ORDER BY snapshot_date", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "snapshot_date", + "type_info": "Date" + }, + { + "ordinal": 1, + "name": "total_members", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "active_members", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "proposals_created", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "votes_cast", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "unique_voters", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "turnout", + "type_info": "Float8" + }, + { + "ordinal": 7, + "name": "engagement", + "type_info": "Float8" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Date", + "Date" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + null, + null + ] + }, + "hash": "dee9b81f66d0a8a8ec6543e2bda43b68604377280236f83eec9bb710c4a8b957" +} diff --git a/backend/.sqlx/query-df3c9235defd412e75b967ec4c1d052404a6289ec89ebc3cf7524f0a65279258.json b/backend/.sqlx/query-df3c9235defd412e75b967ec4c1d052404a6289ec89ebc3cf7524f0a65279258.json new file mode 100644 index 0000000..d086ff1 --- /dev/null +++ b/backend/.sqlx/query-df3c9235defd412e75b967ec4c1d052404a6289ec89ebc3cf7524f0a65279258.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, email, community_id, is_active, expires_at, max_uses, uses_count\n FROM invitations WHERE code = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 4, + "name": "expires_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 5, + "name": "max_uses", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "uses_count", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "df3c9235defd412e75b967ec4c1d052404a6289ec89ebc3cf7524f0a65279258" +} diff --git a/backend/.sqlx/query-df917df33b1f18f0545d866fdcdbff3c189c549e3e92966b808166ebd65a758e.json b/backend/.sqlx/query-df917df33b1f18f0545d866fdcdbff3c189c549e3e92966b808166ebd65a758e.json new file mode 100644 index 0000000..5b2d5d3 --- /dev/null +++ b/backend/.sqlx/query-df917df33b1f18f0545d866fdcdbff3c189c549e3e92966b808166ebd65a758e.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT pi.id, pi.workflow_instance_id, wp.auto_advance, wp.failure_action\n FROM phase_instances pi\n JOIN workflow_phases wp ON wp.id = pi.phase_id\n WHERE pi.status = 'active' AND pi.scheduled_end < NOW()", + "describe": { + 
"columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "workflow_instance_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "auto_advance", + "type_info": "Bool" + }, + { + "ordinal": 3, + "name": "failure_action", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false + ] + }, + "hash": "df917df33b1f18f0545d866fdcdbff3c189c549e3e92966b808166ebd65a758e" +} diff --git a/backend/.sqlx/query-e078bd7e0e60f80321173ea5383e3c89f5b07036b7f6c3be6a66e25dd64d8790.json b/backend/.sqlx/query-e078bd7e0e60f80321173ea5383e3c89f5b07036b7f6c3be6a66e25dd64d8790.json new file mode 100644 index 0000000..ff8b45f --- /dev/null +++ b/backend/.sqlx/query-e078bd7e0e60f80321173ea5383e3c89f5b07036b7f6c3be6a66e25dd64d8790.json @@ -0,0 +1,59 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE delegations \n SET is_active = FALSE, revoked_at = NOW()\n WHERE id = $1 AND delegator_id = $2 AND is_active = TRUE\n RETURNING delegate_id, scope as \"scope: DelegationScope\", community_id, topic_id, proposal_id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "delegate_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "scope: DelegationScope", + "type_info": { + "Custom": { + "name": "delegation_scope", + "kind": { + "Enum": [ + "global", + "community", + "topic", + "proposal" + ] + } + } + } + }, + { + "ordinal": 2, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "topic_id", + "type_info": "Uuid" + }, + { + "ordinal": 4, + "name": "proposal_id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + false, + false, + true, + true, + true + ] + }, + "hash": "e078bd7e0e60f80321173ea5383e3c89f5b07036b7f6c3be6a66e25dd64d8790" +} diff --git a/backend/.sqlx/query-e15d8685afde9f89919d3643dd1df3d4718d826147e63be4c8d4e8708a15e481.json b/backend/.sqlx/query-e15d8685afde9f89919d3643dd1df3d4718d826147e63be4c8d4e8708a15e481.json new file mode 100644 index 0000000..be1396f --- /dev/null +++ b/backend/.sqlx/query-e15d8685afde9f89919d3643dd1df3d4718d826147e63be4c8d4e8708a15e481.json @@ -0,0 +1,82 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT p.id, p.title, p.description, p.status as \"status: String\", p.created_at,\n c.name as community_name, c.slug as community_slug,\n COALESCE((SELECT COUNT(*) FROM votes v JOIN proposal_options po ON v.option_id = po.id WHERE po.proposal_id = p.id), 0) as vote_count,\n COALESCE((SELECT COUNT(*) FROM comments WHERE proposal_id = p.id), 0) as comment_count\n FROM proposals p\n JOIN communities c ON p.community_id = c.id\n WHERE c.is_active = true\n ORDER BY p.created_at DESC\n LIMIT 50\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "title", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "status: String", + "type_info": { + "Custom": { + "name": "proposal_status", + "kind": { + "Enum": [ + "draft", + "discussion", + "voting", + "closed", + "archived", + "calculating" + ] + } + } + } + }, + { + "ordinal": 4, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 5, + "name": "community_name", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "community_slug", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "vote_count", + "type_info": "Int8" + }, + { + 
"ordinal": 8, + "name": "comment_count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + null, + null + ] + }, + "hash": "e15d8685afde9f89919d3643dd1df3d4718d826147e63be4c8d4e8708a15e481" +} diff --git a/backend/.sqlx/query-e1a40a1cc7c618d815ebfaec6751ab19146ab76519020ce783bc916e64155a14.json b/backend/.sqlx/query-e1a40a1cc7c618d815ebfaec6751ab19146ab76519020ce783bc916e64155a14.json new file mode 100644 index 0000000..3a544a6 --- /dev/null +++ b/backend/.sqlx/query-e1a40a1cc7c618d815ebfaec6751ab19146ab76519020ce783bc916e64155a14.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT snapshot_date, total_members, active_members, votes_cast\n FROM participation_snapshots\n WHERE community_id = $1\n ORDER BY snapshot_date DESC\n LIMIT 365", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "snapshot_date", + "type_info": "Date" + }, + { + "ordinal": 1, + "name": "total_members", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "active_members", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "votes_cast", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false + ] + }, + "hash": "e1a40a1cc7c618d815ebfaec6751ab19146ab76519020ce783bc916e64155a14" +} diff --git a/backend/.sqlx/query-e25c2c4e795fd113fbf9631b4fc107d217f2dd0a8eacd812067defe8d3529d54.json b/backend/.sqlx/query-e25c2c4e795fd113fbf9631b4fc107d217f2dd0a8eacd812067defe8d3529d54.json new file mode 100644 index 0000000..885d355 --- /dev/null +++ b/backend/.sqlx/query-e25c2c4e795fd113fbf9631b4fc107d217f2dd0a8eacd812067defe8d3529d54.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT o.id, o.label, o.description, COUNT(v.id) as vote_count\n FROM proposal_options o\n LEFT JOIN votes v ON v.option_id = o.id\n WHERE o.proposal_id = $1\n GROUP BY o.id\n ORDER BY o.sort_order", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "label", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "vote_count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + true, + null + ] + }, + "hash": "e25c2c4e795fd113fbf9631b4fc107d217f2dd0a8eacd812067defe8d3529d54" +} diff --git a/backend/.sqlx/query-e2a9b8f8fb63c4e8737c0f2925ffeff025be37184ede64b328e0ecd68680dc08.json b/backend/.sqlx/query-e2a9b8f8fb63c4e8737c0f2925ffeff025be37184ede64b328e0ecd68680dc08.json new file mode 100644 index 0000000..5918d6f --- /dev/null +++ b/backend/.sqlx/query-e2a9b8f8fb63c4e8737c0f2925ffeff025be37184ede64b328e0ecd68680dc08.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO public_events (community_id, actor_user_id, plugin_name, event_type, payload)\n VALUES ($1, $2, NULL, 'plugin.package_installed', $3)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "e2a9b8f8fb63c4e8737c0f2925ffeff025be37184ede64b328e0ecd68680dc08" +} diff --git a/backend/.sqlx/query-e3e45d1876b1a38771e1bb32eda68462ba27bd0beca41a602ebf60b86b863680.json b/backend/.sqlx/query-e3e45d1876b1a38771e1bb32eda68462ba27bd0beca41a602ebf60b86b863680.json new file mode 100644 index 0000000..2390cdb --- /dev/null +++ 
b/backend/.sqlx/query-e3e45d1876b1a38771e1bb32eda68462ba27bd0beca41a602ebf60b86b863680.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT role FROM community_members WHERE user_id = $1 AND community_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "role", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "e3e45d1876b1a38771e1bb32eda68462ba27bd0beca41a602ebf60b86b863680" +} diff --git a/backend/.sqlx/query-e53f34921983218ba03123ea07403917a62cb0fc66039be84387df090606eefc.json b/backend/.sqlx/query-e53f34921983218ba03123ea07403917a62cb0fc66039be84387df090606eefc.json new file mode 100644 index 0000000..429d0f7 --- /dev/null +++ b/backend/.sqlx/query-e53f34921983218ba03123ea07403917a62cb0fc66039be84387df090606eefc.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE export_jobs SET status = 'processing', started_at = NOW() WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "e53f34921983218ba03123ea07403917a62cb0fc66039be84387df090606eefc" +} diff --git a/backend/.sqlx/query-e5ada1e1857eec30e7c2a55c19b0ddbf2c90fc7f0019f211549f2abfc4fdf487.json b/backend/.sqlx/query-e5ada1e1857eec30e7c2a55c19b0ddbf2c90fc7f0019f211549f2abfc4fdf487.json new file mode 100644 index 0000000..3577fe3 --- /dev/null +++ b/backend/.sqlx/query-e5ada1e1857eec30e7c2a55c19b0ddbf2c90fc7f0019f211549f2abfc4fdf487.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT reject_registration($1, $2, $3)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "reject_registration", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "e5ada1e1857eec30e7c2a55c19b0ddbf2c90fc7f0019f211549f2abfc4fdf487" +} diff --git a/backend/.sqlx/query-e70b6cc45322a8ba1c3e9fef0c8c3185bbeacbcbf16810c303c029ea108802bf.json b/backend/.sqlx/query-e70b6cc45322a8ba1c3e9fef0c8c3185bbeacbcbf16810c303c029ea108802bf.json new file mode 100644 index 0000000..d53239e --- /dev/null +++ b/backend/.sqlx/query-e70b6cc45322a8ba1c3e9fef0c8c3185bbeacbcbf16810c303c029ea108802bf.json @@ -0,0 +1,44 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT cf.id, cf.local_community_id, cf.remote_instance_id,\n fi.instance_url, cf.sync_direction::text AS \"sync_direction!\"\n FROM community_federations cf\n JOIN federated_instances fi ON fi.id = cf.remote_instance_id\n WHERE cf.status = 'active' AND fi.status = 'active'", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "local_community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "remote_instance_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "instance_url", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "sync_direction!", + "type_info": "Text" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + null + ] + }, + "hash": "e70b6cc45322a8ba1c3e9fef0c8c3185bbeacbcbf16810c303c029ea108802bf" +} diff --git a/backend/.sqlx/query-e983f01536d1ed4b6b40d640877d9fa445aa04972cfe427116cbdf104e6cb32f.json b/backend/.sqlx/query-e983f01536d1ed4b6b40d640877d9fa445aa04972cfe427116cbdf104e6cb32f.json new file mode 100644 index 0000000..2927676 --- /dev/null +++ b/backend/.sqlx/query-e983f01536d1ed4b6b40d640877d9fa445aa04972cfe427116cbdf104e6cb32f.json @@ -0,0 
+1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE conflict_cases SET status = 'mediation', updated_at = NOW() WHERE id = $1 AND status = 'acknowledged'", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "e983f01536d1ed4b6b40d640877d9fa445aa04972cfe427116cbdf104e6cb32f" +} diff --git a/backend/.sqlx/query-ea775b89fed8c652e57e21cd202dbb9c4d13dedc7d0890dd1d13dfa7635f02b5.json b/backend/.sqlx/query-ea775b89fed8c652e57e21cd202dbb9c4d13dedc7d0890dd1d13dfa7635f02b5.json new file mode 100644 index 0000000..bede224 --- /dev/null +++ b/backend/.sqlx/query-ea775b89fed8c652e57e21cd202dbb9c4d13dedc7d0890dd1d13dfa7635f02b5.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM proposals WHERE author_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "ea775b89fed8c652e57e21cd202dbb9c4d13dedc7d0890dd1d13dfa7635f02b5" +} diff --git a/backend/.sqlx/query-ea7e6d82c94b562d5e75720c8c1af6ad707ef717406330be0c4255363f9c783d.json b/backend/.sqlx/query-ea7e6d82c94b562d5e75720c8c1af6ad707ef717406330be0c4255363f9c783d.json new file mode 100644 index 0000000..40b8c27 --- /dev/null +++ b/backend/.sqlx/query-ea7e6d82c94b562d5e75720c8c1af6ad707ef717406330be0c4255363f9c783d.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO instance_plugins (plugin_name, is_enabled, enabled_by, enabled_at)\n VALUES ($1, $2, $3, NOW())\n ON CONFLICT (plugin_name) DO UPDATE SET is_enabled = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Bool", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "ea7e6d82c94b562d5e75720c8c1af6ad707ef717406330be0c4255363f9c783d" +} diff --git a/backend/.sqlx/query-eab47df4e85e9dfeb87b480dc092b09924d6239ba71b8715be5c56137ebca953.json b/backend/.sqlx/query-eab47df4e85e9dfeb87b480dc092b09924d6239ba71b8715be5c56137ebca953.json new file mode 100644 index 0000000..94c6d04 --- /dev/null +++ b/backend/.sqlx/query-eab47df4e85e9dfeb87b480dc092b09924d6239ba71b8715be5c56137ebca953.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT r.id as resource_id, rr.read_at as \"read_at?\"\n FROM proposal_resources r\n LEFT JOIN proposal_resource_reads rr ON r.id = rr.resource_id AND rr.user_id = $2\n WHERE r.proposal_id = $1\n ORDER BY r.sort_order", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "resource_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "read_at?", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "eab47df4e85e9dfeb87b480dc092b09924d6239ba71b8715be5c56137ebca953" +} diff --git a/backend/.sqlx/query-ec156d9abd97de863483a1d8fe3e4ff63a63b39f883d320175c88b0bcaad3f70.json b/backend/.sqlx/query-ec156d9abd97de863483a1d8fe3e4ff63a63b39f883d320175c88b0bcaad3f70.json new file mode 100644 index 0000000..2dc05f7 --- /dev/null +++ b/backend/.sqlx/query-ec156d9abd97de863483a1d8fe3e4ff63a63b39f883d320175c88b0bcaad3f70.json @@ -0,0 +1,94 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT vmp.id, vmp.name, vmp.display_name, vmp.description, vmp.icon,\n vmp.is_active as platform_active, vmp.config_schema, vmp.default_config,\n vmp.complexity_level, vmp.supports_delegation,\n COALESCE(cvm.is_enabled, vmp.is_active) as is_enabled,\n COALESCE(cvm.is_default, vmp.is_default) as 
is_default,\n COALESCE(cvm.config, vmp.default_config) as config\n FROM voting_method_plugins vmp\n LEFT JOIN community_voting_methods cvm \n ON vmp.id = cvm.voting_method_id AND cvm.community_id = $1\n WHERE vmp.is_active = TRUE\n ORDER BY vmp.name", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "icon", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "platform_active", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "config_schema", + "type_info": "Jsonb" + }, + { + "ordinal": 7, + "name": "default_config", + "type_info": "Jsonb" + }, + { + "ordinal": 8, + "name": "complexity_level", + "type_info": "Varchar" + }, + { + "ordinal": 9, + "name": "supports_delegation", + "type_info": "Bool" + }, + { + "ordinal": 10, + "name": "is_enabled", + "type_info": "Bool" + }, + { + "ordinal": 11, + "name": "is_default", + "type_info": "Bool" + }, + { + "ordinal": 12, + "name": "config", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + true, + true, + false, + true, + true, + true, + false, + null, + null, + null + ] + }, + "hash": "ec156d9abd97de863483a1d8fe3e4ff63a63b39f883d320175c88b0bcaad3f70" +} diff --git a/backend/.sqlx/query-ec34b2dc8930568c2e135aa13d08783d7b62dd524eef99f00b8bebd55f0196bd.json b/backend/.sqlx/query-ec34b2dc8930568c2e135aa13d08783d7b62dd524eef99f00b8bebd55f0196bd.json new file mode 100644 index 0000000..bf95656 --- /dev/null +++ b/backend/.sqlx/query-ec34b2dc8930568c2e135aa13d08783d7b62dd524eef99f00b8bebd55f0196bd.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM community_members WHERE user_id = $1 AND community_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "ec34b2dc8930568c2e135aa13d08783d7b62dd524eef99f00b8bebd55f0196bd" +} diff --git a/backend/.sqlx/query-ed05dcba73fd4426c2de9c8828d943e45eafa31dbb155c4a50b2471308dd3097.json b/backend/.sqlx/query-ed05dcba73fd4426c2de9c8828d943e45eafa31dbb155c4a50b2471308dd3097.json new file mode 100644 index 0000000..c143ba2 --- /dev/null +++ b/backend/.sqlx/query-ed05dcba73fd4426c2de9c8828d943e45eafa31dbb155c4a50b2471308dd3097.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT use_invitation($1, $2, $3)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "use_invitation", + "type_info": "Record" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Uuid", + "Varchar" + ] + }, + "nullable": [ + null + ] + }, + "hash": "ed05dcba73fd4426c2de9c8828d943e45eafa31dbb155c4a50b2471308dd3097" +} diff --git a/backend/.sqlx/query-ef47b9016770614a3d12c6bba1c99b60046a3bff47ca5e18759786362446e1be.json b/backend/.sqlx/query-ef47b9016770614a3d12c6bba1c99b60046a3bff47ca5e18759786362446e1be.json new file mode 100644 index 0000000..da57bf2 --- /dev/null +++ b/backend/.sqlx/query-ef47b9016770614a3d12c6bba1c99b60046a3bff47ca5e18759786362446e1be.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO star_votes (proposal_id, voter_id, option_id, stars) VALUES ($1, $2, $3, $4)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Uuid", + "Int4" + ] + }, + "nullable": [] + }, + "hash": 
"ef47b9016770614a3d12c6bba1c99b60046a3bff47ca5e18759786362446e1be" +} diff --git a/backend/.sqlx/query-f10d14593a49a3f14536b6ec7a0283b16f57a731f18f1ea00a25b76aeda238cb.json b/backend/.sqlx/query-f10d14593a49a3f14536b6ec7a0283b16f57a731f18f1ea00a25b76aeda238cb.json new file mode 100644 index 0000000..2bb80bb --- /dev/null +++ b/backend/.sqlx/query-f10d14593a49a3f14536b6ec7a0283b16f57a731f18f1ea00a25b76aeda238cb.json @@ -0,0 +1,41 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO deliberation_summaries (\n proposal_id, summary_type, content, key_points, last_editor_id\n ) VALUES ($1, $2::summary_type, $3, $4, $5)\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + { + "Custom": { + "name": "summary_type", + "kind": { + "Enum": [ + "executive", + "pro_arguments", + "con_arguments", + "consensus", + "contention", + "questions", + "full" + ] + } + } + }, + "Text", + "Jsonb", + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "f10d14593a49a3f14536b6ec7a0283b16f57a731f18f1ea00a25b76aeda238cb" +} diff --git a/backend/.sqlx/query-f1de9db0b037bd44be7aa37b308ce60a0c10888951f385b61fe237e09b924976.json b/backend/.sqlx/query-f1de9db0b037bd44be7aa37b308ce60a0c10888951f385b61fe237e09b924976.json new file mode 100644 index 0000000..dc0ca33 --- /dev/null +++ b/backend/.sqlx/query-f1de9db0b037bd44be7aa37b308ce60a0c10888951f385b61fe237e09b924976.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO votes (proposal_id, option_id, voter_id) VALUES ($1, $2, $3)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "f1de9db0b037bd44be7aa37b308ce60a0c10888951f385b61fe237e09b924976" +} diff --git a/backend/.sqlx/query-f45f95ec5c791d1bd87635f304f62caf19b12dc178c570828a8bb8a5579ea17a.json b/backend/.sqlx/query-f45f95ec5c791d1bd87635f304f62caf19b12dc178c570828a8bb8a5579ea17a.json new file mode 100644 index 0000000..b6c6160 --- /dev/null +++ b/backend/.sqlx/query-f45f95ec5c791d1bd87635f304f62caf19b12dc178c570828a8bb8a5579ea17a.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT EXISTS(\n SELECT 1 FROM community_members \n WHERE community_id = $1 AND user_id = $2 AND role IN ('admin', 'moderator')\n ) AS \"exists!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "f45f95ec5c791d1bd87635f304f62caf19b12dc178c570828a8bb8a5579ea17a" +} diff --git a/backend/.sqlx/query-f5476f8976f2c0ab202fe52ea148d95570f6e3901f6ef0637e58701eb42d0120.json b/backend/.sqlx/query-f5476f8976f2c0ab202fe52ea148d95570f6e3901f6ef0637e58701eb42d0120.json new file mode 100644 index 0000000..8307812 --- /dev/null +++ b/backend/.sqlx/query-f5476f8976f2c0ab202fe52ea148d95570f6e3901f6ef0637e58701eb42d0120.json @@ -0,0 +1,70 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT r.id, r.name, r.display_name, r.description, r.color,\n r.is_system, r.is_default, r.priority,\n ARRAY_AGG(p.name) FILTER (WHERE p.name IS NOT NULL) as permissions\n FROM roles r\n LEFT JOIN role_permissions rp ON r.id = rp.role_id AND rp.granted = TRUE\n LEFT JOIN permissions p ON rp.permission_id = p.id\n WHERE r.community_id = $1\n GROUP BY r.id\n ORDER BY r.priority DESC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + 
"name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "color", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "is_system", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "is_default", + "type_info": "Bool" + }, + { + "ordinal": 7, + "name": "priority", + "type_info": "Int4" + }, + { + "ordinal": 8, + "name": "permissions", + "type_info": "VarcharArray" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + true, + true, + false, + false, + false, + null + ] + }, + "hash": "f5476f8976f2c0ab202fe52ea148d95570f6e3901f6ef0637e58701eb42d0120" +} diff --git a/backend/.sqlx/query-f5d1ab63c42cd0e45cfab56f5f4efd821816137eacf138aa5191dbe47c5bd444.json b/backend/.sqlx/query-f5d1ab63c42cd0e45cfab56f5f4efd821816137eacf138aa5191dbe47c5bd444.json new file mode 100644 index 0000000..734987e --- /dev/null +++ b/backend/.sqlx/query-f5d1ab63c42cd0e45cfab56f5f4efd821816137eacf138aa5191dbe47c5bd444.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE federated_instances SET trust_level = $2, updated_at = NOW() WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "f5d1ab63c42cd0e45cfab56f5f4efd821816137eacf138aa5191dbe47c5bd444" +} diff --git a/backend/.sqlx/query-f63fca5359b5dc0141d7de8e8f4c3ec1e7b446d48ab6d175c8b876823bd042a9.json b/backend/.sqlx/query-f63fca5359b5dc0141d7de8e8f4c3ec1e7b446d48ab6d175c8b876823bd042a9.json new file mode 100644 index 0000000..590042f --- /dev/null +++ b/backend/.sqlx/query-f63fca5359b5dc0141d7de8e8f4c3ec1e7b446d48ab6d175c8b876823bd042a9.json @@ -0,0 +1,56 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE community_settings SET\n membership_mode = COALESCE($2, membership_mode),\n moderation_mode = COALESCE($3, moderation_mode),\n governance_model = COALESCE($4, governance_model),\n plugin_policy = COALESCE($5, plugin_policy)\n WHERE community_id = $1\n RETURNING community_id, membership_mode, moderation_mode,\n governance_model, plugin_policy, features_enabled", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "membership_mode", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "moderation_mode", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "governance_model", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "plugin_policy", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "features_enabled", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Varchar", + "Varchar", + "Varchar" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false + ] + }, + "hash": "f63fca5359b5dc0141d7de8e8f4c3ec1e7b446d48ab6d175c8b876823bd042a9" +} diff --git a/backend/.sqlx/query-f7bb4e7ba6e7a6abb1549bc112176ca7afdb25923aa3aae3775a65cbd17afeaa.json b/backend/.sqlx/query-f7bb4e7ba6e7a6abb1549bc112176ca7afdb25923aa3aae3775a65cbd17afeaa.json new file mode 100644 index 0000000..6abbac6 --- /dev/null +++ b/backend/.sqlx/query-f7bb4e7ba6e7a6abb1549bc112176ca7afdb25923aa3aae3775a65cbd17afeaa.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT author_id, community_id, title FROM proposals WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": 
"author_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "community_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "title", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "f7bb4e7ba6e7a6abb1549bc112176ca7afdb25923aa3aae3775a65cbd17afeaa" +} diff --git a/backend/.sqlx/query-f91db2c61f14281c7dfd6f6aefdffc97e080dd9acc5d9c5edd8a865623b44628.json b/backend/.sqlx/query-f91db2c61f14281c7dfd6f6aefdffc97e080dd9acc5d9c5edd8a865623b44628.json new file mode 100644 index 0000000..bebfe55 --- /dev/null +++ b/backend/.sqlx/query-f91db2c61f14281c7dfd6f6aefdffc97e080dd9acc5d9c5edd8a865623b44628.json @@ -0,0 +1,41 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT r.id, r.name, r.display_name, r.color\n FROM roles r\n JOIN user_roles ur ON r.id = ur.role_id\n WHERE ur.user_id = $1 \n AND (ur.community_id = $2 OR ur.community_id IS NULL)\n AND (ur.expires_at IS NULL OR ur.expires_at > NOW())\n ORDER BY r.priority DESC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "display_name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "color", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + true + ] + }, + "hash": "f91db2c61f14281c7dfd6f6aefdffc97e080dd9acc5d9c5edd8a865623b44628" +} diff --git a/backend/.sqlx/query-f9ff82cdb66393d4fdef38cf4e64b93b9f8b062f06cd3f7bde3b0873ecbfc835.json b/backend/.sqlx/query-f9ff82cdb66393d4fdef38cf4e64b93b9f8b062f06cd3f7bde3b0873ecbfc835.json new file mode 100644 index 0000000..5c660a2 --- /dev/null +++ b/backend/.sqlx/query-f9ff82cdb66393d4fdef38cf4e64b93b9f8b062f06cd3f7bde3b0873ecbfc835.json @@ -0,0 +1,68 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT * FROM communities WHERE is_active = true ORDER BY created_at DESC LIMIT 100", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "slug", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "settings", + "type_info": "Jsonb" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 7, + "name": "is_active", + "type_info": "Bool" + }, + { + "ordinal": 8, + "name": "created_by", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + true, + false, + false, + false, + false, + true + ] + }, + "hash": "f9ff82cdb66393d4fdef38cf4e64b93b9f8b062f06cd3f7bde3b0873ecbfc835" +} diff --git a/backend/.sqlx/query-facfb30591b5abfa384d897e0de7c5d0f0e9f80cf8c6e57a1c5298ad1a990351.json b/backend/.sqlx/query-facfb30591b5abfa384d897e0de7c5d0f0e9f80cf8c6e57a1c5298ad1a990351.json new file mode 100644 index 0000000..12aefb6 --- /dev/null +++ b/backend/.sqlx/query-facfb30591b5abfa384d897e0de7c5d0f0e9f80cf8c6e57a1c5298ad1a990351.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE phase_instances\n SET quorum_reached = (\n SELECT is_met FROM quorum_snapshots \n WHERE phase_instance_id = $1 \n ORDER BY snapshot_time DESC LIMIT 1\n ),\n quorum_reached_at = CASE \n WHEN quorum_reached 
= false AND (\n SELECT is_met FROM quorum_snapshots \n WHERE phase_instance_id = $1 \n ORDER BY snapshot_time DESC LIMIT 1\n ) THEN NOW()\n ELSE quorum_reached_at\n END\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "facfb30591b5abfa384d897e0de7c5d0f0e9f80cf8c6e57a1c5298ad1a990351" +} diff --git a/backend/.sqlx/query-faf3d877c077b20211fe2cbf92c54322660147ab31fe5036e3e722725c2e3e44.json b/backend/.sqlx/query-faf3d877c077b20211fe2cbf92c54322660147ab31fe5036e3e722725c2e3e44.json new file mode 100644 index 0000000..8372d85 --- /dev/null +++ b/backend/.sqlx/query-faf3d877c077b20211fe2cbf92c54322660147ab31fe5036e3e722725c2e3e44.json @@ -0,0 +1,88 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n id, template_id, name, phase_type::text AS \"phase_type!\",\n sequence_order, description, default_duration_hours,\n quorum_type, quorum_value::float8 AS \"quorum_value!\",\n allow_early_completion, auto_advance, phase_config\n FROM workflow_phases\n WHERE template_id = $1\n ORDER BY sequence_order", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "template_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "phase_type!", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "sequence_order", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "description", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "default_duration_hours", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "quorum_type", + "type_info": "Varchar" + }, + { + "ordinal": 8, + "name": "quorum_value!", + "type_info": "Float8" + }, + { + "ordinal": 9, + "name": "allow_early_completion", + "type_info": "Bool" + }, + { + "ordinal": 10, + "name": "auto_advance", + "type_info": "Bool" + }, + { + "ordinal": 11, + "name": "phase_config", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + null, + false, + true, + false, + false, + null, + false, + false, + false + ] + }, + "hash": "faf3d877c077b20211fe2cbf92c54322660147ab31fe5036e3e722725c2e3e44" +} diff --git a/backend/.sqlx/query-fb2e03f8ccf37e8463ee7f14e74fc20fdb46337da0f561b4fa0eb815fb0486b4.json b/backend/.sqlx/query-fb2e03f8ccf37e8463ee7f14e74fc20fdb46337da0f561b4fa0eb815fb0486b4.json new file mode 100644 index 0000000..6b02307 --- /dev/null +++ b/backend/.sqlx/query-fb2e03f8ccf37e8463ee7f14e74fc20fdb46337da0f561b4fa0eb815fb0486b4.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO community_members (user_id, community_id, role) VALUES ($1, $2, 'member') ON CONFLICT DO NOTHING", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "fb2e03f8ccf37e8463ee7f14e74fc20fdb46337da0f561b4fa0eb815fb0486b4" +} diff --git a/backend/.sqlx/query-fcdb75aee759125fdb4169e6c9dd2b47b4f264c512bd62db700083cf479d6d3f.json b/backend/.sqlx/query-fcdb75aee759125fdb4169e6c9dd2b47b4f264c512bd62db700083cf479d6d3f.json new file mode 100644 index 0000000..8aa1ce3 --- /dev/null +++ b/backend/.sqlx/query-fcdb75aee759125fdb4169e6c9dd2b47b4f264c512bd62db700083cf479d6d3f.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT community_id FROM proposals WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "community_id", + "type_info": "Uuid" + } + ], + 
"parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "fcdb75aee759125fdb4169e6c9dd2b47b4f264c512bd62db700083cf479d6d3f" +} diff --git a/backend/.sqlx/query-fdfe11a59054df6111a7833043bf8cfb772c5897d1aa9c25ccad0579945e74f2.json b/backend/.sqlx/query-fdfe11a59054df6111a7833043bf8cfb772c5897d1aa9c25ccad0579945e74f2.json new file mode 100644 index 0000000..9f58a6a --- /dev/null +++ b/backend/.sqlx/query-fdfe11a59054df6111a7833043bf8cfb772c5897d1aa9c25ccad0579945e74f2.json @@ -0,0 +1,52 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n COUNT(*) FILTER (WHERE position = 'strongly_support') as strongly_support,\n COUNT(*) FILTER (WHERE position = 'support') as support,\n COUNT(*) FILTER (WHERE position = 'neutral') as neutral,\n COUNT(*) FILTER (WHERE position = 'oppose') as oppose,\n COUNT(*) FILTER (WHERE position = 'strongly_oppose') as strongly_oppose,\n COUNT(*) as total\n FROM proposal_positions\n WHERE proposal_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "strongly_support", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "support", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "neutral", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "oppose", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "strongly_oppose", + "type_info": "Int8" + }, + { + "ordinal": 5, + "name": "total", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null, + null, + null, + null, + null, + null + ] + }, + "hash": "fdfe11a59054df6111a7833043bf8cfb772c5897d1aa9c25ccad0579945e74f2" +} diff --git a/backend/.sqlx/query-fe3dbfffdaca1dcf828be7cc949f7f74b05f694dc8ef3ee37a3dac8f695ec62d.json b/backend/.sqlx/query-fe3dbfffdaca1dcf828be7cc949f7f74b05f694dc8ef3ee37a3dac8f695ec62d.json new file mode 100644 index 0000000..51cf4fc --- /dev/null +++ b/backend/.sqlx/query-fe3dbfffdaca1dcf828be7cc949f7f74b05f694dc8ef3ee37a3dac8f695ec62d.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT EXISTS(\n SELECT 1 FROM community_members WHERE community_id = $1 AND user_id = $2\n ) AS \"exists!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "fe3dbfffdaca1dcf828be7cc949f7f74b05f694dc8ef3ee37a3dac8f695ec62d" +} diff --git a/backend/.sqlx/query-fe5ef9f6c10dfb61e236bc955d377dd8b0893e052ee073b55fbb1b8f745a9cd5.json b/backend/.sqlx/query-fe5ef9f6c10dfb61e236bc955d377dd8b0893e052ee073b55fbb1b8f745a9cd5.json new file mode 100644 index 0000000..adc3458 --- /dev/null +++ b/backend/.sqlx/query-fe5ef9f6c10dfb61e236bc955d377dd8b0893e052ee073b55fbb1b8f745a9cd5.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT EXISTS(\n SELECT 1 FROM user_roles ur\n JOIN roles r ON r.id = ur.role_id\n WHERE ur.user_id = $1 AND r.name = 'platform_admin'\n ) AS \"exists!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "fe5ef9f6c10dfb61e236bc955d377dd8b0893e052ee073b55fbb1b8f745a9cd5" +} diff --git a/backend/Cargo.toml b/backend/Cargo.toml new file mode 100644 index 0000000..e674a60 --- /dev/null +++ b/backend/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "likwid" +version = "0.1.0" +edition = "2021" + +[dependencies] +tokio = { version = "1", features = ["full"] } +axum = "0.8" 
+serde = { version = "1", features = ["derive"] } +serde_json = "1" +sqlx = { version = "0.8", features = ["runtime-tokio", "postgres", "uuid", "chrono", "json", "migrate"] } +dotenvy = "0.15" +tracing = "0.1" +tracing-subscriber = "0.3" +uuid = { version = "1", features = ["serde", "v4"] } +chrono = { version = "0.4", features = ["serde"] } +envy = "0.4" +async-trait = "0.1" +tower-http = { version = "0.6", features = ["cors", "trace"] } +argon2 = "0.5" +jsonwebtoken = "9" +axum-extra = { version = "0.10", features = ["typed-header"] } +thiserror = "2" +jsonschema = "0.17" +base64 = "0.21" +sha2 = "0.10" +ed25519-dalek = "2" +reqwest = { version = "0.11", default-features = false, features = ["json", "rustls-tls"] } +wasmtime = "19" +wasmtime-wasi = "19" +slug = "0.1" diff --git a/backend/Dockerfile b/backend/Dockerfile new file mode 100644 index 0000000..04abf71 --- /dev/null +++ b/backend/Dockerfile @@ -0,0 +1,48 @@ +# Likwid Backend Dockerfile +FROM rust:1.75-slim-bookworm as builder + +WORKDIR /app + +# Install build dependencies +RUN apt-get update && apt-get install -y \ + pkg-config \ + libssl-dev \ + && rm -rf /var/lib/apt/lists/* + +# Copy manifests +COPY Cargo.toml Cargo.lock ./ + +# Create dummy main to cache dependencies +RUN mkdir src && echo "fn main() {}" > src/main.rs +RUN cargo build --release +RUN rm -rf src + +# Copy source code +COPY src ./src +COPY migrations ./migrations + +# Build the actual binary +RUN touch src/main.rs && cargo build --release + +# Runtime stage +FROM debian:bookworm-slim + +WORKDIR /app + +# Install runtime dependencies +RUN apt-get update && apt-get install -y \ + ca-certificates \ + libssl3 \ + && rm -rf /var/lib/apt/lists/* + +# Copy binary and migrations +COPY --from=builder /app/target/release/likwid /app/likwid +COPY --from=builder /app/migrations /app/migrations + +# Create non-root user +RUN useradd -r -s /bin/false likwid +USER likwid + +EXPOSE 3000 + +CMD ["./likwid"] diff --git a/backend/migrations/20260125180102_initial_schema.sql b/backend/migrations/20260125180102_initial_schema.sql new file mode 100644 index 0000000..24cbaea --- /dev/null +++ b/backend/migrations/20260125180102_initial_schema.sql @@ -0,0 +1,108 @@ +-- Likwid Initial Schema +-- Modular Governance Platform + +-- Enable UUID extension +CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; + +-- Users table (civic identity) +CREATE TABLE users ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + username VARCHAR(50) UNIQUE NOT NULL, + email VARCHAR(255) UNIQUE NOT NULL, + password_hash VARCHAR(255) NOT NULL, + display_name VARCHAR(100), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + is_active BOOLEAN NOT NULL DEFAULT TRUE +); + +-- Communities (supports multi-community mode) +CREATE TABLE communities ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + name VARCHAR(100) UNIQUE NOT NULL, + slug VARCHAR(100) UNIQUE NOT NULL, + description TEXT, + settings JSONB NOT NULL DEFAULT '{}', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + is_active BOOLEAN NOT NULL DEFAULT TRUE +); + +-- Community membership +CREATE TABLE community_members ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + role VARCHAR(50) NOT NULL DEFAULT 'member', + joined_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(user_id, community_id) +); + +-- Voting identities 
(separate from civic identity for anonymity) +CREATE TABLE voting_identities ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + pseudonym VARCHAR(100) NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(user_id, community_id), + UNIQUE(pseudonym, community_id) +); + +-- Plugins registry +CREATE TABLE plugins ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + name VARCHAR(100) UNIQUE NOT NULL, + version VARCHAR(20) NOT NULL, + description TEXT, + is_core BOOLEAN NOT NULL DEFAULT FALSE, + is_active BOOLEAN NOT NULL DEFAULT TRUE, + settings_schema JSONB, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Plugin activation per community +CREATE TABLE community_plugins ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + plugin_id UUID NOT NULL REFERENCES plugins(id) ON DELETE CASCADE, + settings JSONB NOT NULL DEFAULT '{}', + is_active BOOLEAN NOT NULL DEFAULT TRUE, + activated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(community_id, plugin_id) +); + +-- Moderation log (immutable, non-deactivatable per spec) +CREATE TABLE moderation_log ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + moderator_id UUID REFERENCES users(id), + target_user_id UUID REFERENCES users(id), + action_type VARCHAR(50) NOT NULL, + reason TEXT NOT NULL, + details JSONB, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Create indexes +CREATE INDEX idx_community_members_user ON community_members(user_id); +CREATE INDEX idx_community_members_community ON community_members(community_id); +CREATE INDEX idx_voting_identities_user ON voting_identities(user_id); +CREATE INDEX idx_moderation_log_community ON moderation_log(community_id); +CREATE INDEX idx_moderation_log_created ON moderation_log(created_at); + +-- Updated_at trigger function +CREATE OR REPLACE FUNCTION update_updated_at() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = NOW(); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +-- Apply trigger to tables with updated_at +CREATE TRIGGER users_updated_at BEFORE UPDATE ON users + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); + +CREATE TRIGGER communities_updated_at BEFORE UPDATE ON communities + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); diff --git a/backend/migrations/20260125181224_voting_system.sql b/backend/migrations/20260125181224_voting_system.sql new file mode 100644 index 0000000..ac08ff2 --- /dev/null +++ b/backend/migrations/20260125181224_voting_system.sql @@ -0,0 +1,50 @@ +-- Voting System Schema + +-- Proposal status enum +CREATE TYPE proposal_status AS ENUM ('draft', 'discussion', 'voting', 'closed', 'archived'); + +-- Proposals table +CREATE TABLE proposals ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + author_id UUID NOT NULL REFERENCES users(id), + title VARCHAR(255) NOT NULL, + description TEXT NOT NULL, + status proposal_status NOT NULL DEFAULT 'draft', + voting_method VARCHAR(50) NOT NULL DEFAULT 'approval', + voting_starts_at TIMESTAMPTZ, + voting_ends_at TIMESTAMPTZ, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Proposal options (for voting) +CREATE TABLE proposal_options ( + id UUID PRIMARY KEY DEFAULT 
uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + label VARCHAR(255) NOT NULL, + description TEXT, + sort_order INT NOT NULL DEFAULT 0, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Votes table (approval voting - can vote for multiple options) +CREATE TABLE votes ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + option_id UUID NOT NULL REFERENCES proposal_options(id) ON DELETE CASCADE, + voter_id UUID NOT NULL REFERENCES voting_identities(id), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(proposal_id, option_id, voter_id) +); + +-- Indexes +CREATE INDEX idx_proposals_community ON proposals(community_id); +CREATE INDEX idx_proposals_status ON proposals(status); +CREATE INDEX idx_proposal_options_proposal ON proposal_options(proposal_id); +CREATE INDEX idx_votes_proposal ON votes(proposal_id); +CREATE INDEX idx_votes_option ON votes(option_id); + +-- Updated_at trigger +CREATE TRIGGER proposals_updated_at BEFORE UPDATE ON proposals + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); diff --git a/backend/migrations/20260125183611_comments.sql b/backend/migrations/20260125183611_comments.sql new file mode 100644 index 0000000..31fca97 --- /dev/null +++ b/backend/migrations/20260125183611_comments.sql @@ -0,0 +1,16 @@ +-- Comments table for proposal discussions +CREATE TABLE comments ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + author_id UUID NOT NULL REFERENCES users(id), + content TEXT NOT NULL, + parent_id UUID REFERENCES comments(id) ON DELETE CASCADE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_comments_proposal ON comments(proposal_id); +CREATE INDEX idx_comments_parent ON comments(parent_id); + +CREATE TRIGGER comments_updated_at BEFORE UPDATE ON comments + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); diff --git a/backend/migrations/20260125204900_notifications.sql b/backend/migrations/20260125204900_notifications.sql new file mode 100644 index 0000000..de50ecf --- /dev/null +++ b/backend/migrations/20260125204900_notifications.sql @@ -0,0 +1,14 @@ +-- Notifications table +CREATE TABLE notifications ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + type VARCHAR(50) NOT NULL, + title VARCHAR(255) NOT NULL, + message TEXT, + link VARCHAR(500), + is_read BOOLEAN NOT NULL DEFAULT false, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_notifications_user ON notifications(user_id); +CREATE INDEX idx_notifications_unread ON notifications(user_id, is_read) WHERE is_read = false; diff --git a/backend/migrations/20260125205600_advanced_voting.sql b/backend/migrations/20260125205600_advanced_voting.sql new file mode 100644 index 0000000..f1ee383 --- /dev/null +++ b/backend/migrations/20260125205600_advanced_voting.sql @@ -0,0 +1,46 @@ +-- Advanced voting methods support + +-- Add voting method enum type +DO $$ BEGIN + CREATE TYPE voting_method_type AS ENUM ('approval', 'ranked_choice', 'quadratic', 'star'); +EXCEPTION + WHEN duplicate_object THEN null; +END $$; + +-- Ranked votes table for ranked choice voting +CREATE TABLE IF NOT EXISTS ranked_votes ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + voter_id UUID NOT NULL 
REFERENCES voting_identities(id), + option_id UUID NOT NULL REFERENCES proposal_options(id) ON DELETE CASCADE, + rank INTEGER NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(proposal_id, voter_id, option_id), + UNIQUE(proposal_id, voter_id, rank) +); + +-- Quadratic votes table (stores credit allocation) +CREATE TABLE IF NOT EXISTS quadratic_votes ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + voter_id UUID NOT NULL REFERENCES voting_identities(id), + option_id UUID NOT NULL REFERENCES proposal_options(id) ON DELETE CASCADE, + credits INTEGER NOT NULL CHECK (credits >= 0), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(proposal_id, voter_id, option_id) +); + +-- Star rating votes (0-5 stars per option) +CREATE TABLE IF NOT EXISTS star_votes ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + voter_id UUID NOT NULL REFERENCES voting_identities(id), + option_id UUID NOT NULL REFERENCES proposal_options(id) ON DELETE CASCADE, + stars INTEGER NOT NULL CHECK (stars >= 0 AND stars <= 5), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(proposal_id, voter_id, option_id) +); + +CREATE INDEX IF NOT EXISTS idx_ranked_votes_proposal ON ranked_votes(proposal_id); +CREATE INDEX IF NOT EXISTS idx_quadratic_votes_proposal ON quadratic_votes(proposal_id); +CREATE INDEX IF NOT EXISTS idx_star_votes_proposal ON star_votes(proposal_id); diff --git a/backend/migrations/20260125211000_public_events.sql b/backend/migrations/20260125211000_public_events.sql new file mode 100644 index 0000000..a250635 --- /dev/null +++ b/backend/migrations/20260125211000_public_events.sql @@ -0,0 +1,13 @@ +CREATE TABLE IF NOT EXISTS public_events ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID REFERENCES communities(id) ON DELETE CASCADE, + actor_user_id UUID REFERENCES users(id) ON DELETE SET NULL, + plugin_name VARCHAR(100), + event_type VARCHAR(100) NOT NULL, + payload JSONB NOT NULL DEFAULT '{}', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_public_events_community ON public_events(community_id); +CREATE INDEX IF NOT EXISTS idx_public_events_created ON public_events(created_at); +CREATE INDEX IF NOT EXISTS idx_public_events_type ON public_events(event_type); diff --git a/backend/migrations/20260126012000_plugin_packages.sql b/backend/migrations/20260126012000_plugin_packages.sql new file mode 100644 index 0000000..9b66766 --- /dev/null +++ b/backend/migrations/20260126012000_plugin_packages.sql @@ -0,0 +1,31 @@ +CREATE TABLE IF NOT EXISTS plugin_packages ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + name VARCHAR(100) NOT NULL, + version VARCHAR(20) NOT NULL, + description TEXT, + publisher VARCHAR(200), + source VARCHAR(20) NOT NULL, + registry_url TEXT, + wasm_sha256 VARCHAR(64) NOT NULL, + wasm_bytes BYTEA NOT NULL, + manifest JSONB NOT NULL, + signature BYTEA, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(name, version, publisher, wasm_sha256) +); + +CREATE INDEX IF NOT EXISTS idx_plugin_packages_name ON plugin_packages(name); +CREATE INDEX IF NOT EXISTS idx_plugin_packages_created ON plugin_packages(created_at); + +CREATE TABLE IF NOT EXISTS community_plugin_packages ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + package_id UUID NOT NULL REFERENCES plugin_packages(id) 
ON DELETE CASCADE, + installed_by UUID REFERENCES users(id) ON DELETE SET NULL, + installed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + is_active BOOLEAN NOT NULL DEFAULT TRUE, + UNIQUE(community_id, package_id) +); + +CREATE INDEX IF NOT EXISTS idx_community_plugin_packages_community ON community_plugin_packages(community_id); +CREATE INDEX IF NOT EXISTS idx_community_plugin_packages_installed_at ON community_plugin_packages(installed_at); diff --git a/backend/migrations/20260126120000_instance_settings.sql b/backend/migrations/20260126120000_instance_settings.sql new file mode 100644 index 0000000..00d9027 --- /dev/null +++ b/backend/migrations/20260126120000_instance_settings.sql @@ -0,0 +1,116 @@ +-- Instance-level settings for platform configuration +-- Determines how the platform operates globally + +CREATE TABLE IF NOT EXISTS instance_settings ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + + -- Setup status + setup_completed BOOLEAN NOT NULL DEFAULT FALSE, + setup_completed_at TIMESTAMPTZ, + setup_completed_by UUID REFERENCES users(id), + + -- Instance identity + instance_name VARCHAR(100) NOT NULL DEFAULT 'Likwid', + instance_description TEXT, + instance_logo_url TEXT, + + -- Platform mode: 'open', 'approval', 'admin_only', 'single_community' + -- open: Any user can create communities + -- approval: Users can request to create communities, admin approves + -- admin_only: Only admins can create communities + -- single_community: Platform is dedicated to one community (no creation UI) + platform_mode VARCHAR(20) NOT NULL DEFAULT 'open', + + -- Single community mode settings + single_community_id UUID REFERENCES communities(id), + + -- Registration settings + registration_enabled BOOLEAN NOT NULL DEFAULT TRUE, + registration_mode VARCHAR(20) NOT NULL DEFAULT 'open', -- 'open', 'invite_only', 'approval' + require_email_verification BOOLEAN NOT NULL DEFAULT TRUE, + + -- Community creation settings (when platform_mode allows) + default_community_visibility VARCHAR(20) NOT NULL DEFAULT 'public', -- 'public', 'private' + allow_private_communities BOOLEAN NOT NULL DEFAULT TRUE, + max_communities_per_user INT, -- NULL = unlimited + + -- Plugin policy defaults + default_plugin_policy VARCHAR(20) NOT NULL DEFAULT 'curated', -- 'open', 'curated', 'admin_only' + allow_unsigned_plugins BOOLEAN NOT NULL DEFAULT FALSE, + + -- Moderation defaults + default_moderation_mode VARCHAR(20) NOT NULL DEFAULT 'standard', -- 'minimal', 'standard', 'strict' + + -- Metadata + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Ensure only one row exists +CREATE UNIQUE INDEX IF NOT EXISTS idx_instance_settings_singleton ON instance_settings ((TRUE)); + +-- Community-specific settings that override instance defaults +CREATE TABLE IF NOT EXISTS community_settings ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL UNIQUE REFERENCES communities(id) ON DELETE CASCADE, + + -- Membership + membership_mode VARCHAR(20) NOT NULL DEFAULT 'open', -- 'open', 'approval', 'invite_only' + allow_member_invites BOOLEAN NOT NULL DEFAULT TRUE, + max_members INT, -- NULL = unlimited + + -- Content & Moderation + moderation_mode VARCHAR(20) NOT NULL DEFAULT 'standard', -- 'minimal', 'standard', 'strict', 'custom' + require_post_approval BOOLEAN NOT NULL DEFAULT FALSE, + allow_anonymous_viewing BOOLEAN NOT NULL DEFAULT TRUE, + + -- Governance + governance_model VARCHAR(20) NOT NULL DEFAULT 'standard', -- 'standard', 'democratic', 'delegated', 
'custom' + voting_threshold_percent INT NOT NULL DEFAULT 50, + proposal_duration_days INT NOT NULL DEFAULT 7, + quorum_percent INT, -- NULL = no quorum required + + -- Plugins + plugin_policy VARCHAR(20) NOT NULL DEFAULT 'inherit', -- 'inherit', 'open', 'curated', 'admin_only', 'disabled' + allowed_plugin_ids UUID[], -- If curated, list of allowed plugin package IDs + + -- Features toggles + features_enabled JSONB NOT NULL DEFAULT '{ + "proposals": true, + "voting": true, + "comments": true, + "notifications": true, + "plugins": true + }'::jsonb, + + -- Custom rules (JSON schema for flexibility) + custom_rules JSONB NOT NULL DEFAULT '[]'::jsonb, + + -- Metadata + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_community_settings_community ON community_settings(community_id); + +-- Insert default instance settings row +INSERT INTO instance_settings (id) +VALUES (uuid_generate_v4()) +ON CONFLICT DO NOTHING; + +-- Trigger to update updated_at +CREATE OR REPLACE FUNCTION update_settings_timestamp() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = NOW(); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER instance_settings_updated + BEFORE UPDATE ON instance_settings + FOR EACH ROW EXECUTE FUNCTION update_settings_timestamp(); + +CREATE TRIGGER community_settings_updated + BEFORE UPDATE ON community_settings + FOR EACH ROW EXECUTE FUNCTION update_settings_timestamp(); diff --git a/backend/migrations/20260126121000_admin_columns.sql b/backend/migrations/20260126121000_admin_columns.sql new file mode 100644 index 0000000..8b5dbca --- /dev/null +++ b/backend/migrations/20260126121000_admin_columns.sql @@ -0,0 +1,7 @@ +-- Add admin flag to users and created_by to communities + +ALTER TABLE users ADD COLUMN IF NOT EXISTS is_admin BOOLEAN NOT NULL DEFAULT FALSE; + +ALTER TABLE communities ADD COLUMN IF NOT EXISTS created_by UUID REFERENCES users(id); + +CREATE INDEX IF NOT EXISTS idx_users_is_admin ON users(is_admin) WHERE is_admin = true; diff --git a/backend/migrations/20260126130000_deliberation_system.sql b/backend/migrations/20260126130000_deliberation_system.sql new file mode 100644 index 0000000..ee0cb22 --- /dev/null +++ b/backend/migrations/20260126130000_deliberation_system.sql @@ -0,0 +1,107 @@ +-- Deliberation System +-- Implements structured deliberative democracy: inform → discuss → decide + +-- Deliberation phases +CREATE TYPE deliberation_phase AS ENUM ('drafting', 'informing', 'discussing', 'voting', 'concluded'); + +-- Add deliberation fields to proposals +ALTER TABLE proposals ADD COLUMN deliberation_phase deliberation_phase DEFAULT 'drafting'; +ALTER TABLE proposals ADD COLUMN inform_starts_at TIMESTAMPTZ; +ALTER TABLE proposals ADD COLUMN inform_ends_at TIMESTAMPTZ; +ALTER TABLE proposals ADD COLUMN discuss_starts_at TIMESTAMPTZ; +ALTER TABLE proposals ADD COLUMN discuss_ends_at TIMESTAMPTZ; +ALTER TABLE proposals ADD COLUMN min_read_time_seconds INT DEFAULT 60; +ALTER TABLE proposals ADD COLUMN facilitator_id UUID REFERENCES users(id); + +-- Resources for inform phase (documents, expert opinions, data) +CREATE TABLE proposal_resources ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + title VARCHAR(255) NOT NULL, + resource_type VARCHAR(50) NOT NULL DEFAULT 'document', -- document, video, link, expert_opinion + content TEXT, + url VARCHAR(500), + author_name VARCHAR(255), + sort_order INT NOT NULL DEFAULT 0, + 
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + created_by UUID REFERENCES users(id) +); + +-- Track who has read the resources (for "read before discuss" feature) +CREATE TABLE proposal_resource_reads ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + resource_id UUID NOT NULL REFERENCES proposal_resources(id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + read_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + time_spent_seconds INT DEFAULT 0, + UNIQUE(resource_id, user_id) +); + +-- Discussion groups (small group discussions with facilitators) +CREATE TABLE discussion_groups ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + name VARCHAR(100) NOT NULL, + facilitator_id UUID REFERENCES users(id), + max_members INT DEFAULT 10, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Discussion group members +CREATE TABLE discussion_group_members ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + group_id UUID NOT NULL REFERENCES discussion_groups(id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + joined_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(group_id, user_id) +); + +-- Comment quality scoring (for constructive visibility) +ALTER TABLE comments ADD COLUMN IF NOT EXISTS quality_score INT DEFAULT 0; +ALTER TABLE comments ADD COLUMN IF NOT EXISTS is_constructive BOOLEAN DEFAULT TRUE; +ALTER TABLE comments ADD COLUMN IF NOT EXISTS upvotes INT DEFAULT 0; +ALTER TABLE comments ADD COLUMN IF NOT EXISTS downvotes INT DEFAULT 0; + +-- Comment reactions (agree/disagree/insightful/off-topic) +CREATE TABLE comment_reactions ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + comment_id UUID NOT NULL REFERENCES comments(id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + reaction_type VARCHAR(20) NOT NULL, -- agree, disagree, insightful, off_topic, constructive + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(comment_id, user_id, reaction_type) +); + +-- Proposal sentiment/position tracking (for agreement visualization) +CREATE TABLE proposal_positions ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + position VARCHAR(20) NOT NULL, -- strongly_support, support, neutral, oppose, strongly_oppose + reasoning TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(proposal_id, user_id) +); + +-- Add facilitator role to community_members +DO $$ +BEGIN + -- Check if role column needs updating (add facilitator if not exists in check constraint) + -- For now we'll just allow any role value + NULL; +END $$; + +-- Indexes +CREATE INDEX idx_proposal_resources_proposal ON proposal_resources(proposal_id); +CREATE INDEX idx_proposal_resource_reads_resource ON proposal_resource_reads(resource_id); +CREATE INDEX idx_proposal_resource_reads_user ON proposal_resource_reads(user_id); +CREATE INDEX idx_discussion_groups_proposal ON discussion_groups(proposal_id); +CREATE INDEX idx_discussion_group_members_group ON discussion_group_members(group_id); +CREATE INDEX idx_comment_reactions_comment ON comment_reactions(comment_id); +CREATE INDEX idx_proposal_positions_proposal ON proposal_positions(proposal_id); +CREATE INDEX idx_proposals_deliberation_phase ON proposals(deliberation_phase); + +-- Trigger for proposal_positions 
updated_at +CREATE TRIGGER proposal_positions_updated_at BEFORE UPDATE ON proposal_positions + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); diff --git a/backend/migrations/20260126140000_advanced_voting.sql b/backend/migrations/20260126140000_advanced_voting.sql new file mode 100644 index 0000000..9d7a0a7 --- /dev/null +++ b/backend/migrations/20260126140000_advanced_voting.sql @@ -0,0 +1,116 @@ +-- Advanced Voting Methods +-- Implements Schulze, STAR, Quadratic, and Ranked Choice voting + +-- Voting method enum (extend existing) +DO $$ +BEGIN + -- Add new voting methods if they don't exist + ALTER TYPE proposal_status ADD VALUE IF NOT EXISTS 'calculating'; +EXCEPTION + WHEN duplicate_object THEN null; +END $$; + +-- Ranked ballots for Schulze/Ranked Choice +CREATE TABLE ranked_ballots ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + voter_id UUID NOT NULL REFERENCES voting_identities(id), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(proposal_id, voter_id) +); + +-- Individual rankings within a ballot +CREATE TABLE ranked_ballot_entries ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + ballot_id UUID NOT NULL REFERENCES ranked_ballots(id) ON DELETE CASCADE, + option_id UUID NOT NULL REFERENCES proposal_options(id) ON DELETE CASCADE, + rank INT NOT NULL, -- 1 = first choice, 2 = second, etc. + UNIQUE(ballot_id, option_id), + UNIQUE(ballot_id, rank) +); + +-- Score ballots for STAR voting +CREATE TABLE score_ballots ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + voter_id UUID NOT NULL REFERENCES voting_identities(id), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(proposal_id, voter_id) +); + +-- Individual scores within a ballot (0-5 scale) +CREATE TABLE score_ballot_entries ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + ballot_id UUID NOT NULL REFERENCES score_ballots(id) ON DELETE CASCADE, + option_id UUID NOT NULL REFERENCES proposal_options(id) ON DELETE CASCADE, + score INT NOT NULL CHECK (score >= 0 AND score <= 5), + UNIQUE(ballot_id, option_id) +); + +-- Quadratic voting credits and allocations +CREATE TABLE quadratic_budgets ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + voter_id UUID NOT NULL REFERENCES voting_identities(id), + total_credits INT NOT NULL DEFAULT 100, + used_credits INT NOT NULL DEFAULT 0, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(proposal_id, voter_id) +); + +-- Quadratic vote allocations +CREATE TABLE quadratic_allocations ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + budget_id UUID NOT NULL REFERENCES quadratic_budgets(id) ON DELETE CASCADE, + option_id UUID NOT NULL REFERENCES proposal_options(id) ON DELETE CASCADE, + votes INT NOT NULL DEFAULT 0, -- Actual votes (cost = votes^2) + UNIQUE(budget_id, option_id) +); + +-- Voting results cache (for complex calculations) +CREATE TABLE voting_results ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + voting_method VARCHAR(50) NOT NULL, + winner_option_id UUID REFERENCES proposal_options(id), + results_json JSONB NOT NULL, -- Full results including rankings, scores, etc. 
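+    -- Illustrative shape only (not enforced by this schema; keys are examples):
+    --   {"method": "schulze", "winner_option_id": "<option-uuid>",
+    --    "rankings": [{"option_id": "<option-uuid>", "rank": 1}],
+    --    "scores": {"<option-uuid>": 4.2}}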
+ calculated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(proposal_id) +); + +-- Pairwise comparison matrix (for Schulze) +CREATE TABLE pairwise_matrix ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + option_a_id UUID NOT NULL REFERENCES proposal_options(id) ON DELETE CASCADE, + option_b_id UUID NOT NULL REFERENCES proposal_options(id) ON DELETE CASCADE, + a_over_b INT NOT NULL DEFAULT 0, -- Number of voters who prefer A over B + UNIQUE(proposal_id, option_a_id, option_b_id) +); + +-- Add voting method configuration to proposals +ALTER TABLE proposals ADD COLUMN IF NOT EXISTS voting_config JSONB DEFAULT '{}'; +ALTER TABLE proposals ADD COLUMN IF NOT EXISTS quadratic_credit_budget INT DEFAULT 100; + +-- Indexes +CREATE INDEX idx_ranked_ballots_proposal ON ranked_ballots(proposal_id); +CREATE INDEX idx_ranked_ballot_entries_ballot ON ranked_ballot_entries(ballot_id); +CREATE INDEX idx_score_ballots_proposal ON score_ballots(proposal_id); +CREATE INDEX idx_score_ballot_entries_ballot ON score_ballot_entries(ballot_id); +CREATE INDEX idx_quadratic_budgets_proposal ON quadratic_budgets(proposal_id); +CREATE INDEX idx_quadratic_allocations_budget ON quadratic_allocations(budget_id); +CREATE INDEX idx_voting_results_proposal ON voting_results(proposal_id); +CREATE INDEX idx_pairwise_matrix_proposal ON pairwise_matrix(proposal_id); + +-- Triggers +CREATE TRIGGER ranked_ballots_updated_at BEFORE UPDATE ON ranked_ballots + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); + +CREATE TRIGGER score_ballots_updated_at BEFORE UPDATE ON score_ballots + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); + +CREATE TRIGGER quadratic_budgets_updated_at BEFORE UPDATE ON quadratic_budgets + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); diff --git a/backend/migrations/20260126150000_liquid_delegation.sql b/backend/migrations/20260126150000_liquid_delegation.sql new file mode 100644 index 0000000..fe1d5bf --- /dev/null +++ b/backend/migrations/20260126150000_liquid_delegation.sql @@ -0,0 +1,172 @@ +-- Liquid Delegation System +-- Implements fluid, reversible, topic-based vote delegation + +-- Delegation scopes (what the delegation applies to) +CREATE TYPE delegation_scope AS ENUM ('global', 'community', 'topic', 'proposal'); + +-- Topic categories for delegation +CREATE TABLE topics ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + name VARCHAR(100) NOT NULL, + slug VARCHAR(100) NOT NULL, + description TEXT, + parent_id UUID REFERENCES topics(id) ON DELETE SET NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(community_id, slug) +); + +-- Vote delegations +CREATE TABLE delegations ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + delegator_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + delegate_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + scope delegation_scope NOT NULL DEFAULT 'global', + community_id UUID REFERENCES communities(id) ON DELETE CASCADE, + topic_id UUID REFERENCES topics(id) ON DELETE CASCADE, + proposal_id UUID REFERENCES proposals(id) ON DELETE CASCADE, + weight DECIMAL(5,4) NOT NULL DEFAULT 1.0, -- 0.0001 to 1.0 (for fractional delegation) + is_active BOOLEAN NOT NULL DEFAULT TRUE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + revoked_at TIMESTAMPTZ, + -- Prevent self-delegation + CONSTRAINT no_self_delegation CHECK (delegator_id != 
delegate_id), + -- Ensure scope matches reference + CONSTRAINT scope_community_match CHECK ( + (scope = 'global') OR + (scope = 'community' AND community_id IS NOT NULL) OR + (scope = 'topic' AND topic_id IS NOT NULL) OR + (scope = 'proposal' AND proposal_id IS NOT NULL) + ) +); + +-- Delegation chain cache (for transitive delegations) +CREATE TABLE delegation_chains ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + original_delegator_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + final_delegate_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + chain_path UUID[] NOT NULL, -- Array of user IDs in the chain + chain_length INT NOT NULL, + effective_weight DECIMAL(5,4) NOT NULL, + scope delegation_scope NOT NULL, + community_id UUID REFERENCES communities(id) ON DELETE CASCADE, + topic_id UUID REFERENCES topics(id) ON DELETE CASCADE, + computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Delegation activity log (for transparency) +CREATE TABLE delegation_log ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + delegation_id UUID REFERENCES delegations(id) ON DELETE SET NULL, + delegator_id UUID NOT NULL REFERENCES users(id), + delegate_id UUID NOT NULL REFERENCES users(id), + action VARCHAR(20) NOT NULL, -- 'created', 'updated', 'revoked', 'used' + scope delegation_scope NOT NULL, + community_id UUID REFERENCES communities(id), + topic_id UUID REFERENCES topics(id), + proposal_id UUID REFERENCES proposals(id), + metadata JSONB, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Delegated votes cast (track when delegations are used) +CREATE TABLE delegated_votes ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + original_voter_id UUID NOT NULL REFERENCES voting_identities(id), + delegate_id UUID NOT NULL REFERENCES users(id), + delegation_chain UUID[] NOT NULL, + vote_data JSONB NOT NULL, -- The actual vote (option_ids, scores, etc.) 
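+    -- Illustrative payloads only (actual shape depends on the proposal's voting method):
+    --   approval:  {"option_ids": ["<option-uuid>", "<option-uuid>"]}
+    --   star:      {"scores": {"<option-uuid>": 4}}
+    --   quadratic: {"votes": {"<option-uuid>": 3}}   (cost = 3^2 = 9 credits)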
+ cast_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(proposal_id, original_voter_id) +); + +-- Delegate profiles (public info about delegates) +CREATE TABLE delegate_profiles ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE UNIQUE, + display_name VARCHAR(100), + bio TEXT, + expertise_topics UUID[], -- Array of topic IDs + accepting_delegations BOOLEAN NOT NULL DEFAULT TRUE, + delegation_policy TEXT, -- How they decide votes + total_delegators INT NOT NULL DEFAULT 0, + total_votes_cast INT NOT NULL DEFAULT 0, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Add topic reference to proposals +ALTER TABLE proposals ADD COLUMN IF NOT EXISTS topic_id UUID REFERENCES topics(id); + +-- Indexes +CREATE INDEX idx_topics_community ON topics(community_id); +CREATE INDEX idx_topics_parent ON topics(parent_id); +CREATE INDEX idx_delegations_delegator ON delegations(delegator_id); +CREATE INDEX idx_delegations_delegate ON delegations(delegate_id); +CREATE INDEX idx_delegations_active ON delegations(is_active) WHERE is_active = TRUE; +CREATE INDEX idx_delegations_scope ON delegations(scope); +CREATE INDEX idx_delegation_chains_original ON delegation_chains(original_delegator_id); +CREATE INDEX idx_delegation_chains_final ON delegation_chains(final_delegate_id); +CREATE INDEX idx_delegation_log_delegator ON delegation_log(delegator_id); +CREATE INDEX idx_delegation_log_delegate ON delegation_log(delegate_id); +CREATE INDEX idx_delegated_votes_proposal ON delegated_votes(proposal_id); +CREATE INDEX idx_delegate_profiles_accepting ON delegate_profiles(accepting_delegations) WHERE accepting_delegations = TRUE; + +-- Triggers +CREATE TRIGGER delegations_updated_at BEFORE UPDATE ON delegations + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); + +CREATE TRIGGER delegate_profiles_updated_at BEFORE UPDATE ON delegate_profiles + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); + +-- Function to detect delegation cycles +CREATE OR REPLACE FUNCTION check_delegation_cycle() +RETURNS TRIGGER AS $$ +DECLARE + current_delegate UUID; + visited UUID[]; + max_depth INT := 20; + depth INT := 0; +BEGIN + -- Start from the new delegate + current_delegate := NEW.delegate_id; + visited := ARRAY[NEW.delegator_id]; + + WHILE depth < max_depth LOOP + -- Check if we've reached a cycle + IF current_delegate = ANY(visited) THEN + RAISE EXCEPTION 'Delegation would create a cycle'; + END IF; + + -- Add to visited + visited := visited || current_delegate; + + -- Find next delegate in chain (matching scope) + SELECT d.delegate_id INTO current_delegate + FROM delegations d + WHERE d.delegator_id = current_delegate + AND d.is_active = TRUE + AND d.scope = NEW.scope + AND (NEW.scope = 'global' OR + (NEW.scope = 'community' AND d.community_id = NEW.community_id) OR + (NEW.scope = 'topic' AND d.topic_id = NEW.topic_id) OR + (NEW.scope = 'proposal' AND d.proposal_id = NEW.proposal_id)) + LIMIT 1; + + -- No more delegations in chain + IF current_delegate IS NULL THEN + EXIT; + END IF; + + depth := depth + 1; + END LOOP; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER check_delegation_cycle_trigger + BEFORE INSERT OR UPDATE ON delegations + FOR EACH ROW EXECUTE FUNCTION check_delegation_cycle(); diff --git a/backend/migrations/20260126160000_plugin_registry.sql b/backend/migrations/20260126160000_plugin_registry.sql new file mode 100644 index 0000000..cfd042b --- /dev/null +++ 
b/backend/migrations/20260126160000_plugin_registry.sql @@ -0,0 +1,101 @@ +-- Plugin Registry System +-- Enables plugin upload, registry install, and community-level plugin management + +-- Plugin sources +CREATE TYPE plugin_source AS ENUM ('builtin', 'upload', 'registry'); + +-- Add source and security fields to plugins table +ALTER TABLE plugins ADD COLUMN IF NOT EXISTS source plugin_source DEFAULT 'builtin'; +ALTER TABLE plugins ADD COLUMN IF NOT EXISTS wasm_hash VARCHAR(64); -- SHA256 of WASM binary +ALTER TABLE plugins ADD COLUMN IF NOT EXISTS signature TEXT; -- Plugin signature for verification +ALTER TABLE plugins ADD COLUMN IF NOT EXISTS signed_by VARCHAR(255); -- Signer identity +ALTER TABLE plugins ADD COLUMN IF NOT EXISTS is_verified BOOLEAN DEFAULT FALSE; +ALTER TABLE plugins ADD COLUMN IF NOT EXISTS manifest JSONB; -- Full plugin manifest +ALTER TABLE plugins ADD COLUMN IF NOT EXISTS permissions JSONB DEFAULT '[]'; -- Required permissions +ALTER TABLE plugins ADD COLUMN IF NOT EXISTS author VARCHAR(255); +ALTER TABLE plugins ADD COLUMN IF NOT EXISTS homepage VARCHAR(500); +ALTER TABLE plugins ADD COLUMN IF NOT EXISTS repository VARCHAR(500); +ALTER TABLE plugins ADD COLUMN IF NOT EXISTS updated_at TIMESTAMPTZ DEFAULT NOW(); + +-- Plugin files storage (for uploaded plugins) +CREATE TABLE plugin_files ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + plugin_id UUID NOT NULL REFERENCES plugins(id) ON DELETE CASCADE, + file_type VARCHAR(20) NOT NULL, -- 'wasm', 'manifest', 'icon', 'readme' + file_name VARCHAR(255) NOT NULL, + file_size BIGINT NOT NULL, + content_type VARCHAR(100), + storage_path VARCHAR(500) NOT NULL, + uploaded_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + uploaded_by UUID REFERENCES users(id) +); + +-- Plugin versions (for upgrade management) +CREATE TABLE plugin_versions ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + plugin_id UUID NOT NULL REFERENCES plugins(id) ON DELETE CASCADE, + version VARCHAR(20) NOT NULL, + changelog TEXT, + wasm_hash VARCHAR(64), + is_current BOOLEAN NOT NULL DEFAULT FALSE, + released_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(plugin_id, version) +); + +-- Plugin reviews/ratings +CREATE TABLE plugin_reviews ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + plugin_id UUID NOT NULL REFERENCES plugins(id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + rating INT NOT NULL CHECK (rating >= 1 AND rating <= 5), + review TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(plugin_id, user_id) +); + +-- Plugin install log +CREATE TABLE plugin_installs ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + plugin_id UUID NOT NULL REFERENCES plugins(id) ON DELETE CASCADE, + community_id UUID REFERENCES communities(id) ON DELETE CASCADE, + installed_by UUID NOT NULL REFERENCES users(id), + action VARCHAR(20) NOT NULL, -- 'install', 'uninstall', 'update', 'enable', 'disable' + from_version VARCHAR(20), + to_version VARCHAR(20), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Plugin capabilities/permissions +CREATE TABLE plugin_capabilities ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + name VARCHAR(100) NOT NULL UNIQUE, + description TEXT, + risk_level VARCHAR(20) NOT NULL DEFAULT 'low', -- 'low', 'medium', 'high' + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Insert default capabilities +INSERT INTO plugin_capabilities (name, description, risk_level) VALUES + ('http_outbound', 'Make HTTP requests to external 
services', 'high'), + ('database_read', 'Read data from the database', 'medium'), + ('database_write', 'Write data to the database', 'high'), + ('user_data', 'Access user profile data', 'medium'), + ('notifications', 'Send notifications to users', 'low'), + ('hooks_register', 'Register hooks for actions/filters', 'low'), + ('background_jobs', 'Schedule background tasks', 'medium'), + ('file_storage', 'Store and retrieve files', 'medium') +ON CONFLICT (name) DO NOTHING; + +-- Indexes +CREATE INDEX idx_plugin_files_plugin ON plugin_files(plugin_id); +CREATE INDEX idx_plugin_versions_plugin ON plugin_versions(plugin_id); +CREATE INDEX idx_plugin_reviews_plugin ON plugin_reviews(plugin_id); +CREATE INDEX idx_plugin_installs_plugin ON plugin_installs(plugin_id); +CREATE INDEX idx_plugin_installs_community ON plugin_installs(community_id); +CREATE INDEX idx_plugins_source ON plugins(source); +CREATE INDEX idx_plugins_verified ON plugins(is_verified); + +-- Triggers +CREATE TRIGGER plugin_reviews_updated_at BEFORE UPDATE ON plugin_reviews + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); diff --git a/backend/migrations/20260126170000_gitlab_integration.sql b/backend/migrations/20260126170000_gitlab_integration.sql new file mode 100644 index 0000000..5d7c5d6 --- /dev/null +++ b/backend/migrations/20260126170000_gitlab_integration.sql @@ -0,0 +1,92 @@ +-- GitLab Integration +-- Enables linking communities to GitLab projects for issue/MR-based governance + +-- GitLab connections (per community) +CREATE TABLE gitlab_connections ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE UNIQUE, + gitlab_url VARCHAR(500) NOT NULL, -- e.g., https://invent.kde.org + project_path VARCHAR(500) NOT NULL, -- e.g., niccolove/likwid + access_token_encrypted TEXT, -- Encrypted access token + webhook_secret VARCHAR(100), + is_active BOOLEAN NOT NULL DEFAULT TRUE, + sync_issues BOOLEAN NOT NULL DEFAULT TRUE, + sync_merge_requests BOOLEAN NOT NULL DEFAULT TRUE, + auto_create_proposals BOOLEAN NOT NULL DEFAULT FALSE, -- Auto-create proposals from issues + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + last_synced_at TIMESTAMPTZ +); + +-- Linked GitLab issues +CREATE TABLE gitlab_issues ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + connection_id UUID NOT NULL REFERENCES gitlab_connections(id) ON DELETE CASCADE, + gitlab_iid INT NOT NULL, -- GitLab issue IID + gitlab_id BIGINT NOT NULL, -- GitLab global issue ID + title VARCHAR(500) NOT NULL, + description TEXT, + state VARCHAR(20) NOT NULL, -- opened, closed + author_username VARCHAR(255), + labels TEXT[], -- Array of label names + proposal_id UUID REFERENCES proposals(id) ON DELETE SET NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + gitlab_created_at TIMESTAMPTZ, + gitlab_updated_at TIMESTAMPTZ, + UNIQUE(connection_id, gitlab_iid) +); + +-- Linked GitLab merge requests +CREATE TABLE gitlab_merge_requests ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + connection_id UUID NOT NULL REFERENCES gitlab_connections(id) ON DELETE CASCADE, + gitlab_iid INT NOT NULL, + gitlab_id BIGINT NOT NULL, + title VARCHAR(500) NOT NULL, + description TEXT, + state VARCHAR(20) NOT NULL, -- opened, merged, closed + author_username VARCHAR(255), + source_branch VARCHAR(255), + target_branch VARCHAR(255), + labels TEXT[], + proposal_id UUID REFERENCES proposals(id) ON DELETE SET NULL, + created_at 
TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + gitlab_created_at TIMESTAMPTZ, + gitlab_updated_at TIMESTAMPTZ, + UNIQUE(connection_id, gitlab_iid) +); + +-- GitLab webhook events log +CREATE TABLE gitlab_webhook_events ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + connection_id UUID NOT NULL REFERENCES gitlab_connections(id) ON DELETE CASCADE, + event_type VARCHAR(50) NOT NULL, -- issue, merge_request, note, etc. + object_kind VARCHAR(50), + action VARCHAR(50), + payload JSONB NOT NULL, + processed BOOLEAN NOT NULL DEFAULT FALSE, + processed_at TIMESTAMPTZ, + error TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Indexes +CREATE INDEX idx_gitlab_connections_community ON gitlab_connections(community_id); +CREATE INDEX idx_gitlab_issues_connection ON gitlab_issues(connection_id); +CREATE INDEX idx_gitlab_issues_proposal ON gitlab_issues(proposal_id); +CREATE INDEX idx_gitlab_mrs_connection ON gitlab_merge_requests(connection_id); +CREATE INDEX idx_gitlab_mrs_proposal ON gitlab_merge_requests(proposal_id); +CREATE INDEX idx_gitlab_webhook_events_connection ON gitlab_webhook_events(connection_id); +CREATE INDEX idx_gitlab_webhook_events_processed ON gitlab_webhook_events(processed) WHERE processed = FALSE; + +-- Triggers +CREATE TRIGGER gitlab_connections_updated_at BEFORE UPDATE ON gitlab_connections + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); + +CREATE TRIGGER gitlab_issues_updated_at BEFORE UPDATE ON gitlab_issues + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); + +CREATE TRIGGER gitlab_mrs_updated_at BEFORE UPDATE ON gitlab_merge_requests + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); diff --git a/backend/migrations/20260126180000_roles_permissions.sql b/backend/migrations/20260126180000_roles_permissions.sql new file mode 100644 index 0000000..5db3e57 --- /dev/null +++ b/backend/migrations/20260126180000_roles_permissions.sql @@ -0,0 +1,202 @@ +-- Core Role/Permission System +-- Provides granular access control with default roles and custom role support + +-- Permission categories +CREATE TYPE permission_category AS ENUM ( + 'platform', -- Platform-wide permissions + 'community', -- Community management + 'proposals', -- Proposal lifecycle + 'voting', -- Voting configuration + 'moderation', -- Content moderation + 'plugins', -- Plugin management + 'users', -- User management + 'integrations' -- External integrations +); + +-- Core permissions table +CREATE TABLE permissions ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + name VARCHAR(100) NOT NULL UNIQUE, + category permission_category NOT NULL, + description TEXT, + is_system BOOLEAN NOT NULL DEFAULT FALSE, -- System permissions can't be deleted + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Roles table (platform-level and community-level) +CREATE TABLE roles ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + name VARCHAR(100) NOT NULL, + display_name VARCHAR(100) NOT NULL, + description TEXT, + color VARCHAR(7), -- Hex color for UI + community_id UUID REFERENCES communities(id) ON DELETE CASCADE, -- NULL = platform role + is_system BOOLEAN NOT NULL DEFAULT FALSE, -- System roles can't be deleted + is_default BOOLEAN NOT NULL DEFAULT FALSE, -- Default role for new members + priority INT NOT NULL DEFAULT 0, -- Higher = more authority (for display/conflict resolution) + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(name, community_id) +); + +-- Role permissions (many-to-many) 
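+-- Roles grant permissions via role_permissions (below) and reach users via
+-- user_roles; the user_has_permission() helper defined at the end of this
+-- migration resolves platform-level grants first, then community-scoped ones.
+-- Illustrative call (UUIDs are placeholders):
+--   SELECT user_has_permission('<user-uuid>', 'proposals.create', '<community-uuid>');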
+CREATE TABLE role_permissions ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + role_id UUID NOT NULL REFERENCES roles(id) ON DELETE CASCADE, + permission_id UUID NOT NULL REFERENCES permissions(id) ON DELETE CASCADE, + granted BOOLEAN NOT NULL DEFAULT TRUE, -- Can explicitly deny + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(role_id, permission_id) +); + +-- User roles (many-to-many, scoped to community or platform) +CREATE TABLE user_roles ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + role_id UUID NOT NULL REFERENCES roles(id) ON DELETE CASCADE, + community_id UUID REFERENCES communities(id) ON DELETE CASCADE, -- NULL = platform role + granted_by UUID REFERENCES users(id), + granted_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + expires_at TIMESTAMPTZ, -- Optional expiration + UNIQUE(user_id, role_id, community_id) +); + +-- Insert default permissions +INSERT INTO permissions (name, category, description, is_system) VALUES + -- Platform permissions + ('platform.admin', 'platform', 'Full platform administration access', TRUE), + ('platform.settings', 'platform', 'Manage platform settings', TRUE), + ('platform.users.view', 'users', 'View all users', TRUE), + ('platform.users.manage', 'users', 'Manage user accounts', TRUE), + ('platform.users.ban', 'users', 'Ban/suspend users', TRUE), + + -- Community permissions + ('community.create', 'community', 'Create new communities', TRUE), + ('community.settings', 'community', 'Manage community settings', TRUE), + ('community.members.view', 'community', 'View community members', TRUE), + ('community.members.manage', 'community', 'Manage community membership', TRUE), + ('community.members.invite', 'community', 'Invite new members', TRUE), + ('community.roles.manage', 'community', 'Create and manage roles', TRUE), + + -- Proposal permissions + ('proposals.create', 'proposals', 'Create proposals', TRUE), + ('proposals.edit.own', 'proposals', 'Edit own proposals', TRUE), + ('proposals.edit.any', 'proposals', 'Edit any proposal', TRUE), + ('proposals.delete.own', 'proposals', 'Delete own proposals', TRUE), + ('proposals.delete.any', 'proposals', 'Delete any proposal', TRUE), + ('proposals.moderate', 'proposals', 'Moderate proposal lifecycle', TRUE), + + -- Voting permissions + ('voting.vote', 'voting', 'Cast votes', TRUE), + ('voting.configure', 'voting', 'Configure voting methods', TRUE), + ('voting.methods.manage', 'voting', 'Enable/disable voting methods', TRUE), + ('voting.results.view', 'voting', 'View detailed voting results', TRUE), + + -- Moderation permissions + ('moderation.comments.edit', 'moderation', 'Edit comments', TRUE), + ('moderation.comments.delete', 'moderation', 'Delete comments', TRUE), + ('moderation.users.warn', 'moderation', 'Warn users', TRUE), + ('moderation.users.mute', 'moderation', 'Mute users', TRUE), + ('moderation.users.kick', 'moderation', 'Remove users from community', TRUE), + ('moderation.log.view', 'moderation', 'View moderation log', TRUE), + + -- Plugin permissions + ('plugins.view', 'plugins', 'View installed plugins', TRUE), + ('plugins.install', 'plugins', 'Install new plugins', TRUE), + ('plugins.configure', 'plugins', 'Configure plugin settings', TRUE), + ('plugins.uninstall', 'plugins', 'Uninstall plugins', TRUE), + + -- Delegation permissions + ('delegation.delegate', 'voting', 'Delegate votes to others', TRUE), + ('delegation.receive', 'voting', 'Receive delegated votes', TRUE), + + -- Integration permissions + 
('integrations.view', 'integrations', 'View integrations', TRUE), + ('integrations.configure', 'integrations', 'Configure integrations', TRUE) +ON CONFLICT (name) DO NOTHING; + +-- Insert default platform roles +INSERT INTO roles (name, display_name, description, is_system, priority, community_id) VALUES + ('platform_admin', 'Platform Admin', 'Full platform control', TRUE, 1000, NULL), + ('platform_moderator', 'Platform Moderator', 'Platform-wide moderation', TRUE, 500, NULL), + ('user', 'User', 'Standard registered user', TRUE, 100, NULL) +ON CONFLICT (name, community_id) DO NOTHING; + +-- Assign all permissions to platform_admin +INSERT INTO role_permissions (role_id, permission_id, granted) +SELECT r.id, p.id, TRUE +FROM roles r, permissions p +WHERE r.name = 'platform_admin' AND r.community_id IS NULL +ON CONFLICT (role_id, permission_id) DO NOTHING; + +-- Assign moderation permissions to platform_moderator +INSERT INTO role_permissions (role_id, permission_id, granted) +SELECT r.id, p.id, TRUE +FROM roles r, permissions p +WHERE r.name = 'platform_moderator' AND r.community_id IS NULL + AND p.category IN ('moderation', 'community') +ON CONFLICT (role_id, permission_id) DO NOTHING; + +-- Assign basic permissions to user role +INSERT INTO role_permissions (role_id, permission_id, granted) +SELECT r.id, p.id, TRUE +FROM roles r, permissions p +WHERE r.name = 'user' AND r.community_id IS NULL + AND p.name IN ('community.create', 'proposals.create', 'proposals.edit.own', + 'proposals.delete.own', 'voting.vote', 'delegation.delegate', + 'community.members.view', 'plugins.view', 'voting.results.view') +ON CONFLICT (role_id, permission_id) DO NOTHING; + +-- Indexes +CREATE INDEX idx_permissions_category ON permissions(category); +CREATE INDEX idx_roles_community ON roles(community_id); +CREATE INDEX idx_roles_system ON roles(is_system); +CREATE INDEX idx_role_permissions_role ON role_permissions(role_id); +CREATE INDEX idx_user_roles_user ON user_roles(user_id); +CREATE INDEX idx_user_roles_role ON user_roles(role_id); +CREATE INDEX idx_user_roles_community ON user_roles(community_id); + +-- Triggers +CREATE TRIGGER roles_updated_at BEFORE UPDATE ON roles + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); + +-- Function to check if user has permission +CREATE OR REPLACE FUNCTION user_has_permission( + p_user_id UUID, + p_permission_name VARCHAR, + p_community_id UUID DEFAULT NULL +) RETURNS BOOLEAN AS $$ +DECLARE + has_perm BOOLEAN := FALSE; +BEGIN + -- Check platform roles first + SELECT EXISTS ( + SELECT 1 FROM user_roles ur + JOIN role_permissions rp ON ur.role_id = rp.role_id + JOIN permissions p ON rp.permission_id = p.id + WHERE ur.user_id = p_user_id + AND p.name = p_permission_name + AND rp.granted = TRUE + AND ur.community_id IS NULL + AND (ur.expires_at IS NULL OR ur.expires_at > NOW()) + ) INTO has_perm; + + IF has_perm THEN RETURN TRUE; END IF; + + -- Check community roles if community_id provided + IF p_community_id IS NOT NULL THEN + SELECT EXISTS ( + SELECT 1 FROM user_roles ur + JOIN role_permissions rp ON ur.role_id = rp.role_id + JOIN permissions p ON rp.permission_id = p.id + WHERE ur.user_id = p_user_id + AND p.name = p_permission_name + AND rp.granted = TRUE + AND ur.community_id = p_community_id + AND (ur.expires_at IS NULL OR ur.expires_at > NOW()) + ) INTO has_perm; + END IF; + + RETURN has_perm; +END; +$$ LANGUAGE plpgsql; diff --git a/backend/migrations/20260126190000_voting_plugins.sql b/backend/migrations/20260126190000_voting_plugins.sql new file mode 100644 
index 0000000..a3e2caa --- /dev/null +++ b/backend/migrations/20260126190000_voting_plugins.sql @@ -0,0 +1,200 @@ +-- Voting Methods as Plugins +-- Each voting method is a configurable plugin with its own settings + +-- Voting method plugins (system plugins for voting) +CREATE TABLE voting_method_plugins ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + name VARCHAR(50) NOT NULL UNIQUE, + display_name VARCHAR(100) NOT NULL, + description TEXT, + icon VARCHAR(50), -- Icon identifier for UI + is_active BOOLEAN NOT NULL DEFAULT TRUE, -- Platform-level activation + is_default BOOLEAN NOT NULL DEFAULT FALSE, -- Default method for new proposals + config_schema JSONB, -- JSON Schema for configuration options + default_config JSONB DEFAULT '{}', + min_options INT DEFAULT 2, + max_options INT, + supports_delegation BOOLEAN NOT NULL DEFAULT TRUE, + complexity_level VARCHAR(20) DEFAULT 'simple', -- simple, moderate, advanced + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Community voting method configuration +CREATE TABLE community_voting_methods ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + voting_method_id UUID NOT NULL REFERENCES voting_method_plugins(id) ON DELETE CASCADE, + is_enabled BOOLEAN NOT NULL DEFAULT TRUE, + is_default BOOLEAN NOT NULL DEFAULT FALSE, + config JSONB DEFAULT '{}', -- Community-specific configuration + allowed_roles UUID[], -- Roles that can use this method (NULL = all) + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(community_id, voting_method_id) +); + +-- Insert default voting method plugins +INSERT INTO voting_method_plugins (name, display_name, description, icon, is_active, is_default, config_schema, default_config, complexity_level) VALUES +( + 'approval', + 'Approval Voting', + 'Vote for one or more options. Simple and easy to understand.', + 'check-circle', + TRUE, + TRUE, + '{ + "type": "object", + "properties": { + "max_selections": {"type": "integer", "minimum": 1, "description": "Maximum options a voter can select"}, + "require_selection": {"type": "boolean", "description": "Require at least one selection"} + } + }', + '{"max_selections": null, "require_selection": true}', + 'simple' +), +( + 'ranked_choice', + 'Ranked Choice (IRV)', + 'Rank options in order of preference. Eliminates spoiler effect.', + 'list-ordered', + TRUE, + FALSE, + '{ + "type": "object", + "properties": { + "require_full_ranking": {"type": "boolean", "description": "Require ranking all options"}, + "min_rankings": {"type": "integer", "minimum": 1, "description": "Minimum rankings required"} + } + }', + '{"require_full_ranking": false, "min_rankings": 1}', + 'moderate' +), +( + 'schulze', + 'Schulze Method', + 'Condorcet-consistent pairwise comparison. Best for complex decisions.', + 'git-compare', + TRUE, + FALSE, + '{ + "type": "object", + "properties": { + "show_pairwise_matrix": {"type": "boolean", "description": "Show detailed pairwise comparison results"}, + "allow_ties": {"type": "boolean", "description": "Allow equal rankings"} + } + }', + '{"show_pairwise_matrix": true, "allow_ties": true}', + 'advanced' +), +( + 'star', + 'STAR Voting', + 'Score Then Automatic Runoff. 
Rate options 0-5 stars.', + 'star', + TRUE, + FALSE, + '{ + "type": "object", + "properties": { + "max_score": {"type": "integer", "minimum": 3, "maximum": 10, "description": "Maximum score (default 5)"}, + "show_runoff": {"type": "boolean", "description": "Show automatic runoff details"} + } + }', + '{"max_score": 5, "show_runoff": true}', + 'moderate' +), +( + 'quadratic', + 'Quadratic Voting', + 'Express intensity of preference. Cost = votes². Good for resource allocation.', + 'trending-up', + TRUE, + FALSE, + '{ + "type": "object", + "properties": { + "credit_budget": {"type": "integer", "minimum": 10, "description": "Credits per voter"}, + "allow_negative": {"type": "boolean", "description": "Allow negative votes (against)"} + } + }', + '{"credit_budget": 100, "allow_negative": false}', + 'advanced' +) +ON CONFLICT (name) DO UPDATE SET + display_name = EXCLUDED.display_name, + description = EXCLUDED.description, + config_schema = EXCLUDED.config_schema, + default_config = EXCLUDED.default_config; + +-- Default plugins registry (for setup wizard) +CREATE TABLE default_plugins ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + plugin_name VARCHAR(100) NOT NULL, + plugin_type VARCHAR(50) NOT NULL, -- 'voting', 'integration', 'feature', 'theme' + display_name VARCHAR(100) NOT NULL, + description TEXT, + is_core BOOLEAN NOT NULL DEFAULT FALSE, -- Core plugins can't be disabled + is_recommended BOOLEAN NOT NULL DEFAULT TRUE, -- Recommended for new installs + default_enabled BOOLEAN NOT NULL DEFAULT TRUE, + config_defaults JSONB DEFAULT '{}', + dependencies TEXT[], -- Other plugins this depends on + category VARCHAR(50), + sort_order INT DEFAULT 0, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Insert default plugins for setup +INSERT INTO default_plugins (plugin_name, plugin_type, display_name, description, is_core, is_recommended, default_enabled, category, sort_order) VALUES + -- Core plugins (always enabled) + ('core.auth', 'feature', 'Authentication', 'User authentication and sessions', TRUE, TRUE, TRUE, 'core', 1), + ('core.communities', 'feature', 'Communities', 'Community management', TRUE, TRUE, TRUE, 'core', 2), + ('core.proposals', 'feature', 'Proposals', 'Proposal creation and management', TRUE, TRUE, TRUE, 'core', 3), + + -- Voting plugins + ('voting.approval', 'voting', 'Approval Voting', 'Simple approval voting', FALSE, TRUE, TRUE, 'voting', 10), + ('voting.ranked_choice', 'voting', 'Ranked Choice', 'Instant runoff voting', FALSE, TRUE, TRUE, 'voting', 11), + ('voting.schulze', 'voting', 'Schulze Method', 'Condorcet voting', FALSE, FALSE, FALSE, 'voting', 12), + ('voting.star', 'voting', 'STAR Voting', 'Score then automatic runoff', FALSE, TRUE, TRUE, 'voting', 13), + ('voting.quadratic', 'voting', 'Quadratic Voting', 'Intensity-weighted voting', FALSE, FALSE, FALSE, 'voting', 14), + + -- Feature plugins + ('feature.delegation', 'feature', 'Liquid Delegation', 'Vote delegation system', FALSE, TRUE, TRUE, 'governance', 20), + ('feature.deliberation', 'feature', 'Deliberation Phases', 'Structured discussion phases', FALSE, TRUE, TRUE, 'governance', 21), + ('feature.moderation', 'feature', 'Moderation Tools', 'Content moderation', FALSE, TRUE, TRUE, 'moderation', 22), + ('feature.notifications', 'feature', 'Notifications', 'User notifications', FALSE, TRUE, TRUE, 'communication', 23), + + -- Integration plugins + ('integration.gitlab', 'integration', 'GitLab Integration', 'Connect to GitLab projects', FALSE, FALSE, FALSE, 'integrations', 30), + 
('integration.github', 'integration', 'GitHub Integration', 'Connect to GitHub repositories', FALSE, FALSE, FALSE, 'integrations', 31), + ('integration.matrix', 'integration', 'Matrix Integration', 'Matrix chat notifications', FALSE, FALSE, FALSE, 'integrations', 32), + ('integration.discourse', 'integration', 'Discourse Integration', 'Discourse forum sync', FALSE, FALSE, FALSE, 'integrations', 33) +ON CONFLICT DO NOTHING; + +-- Instance plugin configuration (what's enabled at platform level) +CREATE TABLE instance_plugins ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + plugin_name VARCHAR(100) NOT NULL UNIQUE, + is_enabled BOOLEAN NOT NULL DEFAULT TRUE, + config JSONB DEFAULT '{}', + enabled_at TIMESTAMPTZ, + enabled_by UUID REFERENCES users(id), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Indexes +CREATE INDEX idx_voting_method_plugins_active ON voting_method_plugins(is_active); +CREATE INDEX idx_community_voting_methods_community ON community_voting_methods(community_id); +CREATE INDEX idx_default_plugins_type ON default_plugins(plugin_type); +CREATE INDEX idx_instance_plugins_enabled ON instance_plugins(is_enabled); + +-- Triggers +CREATE TRIGGER voting_method_plugins_updated_at BEFORE UPDATE ON voting_method_plugins + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); + +CREATE TRIGGER community_voting_methods_updated_at BEFORE UPDATE ON community_voting_methods + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); + +CREATE TRIGGER instance_plugins_updated_at BEFORE UPDATE ON instance_plugins + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); diff --git a/backend/migrations/20260126200000_invitations.sql b/backend/migrations/20260126200000_invitations.sql new file mode 100644 index 0000000..856aa1f --- /dev/null +++ b/backend/migrations/20260126200000_invitations.sql @@ -0,0 +1,132 @@ +-- Invitation system for invite-only registration +-- Supports both platform-level and community-level invitations + +CREATE TABLE invitations ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + + -- Invitation code (unique, URL-safe) + code VARCHAR(64) UNIQUE NOT NULL, + + -- Who created this invitation + created_by UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + + -- Optional: specific email this invite is for + email VARCHAR(255), + + -- Optional: community-specific invite + community_id UUID REFERENCES communities(id) ON DELETE CASCADE, + + -- Invitation metadata + max_uses INTEGER DEFAULT 1, + uses_count INTEGER DEFAULT 0, + + -- Expiration + expires_at TIMESTAMPTZ, + + -- Status + is_active BOOLEAN DEFAULT TRUE, + + -- Timestamps + created_at TIMESTAMPTZ DEFAULT NOW(), + + -- Constraint: can't exceed max uses + CONSTRAINT valid_uses CHECK (uses_count <= max_uses OR max_uses IS NULL) +); + +-- Track invitation usage +CREATE TABLE invitation_uses ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + invitation_id UUID NOT NULL REFERENCES invitations(id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + used_at TIMESTAMPTZ DEFAULT NOW(), + + UNIQUE(invitation_id, user_id) +); + +-- Add invitation reference to users table +ALTER TABLE users ADD COLUMN IF NOT EXISTS invited_by UUID REFERENCES invitations(id); + +-- Indexes +CREATE INDEX idx_invitations_code ON invitations(code); +CREATE INDEX idx_invitations_created_by ON invitations(created_by); +CREATE INDEX idx_invitations_community ON invitations(community_id); +CREATE INDEX idx_invitations_email ON invitations(email); +CREATE INDEX 
idx_invitations_active ON invitations(is_active) WHERE is_active = TRUE; + +-- Function to generate secure invitation code +CREATE OR REPLACE FUNCTION generate_invitation_code() RETURNS VARCHAR(64) AS $$ +DECLARE + chars TEXT := 'ABCDEFGHJKLMNPQRSTUVWXYZabcdefghjkmnpqrstuvwxyz23456789'; + result VARCHAR(64) := ''; + i INTEGER; +BEGIN + FOR i IN 1..16 LOOP + result := result || substr(chars, floor(random() * length(chars) + 1)::integer, 1); + END LOOP; + RETURN result; +END; +$$ LANGUAGE plpgsql; + +-- Function to validate and use an invitation +CREATE OR REPLACE FUNCTION use_invitation( + p_code VARCHAR(64), + p_user_id UUID, + p_email VARCHAR(255) DEFAULT NULL +) RETURNS TABLE( + success BOOLEAN, + invitation_id UUID, + community_id UUID, + error_message TEXT +) AS $$ +DECLARE + v_invite invitations%ROWTYPE; +BEGIN + -- Find the invitation + SELECT * INTO v_invite + FROM invitations + WHERE code = p_code AND is_active = TRUE + FOR UPDATE; + + IF NOT FOUND THEN + RETURN QUERY SELECT FALSE, NULL::UUID, NULL::UUID, 'Invalid invitation code'::TEXT; + RETURN; + END IF; + + -- Check expiration + IF v_invite.expires_at IS NOT NULL AND v_invite.expires_at < NOW() THEN + UPDATE invitations SET is_active = FALSE WHERE id = v_invite.id; + RETURN QUERY SELECT FALSE, NULL::UUID, NULL::UUID, 'Invitation has expired'::TEXT; + RETURN; + END IF; + + -- Check max uses + IF v_invite.max_uses IS NOT NULL AND v_invite.uses_count >= v_invite.max_uses THEN + UPDATE invitations SET is_active = FALSE WHERE id = v_invite.id; + RETURN QUERY SELECT FALSE, NULL::UUID, NULL::UUID, 'Invitation has reached maximum uses'::TEXT; + RETURN; + END IF; + + -- Check email restriction + IF v_invite.email IS NOT NULL AND v_invite.email != p_email THEN + RETURN QUERY SELECT FALSE, NULL::UUID, NULL::UUID, 'This invitation is for a specific email address'::TEXT; + RETURN; + END IF; + + -- Record the use + INSERT INTO invitation_uses (invitation_id, user_id) VALUES (v_invite.id, p_user_id); + + -- Increment uses count + UPDATE invitations + SET uses_count = uses_count + 1, + is_active = CASE + WHEN max_uses IS NOT NULL AND uses_count + 1 >= max_uses THEN FALSE + ELSE is_active + END + WHERE id = v_invite.id; + + -- Update user's invited_by + UPDATE users SET invited_by = v_invite.id WHERE id = p_user_id; + + RETURN QUERY SELECT TRUE, v_invite.id, v_invite.community_id, NULL::TEXT; +END; +$$ LANGUAGE plpgsql; diff --git a/backend/migrations/20260126210000_plugin_kv_store.sql b/backend/migrations/20260126210000_plugin_kv_store.sql new file mode 100644 index 0000000..27930e4 --- /dev/null +++ b/backend/migrations/20260126210000_plugin_kv_store.sql @@ -0,0 +1,162 @@ +-- Plugin Key-Value Store for WASM plugins +-- Provides persistent storage for plugin data with isolation per plugin/community + +CREATE TABLE plugin_kv_store ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + + -- Plugin identification + plugin_name VARCHAR(255) NOT NULL, + + -- Optional community scope (NULL = global/instance-level) + community_id UUID REFERENCES communities(id) ON DELETE CASCADE, + + -- Key-value pair + key VARCHAR(512) NOT NULL, + value JSONB NOT NULL DEFAULT '{}', + + -- Metadata + created_at TIMESTAMPTZ DEFAULT NOW(), + updated_at TIMESTAMPTZ DEFAULT NOW(), + expires_at TIMESTAMPTZ, -- Optional TTL + + -- Unique constraint per plugin/community/key combination + UNIQUE(plugin_name, community_id, key) +); + +-- Index for fast lookups +CREATE INDEX idx_plugin_kv_plugin ON plugin_kv_store(plugin_name); +CREATE INDEX idx_plugin_kv_community ON 
plugin_kv_store(community_id); +CREATE INDEX idx_plugin_kv_lookup ON plugin_kv_store(plugin_name, community_id, key); +CREATE INDEX idx_plugin_kv_expires ON plugin_kv_store(expires_at) WHERE expires_at IS NOT NULL; + +-- Plugin events table for event emission +CREATE TABLE plugin_events ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + + -- Event source + plugin_name VARCHAR(255) NOT NULL, + community_id UUID REFERENCES communities(id) ON DELETE CASCADE, + actor_user_id UUID REFERENCES users(id) ON DELETE SET NULL, + + -- Event data + event_name VARCHAR(255) NOT NULL, + payload JSONB NOT NULL DEFAULT '{}', + + -- Timestamps + created_at TIMESTAMPTZ DEFAULT NOW(), + + -- Processing status (for async event handlers) + processed BOOLEAN DEFAULT FALSE, + processed_at TIMESTAMPTZ +); + +-- Index for event queries +CREATE INDEX idx_plugin_events_name ON plugin_events(event_name); +CREATE INDEX idx_plugin_events_plugin ON plugin_events(plugin_name); +CREATE INDEX idx_plugin_events_community ON plugin_events(community_id); +CREATE INDEX idx_plugin_events_unprocessed ON plugin_events(processed) WHERE processed = FALSE; +CREATE INDEX idx_plugin_events_created ON plugin_events(created_at DESC); + +-- Function to get plugin setting +CREATE OR REPLACE FUNCTION get_plugin_setting( + p_plugin_name VARCHAR(255), + p_community_id UUID, + p_key VARCHAR(255) +) RETURNS JSONB AS $$ +DECLARE + v_setting JSONB; +BEGIN + -- First try community-specific setting + IF p_community_id IS NOT NULL THEN + SELECT settings->p_key INTO v_setting + FROM community_plugins cp + JOIN plugins p ON p.id = cp.plugin_id + WHERE p.name = p_plugin_name AND cp.community_id = p_community_id; + + IF v_setting IS NOT NULL THEN + RETURN v_setting; + END IF; + END IF; + + -- Fall back to plugin default settings + SELECT default_settings->p_key INTO v_setting + FROM plugins + WHERE name = p_plugin_name; + + RETURN COALESCE(v_setting, 'null'::jsonb); +END; +$$ LANGUAGE plpgsql; + +-- Function to set KV value +CREATE OR REPLACE FUNCTION plugin_kv_set( + p_plugin_name VARCHAR(255), + p_community_id UUID, + p_key VARCHAR(512), + p_value JSONB, + p_ttl_seconds INTEGER DEFAULT NULL +) RETURNS BOOLEAN AS $$ +DECLARE + v_expires_at TIMESTAMPTZ; +BEGIN + IF p_ttl_seconds IS NOT NULL THEN + v_expires_at := NOW() + (p_ttl_seconds || ' seconds')::interval; + END IF; + + INSERT INTO plugin_kv_store (plugin_name, community_id, key, value, expires_at) + VALUES (p_plugin_name, p_community_id, p_key, p_value, v_expires_at) + ON CONFLICT (plugin_name, community_id, key) + DO UPDATE SET + value = EXCLUDED.value, + updated_at = NOW(), + expires_at = EXCLUDED.expires_at; + + RETURN TRUE; +END; +$$ LANGUAGE plpgsql; + +-- Function to get KV value +CREATE OR REPLACE FUNCTION plugin_kv_get( + p_plugin_name VARCHAR(255), + p_community_id UUID, + p_key VARCHAR(512) +) RETURNS JSONB AS $$ +DECLARE + v_value JSONB; +BEGIN + SELECT value INTO v_value + FROM plugin_kv_store + WHERE plugin_name = p_plugin_name + AND (community_id = p_community_id OR (p_community_id IS NULL AND community_id IS NULL)) + AND key = p_key + AND (expires_at IS NULL OR expires_at > NOW()); + + RETURN v_value; +END; +$$ LANGUAGE plpgsql; + +-- Function to delete KV value +CREATE OR REPLACE FUNCTION plugin_kv_delete( + p_plugin_name VARCHAR(255), + p_community_id UUID, + p_key VARCHAR(512) +) RETURNS BOOLEAN AS $$ +BEGIN + DELETE FROM plugin_kv_store + WHERE plugin_name = p_plugin_name + AND (community_id = p_community_id OR (p_community_id IS NULL AND community_id IS NULL)) + AND key = 
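+-- Illustrative usage of the KV helpers defined above (hypothetical plugin name and key;
+-- not executed by this migration). A NULL community_id means instance-level scope.
+--   SELECT plugin_kv_set('example_plugin', NULL, 'last_sync', '"2026-01-26"'::jsonb, 3600);
+--   SELECT plugin_kv_get('example_plugin', NULL, 'last_sync');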
p_key; + + RETURN FOUND; +END; +$$ LANGUAGE plpgsql; + +-- Cleanup job for expired KV entries (to be run periodically) +CREATE OR REPLACE FUNCTION cleanup_expired_plugin_kv() RETURNS INTEGER AS $$ +DECLARE + deleted_count INTEGER; +BEGIN + DELETE FROM plugin_kv_store WHERE expires_at IS NOT NULL AND expires_at < NOW(); + GET DIAGNOSTICS deleted_count = ROW_COUNT; + RETURN deleted_count; +END; +$$ LANGUAGE plpgsql; diff --git a/backend/migrations/20260126220000_approval_workflows.sql b/backend/migrations/20260126220000_approval_workflows.sql new file mode 100644 index 0000000..9439249 --- /dev/null +++ b/backend/migrations/20260126220000_approval_workflows.sql @@ -0,0 +1,192 @@ +-- Approval workflows for user registration and community creation +-- Supports "approval" mode for registration_mode and platform_mode settings + +-- Pending user registrations (for approval mode) +CREATE TABLE pending_registrations ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + + -- User data (not yet in users table) + username VARCHAR(255) NOT NULL, + email VARCHAR(255) NOT NULL, + password_hash VARCHAR(255) NOT NULL, + display_name VARCHAR(255), + + -- Invitation used (if any) + invitation_id UUID REFERENCES invitations(id) ON DELETE SET NULL, + + -- Approval workflow + status VARCHAR(50) DEFAULT 'pending' CHECK (status IN ('pending', 'approved', 'rejected')), + reviewed_by UUID REFERENCES users(id) ON DELETE SET NULL, + reviewed_at TIMESTAMPTZ, + rejection_reason TEXT, + + -- Timestamps + created_at TIMESTAMPTZ DEFAULT NOW(), + expires_at TIMESTAMPTZ DEFAULT NOW() + INTERVAL '7 days', + + -- Prevent duplicates + UNIQUE(username), + UNIQUE(email) +); + +-- Pending community creation requests (for approval mode) +CREATE TABLE pending_communities ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + + -- Community data (not yet in communities table) + name VARCHAR(255) NOT NULL, + slug VARCHAR(255) NOT NULL, + description TEXT, + + -- Requester + requested_by UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + + -- Approval workflow + status VARCHAR(50) DEFAULT 'pending' CHECK (status IN ('pending', 'approved', 'rejected')), + reviewed_by UUID REFERENCES users(id) ON DELETE SET NULL, + reviewed_at TIMESTAMPTZ, + rejection_reason TEXT, + + -- Timestamps + created_at TIMESTAMPTZ DEFAULT NOW(), + + -- Prevent duplicate requests + UNIQUE(slug) +); + +-- Indexes +CREATE INDEX idx_pending_registrations_status ON pending_registrations(status); +CREATE INDEX idx_pending_registrations_email ON pending_registrations(email); +CREATE INDEX idx_pending_registrations_created ON pending_registrations(created_at DESC); + +CREATE INDEX idx_pending_communities_status ON pending_communities(status); +CREATE INDEX idx_pending_communities_requested_by ON pending_communities(requested_by); +CREATE INDEX idx_pending_communities_created ON pending_communities(created_at DESC); + +-- Function to approve a registration +CREATE OR REPLACE FUNCTION approve_registration( + p_pending_id UUID, + p_reviewer_id UUID +) RETURNS UUID AS $$ +DECLARE + v_pending pending_registrations%ROWTYPE; + v_user_id UUID; + v_user_count BIGINT; +BEGIN + -- Get pending registration + SELECT * INTO v_pending FROM pending_registrations WHERE id = p_pending_id AND status = 'pending'; + IF NOT FOUND THEN + RAISE EXCEPTION 'Pending registration not found or already processed'; + END IF; + + -- Check if expired + IF v_pending.expires_at < NOW() THEN + UPDATE pending_registrations SET status = 'rejected', rejection_reason = 'Expired' WHERE id = p_pending_id; + 
RAISE EXCEPTION 'Registration request has expired'; + END IF; + + -- Check if first user (should be admin) + SELECT COUNT(*) INTO v_user_count FROM users; + + -- Create the user + INSERT INTO users (username, email, password_hash, display_name, is_admin, invited_by) + VALUES (v_pending.username, v_pending.email, v_pending.password_hash, v_pending.display_name, v_user_count = 0, v_pending.invitation_id) + RETURNING id INTO v_user_id; + + -- Use invitation if provided + IF v_pending.invitation_id IS NOT NULL THEN + PERFORM use_invitation( + (SELECT code FROM invitations WHERE id = v_pending.invitation_id), + v_user_id, + v_pending.email + ); + END IF; + + -- Mark as approved + UPDATE pending_registrations + SET status = 'approved', reviewed_by = p_reviewer_id, reviewed_at = NOW() + WHERE id = p_pending_id; + + RETURN v_user_id; +END; +$$ LANGUAGE plpgsql; + +-- Function to reject a registration +CREATE OR REPLACE FUNCTION reject_registration( + p_pending_id UUID, + p_reviewer_id UUID, + p_reason TEXT DEFAULT NULL +) RETURNS BOOLEAN AS $$ +BEGIN + UPDATE pending_registrations + SET status = 'rejected', + reviewed_by = p_reviewer_id, + reviewed_at = NOW(), + rejection_reason = p_reason + WHERE id = p_pending_id AND status = 'pending'; + + RETURN FOUND; +END; +$$ LANGUAGE plpgsql; + +-- Function to approve a community +CREATE OR REPLACE FUNCTION approve_community( + p_pending_id UUID, + p_reviewer_id UUID +) RETURNS UUID AS $$ +DECLARE + v_pending pending_communities%ROWTYPE; + v_community_id UUID; +BEGIN + -- Get pending community + SELECT * INTO v_pending FROM pending_communities WHERE id = p_pending_id AND status = 'pending'; + IF NOT FOUND THEN + RAISE EXCEPTION 'Pending community not found or already processed'; + END IF; + + -- Create the community + INSERT INTO communities (name, slug, description, created_by, is_active) + VALUES (v_pending.name, v_pending.slug, v_pending.description, v_pending.requested_by, true) + RETURNING id INTO v_community_id; + + -- Add requester as admin + INSERT INTO community_members (community_id, user_id, role) + VALUES (v_community_id, v_pending.requested_by, 'admin'); + + -- Mark as approved + UPDATE pending_communities + SET status = 'approved', reviewed_by = p_reviewer_id, reviewed_at = NOW() + WHERE id = p_pending_id; + + RETURN v_community_id; +END; +$$ LANGUAGE plpgsql; + +-- Function to reject a community +CREATE OR REPLACE FUNCTION reject_community( + p_pending_id UUID, + p_reviewer_id UUID, + p_reason TEXT DEFAULT NULL +) RETURNS BOOLEAN AS $$ +BEGIN + UPDATE pending_communities + SET status = 'rejected', + reviewed_by = p_reviewer_id, + reviewed_at = NOW(), + rejection_reason = p_reason + WHERE id = p_pending_id AND status = 'pending'; + + RETURN FOUND; +END; +$$ LANGUAGE plpgsql; + +-- Cleanup expired pending registrations (to be run periodically) +CREATE OR REPLACE FUNCTION cleanup_expired_pending_registrations() RETURNS INTEGER AS $$ +DECLARE + deleted_count INTEGER; +BEGIN + DELETE FROM pending_registrations WHERE expires_at < NOW() AND status = 'pending'; + GET DIAGNOSTICS deleted_count = ROW_COUNT; + RETURN deleted_count; +END; +$$ LANGUAGE plpgsql; diff --git a/backend/migrations/20260126230000_vote_reproducibility.sql b/backend/migrations/20260126230000_vote_reproducibility.sql new file mode 100644 index 0000000..fb075a9 --- /dev/null +++ b/backend/migrations/20260126230000_vote_reproducibility.sql @@ -0,0 +1,293 @@ +-- Vote Reproducibility: Bind votes to plugin versions for audit integrity +-- This ensures votes can always be re-verified 
using the exact logic that was active + +-- Add plugin version tracking to proposals +ALTER TABLE proposals ADD COLUMN IF NOT EXISTS voting_plugin_version VARCHAR(20); +ALTER TABLE proposals ADD COLUMN IF NOT EXISTS voting_plugin_hash VARCHAR(64); + +-- Archive of plugin WASM binaries for reproducibility +CREATE TABLE IF NOT EXISTS plugin_wasm_archive ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + plugin_name VARCHAR(100) NOT NULL, + version VARCHAR(20) NOT NULL, + wasm_binary BYTEA, -- Actual WASM code (nullable for built-in methods) + wasm_hash VARCHAR(64) NOT NULL, + config_schema JSONB, -- Schema at time of archive + algorithm_description TEXT, -- Human-readable description of the algorithm + archived_at TIMESTAMPTZ DEFAULT NOW(), + archived_by UUID REFERENCES users(id), + UNIQUE(plugin_name, version) +); + +-- Vote audit records with version info +CREATE TABLE IF NOT EXISTS vote_audit ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + voting_method VARCHAR(50) NOT NULL, + plugin_version VARCHAR(20) NOT NULL, + plugin_hash VARCHAR(64), + + -- Snapshot of inputs + options_snapshot JSONB NOT NULL, -- Options at time of vote close + votes_count INT NOT NULL, + voters_count INT NOT NULL, + + -- Calculated results + results_snapshot JSONB NOT NULL, -- Full results at time of close + winner_option_id UUID, + + -- Verification + input_hash VARCHAR(64) NOT NULL, -- SHA256 of (options + votes) + result_hash VARCHAR(64) NOT NULL, -- SHA256 of results + + -- Metadata + closed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + verified_at TIMESTAMPTZ, + verified_by UUID REFERENCES users(id) +); + +-- Function to record plugin version when voting starts +CREATE OR REPLACE FUNCTION record_voting_plugin_version() +RETURNS TRIGGER AS $$ +DECLARE + v_version VARCHAR(20); + v_hash VARCHAR(64); +BEGIN + -- Only trigger when transitioning TO voting status + IF NEW.status = 'voting' AND (OLD.status IS NULL OR OLD.status != 'voting') THEN + -- Get current version of the voting method plugin + SELECT + COALESCE(pv.version, '1.0.0'), + COALESCE(pv.wasm_hash, md5(vmp.name)::varchar) + INTO v_version, v_hash + FROM voting_method_plugins vmp + LEFT JOIN plugin_versions pv ON pv.plugin_id = vmp.id AND pv.is_current = TRUE + WHERE vmp.name = NEW.voting_method; + + -- If no version found, use built-in default + IF v_version IS NULL THEN + v_version := '1.0.0-builtin'; + v_hash := md5(NEW.voting_method)::varchar; + END IF; + + NEW.voting_plugin_version := v_version; + NEW.voting_plugin_hash := v_hash; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +-- Create trigger for version recording +DROP TRIGGER IF EXISTS record_voting_plugin_version_trigger ON proposals; +CREATE TRIGGER record_voting_plugin_version_trigger + BEFORE UPDATE ON proposals + FOR EACH ROW EXECUTE FUNCTION record_voting_plugin_version(); + +-- Function to create vote audit record when voting closes +CREATE OR REPLACE FUNCTION create_vote_audit_record() +RETURNS TRIGGER AS $$ +DECLARE + v_options JSONB; + v_results JSONB; + v_votes_count INT; + v_voters_count INT; + v_winner_id UUID; + v_input_hash VARCHAR(64); + v_result_hash VARCHAR(64); +BEGIN + -- Only trigger when transitioning TO closed status + IF NEW.status = 'closed' AND OLD.status = 'voting' THEN + -- Gather options snapshot + SELECT jsonb_agg(jsonb_build_object( + 'id', po.id, + 'label', po.label, + 'sort_order', po.sort_order + ) ORDER BY po.sort_order) + INTO v_options + FROM proposal_options po + WHERE 
po.proposal_id = NEW.id; + + -- Count votes and voters + SELECT COUNT(*), COUNT(DISTINCT voter_id) + INTO v_votes_count, v_voters_count + FROM votes + WHERE proposal_id = NEW.id; + + -- Get winner (option with most votes) + SELECT option_id INTO v_winner_id + FROM votes + WHERE proposal_id = NEW.id + GROUP BY option_id + ORDER BY COUNT(*) DESC + LIMIT 1; + + -- Build results snapshot + SELECT jsonb_agg(jsonb_build_object( + 'option_id', r.option_id, + 'label', po.label, + 'vote_count', r.vote_count + ) ORDER BY r.vote_count DESC) + INTO v_results + FROM ( + SELECT option_id, COUNT(*) as vote_count + FROM votes + WHERE proposal_id = NEW.id + GROUP BY option_id + ) r + JOIN proposal_options po ON po.id = r.option_id; + + -- Calculate hashes for integrity + v_input_hash := md5(v_options::text || v_votes_count::text)::varchar; + v_result_hash := md5(COALESCE(v_results::text, ''))::varchar; + + -- Insert audit record + INSERT INTO vote_audit ( + proposal_id, voting_method, plugin_version, plugin_hash, + options_snapshot, votes_count, voters_count, + results_snapshot, winner_option_id, + input_hash, result_hash, closed_at + ) VALUES ( + NEW.id, NEW.voting_method, + COALESCE(NEW.voting_plugin_version, '1.0.0-builtin'), + NEW.voting_plugin_hash, + COALESCE(v_options, '[]'::jsonb), v_votes_count, v_voters_count, + COALESCE(v_results, '[]'::jsonb), v_winner_id, + v_input_hash, v_result_hash, NOW() + ); + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +-- Create trigger for audit record +DROP TRIGGER IF EXISTS create_vote_audit_trigger ON proposals; +CREATE TRIGGER create_vote_audit_trigger + AFTER UPDATE ON proposals + FOR EACH ROW EXECUTE FUNCTION create_vote_audit_record(); + +-- Seed archive with built-in voting methods +INSERT INTO plugin_wasm_archive (plugin_name, version, wasm_hash, algorithm_description) +VALUES + ('approval', '1.0.0-builtin', md5('approval')::varchar, + 'Approval Voting: Voters select any number of options. Winner is option with most approvals.'), + ('ranked_choice', '1.0.0-builtin', md5('ranked_choice')::varchar, + 'Ranked Choice (IRV): Voters rank options. Lowest-ranked eliminated iteratively until majority.'), + ('schulze', '1.0.0-builtin', md5('schulze')::varchar, + 'Schulze Method: Condorcet-consistent pairwise comparison. Finds strongest path winner.'), + ('star', '1.0.0-builtin', md5('star')::varchar, + 'STAR Voting: Score Then Automatic Runoff. Top two scorers face head-to-head runoff.'), + ('quadratic', '1.0.0-builtin', md5('quadratic')::varchar, + 'Quadratic Voting: Cost = votes². 
Allows expressing preference intensity with budget constraint.') +ON CONFLICT (plugin_name, version) DO NOTHING; + +-- Indexes for audit queries +CREATE INDEX IF NOT EXISTS idx_vote_audit_proposal ON vote_audit(proposal_id); +CREATE INDEX IF NOT EXISTS idx_vote_audit_closed ON vote_audit(closed_at); +CREATE INDEX IF NOT EXISTS idx_vote_audit_method ON vote_audit(voting_method); +CREATE INDEX IF NOT EXISTS idx_plugin_archive_name ON plugin_wasm_archive(plugin_name); + +-- Function to verify a past vote result +CREATE OR REPLACE FUNCTION verify_vote_result(p_proposal_id UUID) +RETURNS TABLE( + is_valid BOOLEAN, + recorded_hash VARCHAR(64), + computed_hash VARCHAR(64), + details TEXT +) AS $$ +DECLARE + v_audit vote_audit%ROWTYPE; + v_current_results JSONB; + v_computed_hash VARCHAR(64); +BEGIN + -- Get audit record + SELECT * INTO v_audit FROM vote_audit WHERE proposal_id = p_proposal_id; + + IF NOT FOUND THEN + RETURN QUERY SELECT FALSE, NULL::VARCHAR, NULL::VARCHAR, 'No audit record found'; + RETURN; + END IF; + + -- Recompute results from current data + SELECT jsonb_agg(jsonb_build_object( + 'option_id', r.option_id, + 'label', po.label, + 'vote_count', r.vote_count + ) ORDER BY r.vote_count DESC) + INTO v_current_results + FROM ( + SELECT option_id, COUNT(*) as vote_count + FROM votes + WHERE proposal_id = p_proposal_id + GROUP BY option_id + ) r + JOIN proposal_options po ON po.id = r.option_id; + + v_computed_hash := md5(COALESCE(v_current_results::text, ''))::varchar; + + -- Compare + IF v_computed_hash = v_audit.result_hash THEN + RETURN QUERY SELECT TRUE, v_audit.result_hash, v_computed_hash, 'Vote results verified successfully'; + ELSE + RETURN QUERY SELECT FALSE, v_audit.result_hash, v_computed_hash, 'Vote results do not match audit record'; + END IF; +END; +$$ LANGUAGE plpgsql; + +-- Historical voting power reconstruction +CREATE OR REPLACE FUNCTION get_voting_power_at( + p_user_id UUID, + p_community_id UUID, + p_at_time TIMESTAMPTZ +) RETURNS TABLE( + delegator_id UUID, + delegator_username VARCHAR, + effective_weight DECIMAL, + chain_depth INT +) AS $$ +BEGIN + RETURN QUERY + WITH RECURSIVE delegation_tree AS ( + -- Base: direct delegations to this user at the given time + SELECT + d.delegator_id, + d.delegate_id, + d.weight, + 1 as depth, + ARRAY[d.delegator_id] as path + FROM delegations d + WHERE d.delegate_id = p_user_id + AND (d.community_id = p_community_id OR d.scope = 'global') + AND d.is_active = TRUE + AND d.created_at <= p_at_time + AND (d.revoked_at IS NULL OR d.revoked_at > p_at_time) + + UNION ALL + + -- Recursive: delegations to people who delegated to us + SELECT + d2.delegator_id, + d2.delegate_id, + dt.weight * d2.weight, + dt.depth + 1, + dt.path || d2.delegator_id + FROM delegation_tree dt + JOIN delegations d2 ON d2.delegate_id = dt.delegator_id + WHERE dt.depth < 20 -- Max chain depth + AND NOT (d2.delegator_id = ANY(dt.path)) -- Prevent cycles + AND (d2.community_id = p_community_id OR d2.scope = 'global') + AND d2.is_active = TRUE + AND d2.created_at <= p_at_time + AND (d2.revoked_at IS NULL OR d2.revoked_at > p_at_time) + ) + SELECT + dt.delegator_id, + u.username, + dt.weight::DECIMAL, + dt.depth + FROM delegation_tree dt + JOIN users u ON u.id = dt.delegator_id + ORDER BY dt.depth, dt.weight DESC; +END; +$$ LANGUAGE plpgsql; diff --git a/backend/migrations/20260126240000_topic_voting_methods.sql b/backend/migrations/20260126240000_topic_voting_methods.sql new file mode 100644 index 0000000..9e45a10 --- /dev/null +++ 
b/backend/migrations/20260126240000_topic_voting_methods.sql @@ -0,0 +1,228 @@ +-- Topic-based Voting Method Assignment +-- Allows different voting methods for different topics within a community + +-- Topic-specific voting method configuration +CREATE TABLE IF NOT EXISTS topic_voting_methods ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + topic_id UUID NOT NULL REFERENCES topics(id) ON DELETE CASCADE, + voting_method_id UUID NOT NULL REFERENCES voting_method_plugins(id) ON DELETE CASCADE, + is_enabled BOOLEAN NOT NULL DEFAULT TRUE, + is_default BOOLEAN NOT NULL DEFAULT FALSE, + config JSONB DEFAULT '{}', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(topic_id, voting_method_id) +); + +-- Voting phase configurations for granular control +CREATE TABLE IF NOT EXISTS voting_phase_configs ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + + -- Can be applied at community or topic level + community_id UUID REFERENCES communities(id) ON DELETE CASCADE, + topic_id UUID REFERENCES topics(id) ON DELETE CASCADE, + + -- Phase durations (in hours, NULL = unlimited/manual) + inform_duration_hours INT, + discuss_duration_hours INT, + voting_duration_hours INT, + + -- Phase requirements + require_inform_phase BOOLEAN NOT NULL DEFAULT FALSE, + require_discuss_phase BOOLEAN NOT NULL DEFAULT TRUE, + min_discussion_comments INT DEFAULT 0, + min_read_time_seconds INT DEFAULT 0, + + -- Quorum settings + quorum_type VARCHAR(20) DEFAULT 'none', -- 'none', 'percentage', 'absolute' + quorum_value INT DEFAULT 0, -- percentage (0-100) or absolute number + + -- Auto-transitions + auto_start_discussion BOOLEAN NOT NULL DEFAULT FALSE, + auto_start_voting BOOLEAN NOT NULL DEFAULT FALSE, + auto_close_voting BOOLEAN NOT NULL DEFAULT TRUE, + + -- Notifications + notify_phase_changes BOOLEAN NOT NULL DEFAULT TRUE, + notify_before_close_hours INT DEFAULT 24, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + -- Ensure only one of community_id or topic_id is set (or neither for defaults) + CONSTRAINT phase_config_scope CHECK ( + (community_id IS NOT NULL AND topic_id IS NULL) OR + (community_id IS NULL AND topic_id IS NOT NULL) OR + (community_id IS NULL AND topic_id IS NULL) + ) +); + +-- Phase transition log for audit +CREATE TABLE IF NOT EXISTS phase_transitions ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + from_phase VARCHAR(20), + to_phase VARCHAR(20) NOT NULL, + triggered_by VARCHAR(20) NOT NULL, -- 'manual', 'auto', 'system' + triggered_by_user_id UUID REFERENCES users(id), + metadata JSONB, + transitioned_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Function to get effective voting method for a proposal +CREATE OR REPLACE FUNCTION get_proposal_voting_method(p_proposal_id UUID) +RETURNS TABLE( + method_name VARCHAR, + method_id UUID, + config JSONB +) AS $$ +DECLARE + v_topic_id UUID; + v_community_id UUID; +BEGIN + -- Get proposal's topic and community + SELECT topic_id, community_id INTO v_topic_id, v_community_id + FROM proposals WHERE id = p_proposal_id; + + -- Priority: Topic default > Community default > Platform default + + -- Try topic-level + IF v_topic_id IS NOT NULL THEN + RETURN QUERY + SELECT vm.name, vm.id, tvm.config + FROM topic_voting_methods tvm + JOIN voting_method_plugins vm ON vm.id = tvm.voting_method_id + WHERE tvm.topic_id = v_topic_id AND tvm.is_default = TRUE AND tvm.is_enabled = TRUE + LIMIT 
1; + + IF FOUND THEN RETURN; END IF; + END IF; + + -- Try community-level + RETURN QUERY + SELECT vm.name, vm.id, cvm.config + FROM community_voting_methods cvm + JOIN voting_method_plugins vm ON vm.id = cvm.voting_method_id + WHERE cvm.community_id = v_community_id AND cvm.is_default = TRUE AND cvm.is_enabled = TRUE + LIMIT 1; + + IF FOUND THEN RETURN; END IF; + + -- Fall back to platform default + RETURN QUERY + SELECT vm.name, vm.id, vm.default_config + FROM voting_method_plugins vm + WHERE vm.is_default = TRUE AND vm.is_active = TRUE + LIMIT 1; +END; +$$ LANGUAGE plpgsql; + +-- Function to get effective phase config for a proposal +CREATE OR REPLACE FUNCTION get_proposal_phase_config(p_proposal_id UUID) +RETURNS voting_phase_configs AS $$ +DECLARE + v_topic_id UUID; + v_community_id UUID; + v_config voting_phase_configs; +BEGIN + -- Get proposal's topic and community + SELECT topic_id, community_id INTO v_topic_id, v_community_id + FROM proposals WHERE id = p_proposal_id; + + -- Try topic-level config + IF v_topic_id IS NOT NULL THEN + SELECT * INTO v_config + FROM voting_phase_configs + WHERE topic_id = v_topic_id + LIMIT 1; + + IF FOUND THEN RETURN v_config; END IF; + END IF; + + -- Try community-level config + SELECT * INTO v_config + FROM voting_phase_configs + WHERE community_id = v_community_id + LIMIT 1; + + IF FOUND THEN RETURN v_config; END IF; + + -- Return default config (all NULLs will use system defaults) + SELECT * INTO v_config + FROM voting_phase_configs + WHERE community_id IS NULL AND topic_id IS NULL + LIMIT 1; + + RETURN v_config; +END; +$$ LANGUAGE plpgsql; + +-- Function to check if proposal meets quorum +CREATE OR REPLACE FUNCTION check_proposal_quorum(p_proposal_id UUID) +RETURNS BOOLEAN AS $$ +DECLARE + v_config voting_phase_configs; + v_community_id UUID; + v_member_count INT; + v_vote_count INT; + v_required INT; +BEGIN + v_config := get_proposal_phase_config(p_proposal_id); + + -- No quorum requirement + IF v_config.quorum_type IS NULL OR v_config.quorum_type = 'none' THEN + RETURN TRUE; + END IF; + + -- Get community and counts + SELECT community_id INTO v_community_id FROM proposals WHERE id = p_proposal_id; + + SELECT COUNT(*) INTO v_member_count + FROM community_members WHERE community_id = v_community_id; + + SELECT COUNT(DISTINCT voter_id) INTO v_vote_count + FROM votes WHERE proposal_id = p_proposal_id; + + -- Calculate required votes + IF v_config.quorum_type = 'percentage' THEN + v_required := CEIL(v_member_count * v_config.quorum_value / 100.0); + ELSE -- absolute + v_required := v_config.quorum_value; + END IF; + + RETURN v_vote_count >= v_required; +END; +$$ LANGUAGE plpgsql; + +-- Insert default phase config +INSERT INTO voting_phase_configs ( + require_inform_phase, + require_discuss_phase, + min_discussion_comments, + auto_start_discussion, + auto_start_voting, + auto_close_voting, + notify_phase_changes +) VALUES ( + FALSE, + TRUE, + 0, + FALSE, + FALSE, + TRUE, + TRUE +) ON CONFLICT DO NOTHING; + +-- Indexes +CREATE INDEX IF NOT EXISTS idx_topic_voting_methods_topic ON topic_voting_methods(topic_id); +CREATE INDEX IF NOT EXISTS idx_voting_phase_configs_community ON voting_phase_configs(community_id); +CREATE INDEX IF NOT EXISTS idx_voting_phase_configs_topic ON voting_phase_configs(topic_id); +CREATE INDEX IF NOT EXISTS idx_phase_transitions_proposal ON phase_transitions(proposal_id); +CREATE INDEX IF NOT EXISTS idx_phase_transitions_time ON phase_transitions(transitioned_at); + +-- Triggers +CREATE TRIGGER topic_voting_methods_updated_at 
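+-- Illustrative resolution and quorum check for a single proposal (hypothetical UUID;
+-- not executed by this migration):
+--   SELECT * FROM get_proposal_voting_method('00000000-0000-0000-0000-0000000000aa');
+--   SELECT check_proposal_quorum('00000000-0000-0000-0000-0000000000aa');
+-- The first call walks topic -> community -> platform defaults as implemented above;
+-- the second compares distinct voters against the effective quorum configuration.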
BEFORE UPDATE ON topic_voting_methods + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); + +CREATE TRIGGER voting_phase_configs_updated_at BEFORE UPDATE ON voting_phase_configs + FOR EACH ROW EXECUTE FUNCTION update_updated_at(); diff --git a/backend/migrations/20260126250000_moderation_ledger.sql b/backend/migrations/20260126250000_moderation_ledger.sql new file mode 100644 index 0000000..3ba6773 --- /dev/null +++ b/backend/migrations/20260126250000_moderation_ledger.sql @@ -0,0 +1,554 @@ +-- Moderation Ledger Plugin +-- Immutable, cryptographically-chained log of all moderation decisions +-- This plugin is NON-DEACTIVATABLE by design (core transparency requirement) + +-- ============================================================================ +-- CORE LEDGER TABLE +-- ============================================================================ + +-- Moderation action types +CREATE TYPE moderation_action_type AS ENUM ( + -- Content moderation + 'content_remove', + 'content_hide', + 'content_restore', + 'content_edit', + 'content_flag', + 'content_unflag', + + -- User moderation + 'user_warn', + 'user_mute', + 'user_unmute', + 'user_suspend', + 'user_unsuspend', + 'user_ban', + 'user_unban', + 'user_role_change', + + -- Community moderation + 'community_setting_change', + 'community_rule_add', + 'community_rule_edit', + 'community_rule_remove', + + -- Proposal/voting moderation + 'proposal_close', + 'proposal_reopen', + 'proposal_archive', + 'vote_invalidate', + 'vote_restore', + + -- Escalation + 'escalate_to_admin', + 'escalate_to_community', + 'appeal_received', + 'appeal_resolved' +); + +-- The immutable ledger entries +CREATE TABLE moderation_ledger ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + + -- Sequence for ordering (monotonically increasing per community) + sequence_number BIGINT NOT NULL, + + -- Context + community_id UUID REFERENCES communities(id) ON DELETE SET NULL, + + -- Who took the action + actor_user_id UUID NOT NULL REFERENCES users(id), + actor_role TEXT NOT NULL, -- Role at time of action (preserved for history) + + -- What action was taken + action_type moderation_action_type NOT NULL, + + -- Target of the action + target_type TEXT NOT NULL, -- 'user', 'proposal', 'comment', 'community', etc. 
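+    -- target_type/target_id form a polymorphic reference (no FK is declared here), e.g. a
+    -- hypothetical target_type = 'comment' with target_id = that comment's UUID.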
+ target_id UUID NOT NULL, + target_snapshot JSONB, -- Snapshot of target state before action (for context) + + -- Decision details + reason TEXT NOT NULL, -- Required justification + rule_reference TEXT, -- Which community rule was violated (if applicable) + evidence JSONB, -- Links, screenshots (hashed references), reports + + -- Duration (for temporary actions) + duration_hours INTEGER, -- NULL = permanent + expires_at TIMESTAMPTZ, + + -- Voting context (for community-voted decisions) + decision_type TEXT NOT NULL DEFAULT 'unilateral', -- 'unilateral', 'voted', 'automated' + vote_proposal_id UUID REFERENCES proposals(id), + vote_result JSONB, -- Summary of vote if applicable + + -- Cryptographic chain + previous_hash TEXT NOT NULL, -- SHA-256 of previous entry (or genesis hash) + entry_hash TEXT NOT NULL, -- SHA-256 of this entry's content + + -- Timestamps + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + -- Prevent modifications + CONSTRAINT ledger_immutable CHECK (true) -- Symbolic; real protection via triggers +); + +-- Unique sequence per community +CREATE UNIQUE INDEX idx_ledger_community_sequence + ON moderation_ledger(community_id, sequence_number); + +-- Fast lookups +CREATE INDEX idx_ledger_actor ON moderation_ledger(actor_user_id); +CREATE INDEX idx_ledger_target ON moderation_ledger(target_type, target_id); +CREATE INDEX idx_ledger_action ON moderation_ledger(action_type); +CREATE INDEX idx_ledger_created ON moderation_ledger(created_at); +CREATE INDEX idx_ledger_community ON moderation_ledger(community_id); + +-- Hash chain verification index +CREATE INDEX idx_ledger_hash_chain ON moderation_ledger(community_id, previous_hash); + +-- ============================================================================ +-- GENESIS ENTRIES (one per community + one global) +-- ============================================================================ + +CREATE TABLE ledger_genesis ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + community_id UUID UNIQUE REFERENCES communities(id) ON DELETE CASCADE, + genesis_hash TEXT NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Global genesis (community_id = NULL) +INSERT INTO ledger_genesis (community_id, genesis_hash) +VALUES (NULL, 'GENESIS:' || encode(sha256('LIKWID_MODERATION_LEDGER_GENESIS_v1'::bytea), 'hex')); + +-- ============================================================================ +-- IMMUTABILITY PROTECTION +-- ============================================================================ + +-- Prevent UPDATE on ledger entries +CREATE OR REPLACE FUNCTION ledger_prevent_update() +RETURNS TRIGGER AS $$ +BEGIN + RAISE EXCEPTION 'Moderation ledger entries are immutable and cannot be modified'; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trg_ledger_no_update + BEFORE UPDATE ON moderation_ledger + FOR EACH ROW + EXECUTE FUNCTION ledger_prevent_update(); + +-- Prevent DELETE on ledger entries (except by superuser for GDPR compliance) +CREATE OR REPLACE FUNCTION ledger_prevent_delete() +RETURNS TRIGGER AS $$ +BEGIN + -- Allow deletion only by superuser (for legal compliance like GDPR) + IF NOT current_setting('likwid.allow_ledger_delete', true)::boolean THEN + RAISE EXCEPTION 'Moderation ledger entries cannot be deleted. 
Set likwid.allow_ledger_delete = true for legal compliance deletions.'; + END IF; + + -- Log the deletion attempt + INSERT INTO ledger_deletion_log (entry_id, deleted_by, reason) + VALUES (OLD.id, current_user, current_setting('likwid.deletion_reason', true)); + + RETURN OLD; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trg_ledger_no_delete + BEFORE DELETE ON moderation_ledger + FOR EACH ROW + EXECUTE FUNCTION ledger_prevent_delete(); + +-- Log of any forced deletions (for GDPR compliance auditing) +CREATE TABLE ledger_deletion_log ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + entry_id UUID NOT NULL, -- ID of deleted entry (no FK since it's deleted) + deleted_by TEXT NOT NULL, + reason TEXT, + deleted_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- ============================================================================ +-- HELPER FUNCTIONS +-- ============================================================================ + +-- Get the genesis hash for a community (or global) +CREATE OR REPLACE FUNCTION get_ledger_genesis_hash(p_community_id UUID) +RETURNS TEXT AS $$ +DECLARE + v_hash TEXT; +BEGIN + SELECT genesis_hash INTO v_hash + FROM ledger_genesis + WHERE community_id IS NOT DISTINCT FROM p_community_id; + + IF v_hash IS NULL THEN + -- Create genesis for this community + v_hash := 'GENESIS:' || encode(sha256(('LIKWID_COMMUNITY_' || COALESCE(p_community_id::text, 'GLOBAL'))::bytea), 'hex'); + INSERT INTO ledger_genesis (community_id, genesis_hash) VALUES (p_community_id, v_hash); + END IF; + + RETURN v_hash; +END; +$$ LANGUAGE plpgsql; + +-- Get the last entry hash for a community +CREATE OR REPLACE FUNCTION get_last_ledger_hash(p_community_id UUID) +RETURNS TEXT AS $$ +DECLARE + v_hash TEXT; +BEGIN + SELECT entry_hash INTO v_hash + FROM moderation_ledger + WHERE community_id IS NOT DISTINCT FROM p_community_id + ORDER BY sequence_number DESC + LIMIT 1; + + IF v_hash IS NULL THEN + RETURN get_ledger_genesis_hash(p_community_id); + END IF; + + RETURN v_hash; +END; +$$ LANGUAGE plpgsql; + +-- Get next sequence number for a community +CREATE OR REPLACE FUNCTION get_next_ledger_sequence(p_community_id UUID) +RETURNS BIGINT AS $$ +DECLARE + v_seq BIGINT; +BEGIN + SELECT COALESCE(MAX(sequence_number), 0) + 1 INTO v_seq + FROM moderation_ledger + WHERE community_id IS NOT DISTINCT FROM p_community_id; + + RETURN v_seq; +END; +$$ LANGUAGE plpgsql; + +-- Calculate entry hash (deterministic) +CREATE OR REPLACE FUNCTION calculate_ledger_entry_hash( + p_sequence BIGINT, + p_community_id UUID, + p_actor_user_id UUID, + p_action_type moderation_action_type, + p_target_type TEXT, + p_target_id UUID, + p_reason TEXT, + p_previous_hash TEXT, + p_created_at TIMESTAMPTZ +) +RETURNS TEXT AS $$ +DECLARE + v_content TEXT; +BEGIN + -- Create deterministic content string + v_content := + p_sequence::text || '|' || + COALESCE(p_community_id::text, 'NULL') || '|' || + p_actor_user_id::text || '|' || + p_action_type::text || '|' || + p_target_type || '|' || + p_target_id::text || '|' || + p_reason || '|' || + p_previous_hash || '|' || + p_created_at::text; + + RETURN encode(sha256(v_content::bytea), 'hex'); +END; +$$ LANGUAGE plpgsql IMMUTABLE; + +-- ============================================================================ +-- ENTRY CREATION (with automatic hashing) +-- ============================================================================ + +CREATE OR REPLACE FUNCTION create_ledger_entry( + p_community_id UUID, + p_actor_user_id UUID, + p_actor_role TEXT, + p_action_type 
moderation_action_type, + p_target_type TEXT, + p_target_id UUID, + p_reason TEXT, + p_rule_reference TEXT DEFAULT NULL, + p_evidence JSONB DEFAULT NULL, + p_target_snapshot JSONB DEFAULT NULL, + p_duration_hours INTEGER DEFAULT NULL, + p_decision_type TEXT DEFAULT 'unilateral', + p_vote_proposal_id UUID DEFAULT NULL, + p_vote_result JSONB DEFAULT NULL +) +RETURNS UUID AS $$ +DECLARE + v_entry_id UUID; + v_sequence BIGINT; + v_previous_hash TEXT; + v_entry_hash TEXT; + v_created_at TIMESTAMPTZ; + v_expires_at TIMESTAMPTZ; +BEGIN + -- Lock to prevent race conditions + PERFORM pg_advisory_xact_lock(hashtext('ledger_' || COALESCE(p_community_id::text, 'global'))); + + v_created_at := NOW(); + v_sequence := get_next_ledger_sequence(p_community_id); + v_previous_hash := get_last_ledger_hash(p_community_id); + + -- Calculate expiry if duration specified + IF p_duration_hours IS NOT NULL THEN + v_expires_at := v_created_at + (p_duration_hours || ' hours')::interval; + END IF; + + -- Calculate entry hash + v_entry_hash := calculate_ledger_entry_hash( + v_sequence, + p_community_id, + p_actor_user_id, + p_action_type, + p_target_type, + p_target_id, + p_reason, + v_previous_hash, + v_created_at + ); + + -- Insert the entry + INSERT INTO moderation_ledger ( + sequence_number, + community_id, + actor_user_id, + actor_role, + action_type, + target_type, + target_id, + target_snapshot, + reason, + rule_reference, + evidence, + duration_hours, + expires_at, + decision_type, + vote_proposal_id, + vote_result, + previous_hash, + entry_hash, + created_at + ) VALUES ( + v_sequence, + p_community_id, + p_actor_user_id, + p_actor_role, + p_action_type, + p_target_type, + p_target_id, + p_target_snapshot, + p_reason, + p_rule_reference, + p_evidence, + p_duration_hours, + v_expires_at, + p_decision_type, + p_vote_proposal_id, + p_vote_result, + v_previous_hash, + v_entry_hash, + v_created_at + ) + RETURNING id INTO v_entry_id; + + RETURN v_entry_id; +END; +$$ LANGUAGE plpgsql; + +-- ============================================================================ +-- CHAIN VERIFICATION +-- ============================================================================ + +-- Verify the integrity of the ledger chain for a community +CREATE OR REPLACE FUNCTION verify_ledger_chain(p_community_id UUID) +RETURNS TABLE ( + is_valid BOOLEAN, + total_entries BIGINT, + broken_at_sequence BIGINT, + expected_hash TEXT, + actual_hash TEXT, + error_message TEXT +) AS $$ +DECLARE + v_entry RECORD; + v_expected_hash TEXT; + v_calculated_hash TEXT; + v_count BIGINT := 0; +BEGIN + -- Get genesis hash + v_expected_hash := get_ledger_genesis_hash(p_community_id); + + -- Iterate through all entries in order + FOR v_entry IN + SELECT * FROM moderation_ledger + WHERE community_id IS NOT DISTINCT FROM p_community_id + ORDER BY sequence_number ASC + LOOP + v_count := v_count + 1; + + -- Check previous hash matches expected + IF v_entry.previous_hash != v_expected_hash THEN + RETURN QUERY SELECT + false, + v_count, + v_entry.sequence_number, + v_expected_hash, + v_entry.previous_hash, + 'Previous hash mismatch at sequence ' || v_entry.sequence_number; + RETURN; + END IF; + + -- Recalculate entry hash + v_calculated_hash := calculate_ledger_entry_hash( + v_entry.sequence_number, + v_entry.community_id, + v_entry.actor_user_id, + v_entry.action_type, + v_entry.target_type, + v_entry.target_id, + v_entry.reason, + v_entry.previous_hash, + v_entry.created_at + ); + + IF v_calculated_hash != v_entry.entry_hash THEN + RETURN QUERY SELECT + false, + 
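+-- Illustrative call to create_ledger_entry() defined above (hypothetical UUIDs and reason;
+-- not executed by this migration):
+--   SELECT create_ledger_entry(
+--     NULL,                                      -- community_id (NULL = global ledger)
+--     '00000000-0000-0000-0000-000000000001',    -- actor_user_id
+--     'moderator', 'content_hide', 'comment',
+--     '00000000-0000-0000-0000-000000000002',    -- target_id
+--     'Repeated spam links', 'rule-3'
+--   );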
v_count, + v_entry.sequence_number, + v_calculated_hash, + v_entry.entry_hash, + 'Entry hash mismatch at sequence ' || v_entry.sequence_number; + RETURN; + END IF; + + -- Next iteration expects this entry's hash + v_expected_hash := v_entry.entry_hash; + END LOOP; + + -- All good + RETURN QUERY SELECT true, v_count, NULL::BIGINT, NULL::TEXT, NULL::TEXT, NULL::TEXT; +END; +$$ LANGUAGE plpgsql; + +-- ============================================================================ +-- VIEWS FOR EASIER QUERYING +-- ============================================================================ + +-- Human-readable ledger view +CREATE OR REPLACE VIEW v_moderation_ledger AS +SELECT + ml.id, + ml.sequence_number, + ml.community_id, + c.name AS community_name, + ml.actor_user_id, + u.username AS actor_username, + u.display_name AS actor_display_name, + ml.actor_role, + ml.action_type, + ml.target_type, + ml.target_id, + ml.reason, + ml.rule_reference, + ml.evidence, + ml.duration_hours, + ml.expires_at, + ml.decision_type, + ml.vote_proposal_id, + ml.entry_hash, + ml.created_at, + CASE + WHEN ml.expires_at IS NULL THEN 'permanent' + WHEN ml.expires_at > NOW() THEN 'active' + ELSE 'expired' + END AS status +FROM moderation_ledger ml +LEFT JOIN communities c ON c.id = ml.community_id +LEFT JOIN users u ON u.id = ml.actor_user_id; + +-- Summary statistics +CREATE OR REPLACE VIEW v_moderation_stats AS +SELECT + community_id, + action_type, + decision_type, + COUNT(*) AS total_actions, + COUNT(DISTINCT actor_user_id) AS unique_actors, + COUNT(DISTINCT target_id) AS unique_targets, + MIN(created_at) AS first_action, + MAX(created_at) AS last_action +FROM moderation_ledger +GROUP BY community_id, action_type, decision_type; + +-- ============================================================================ +-- PLUGIN REGISTRATION +-- ============================================================================ + +-- Register as a core, non-deactivatable plugin +INSERT INTO plugins ( + name, + description, + version, + is_core, + is_active, + settings_schema +) VALUES ( + 'moderation_ledger', + 'Immutable, cryptographically-chained log of all moderation decisions. 
This plugin cannot be deactivated as it is essential for transparency and accountability.', + '1.0.0', + true, -- Core plugin + true, -- Always active + jsonb_build_object( + 'type', 'object', + 'properties', jsonb_build_object( + 'require_reason_min_length', jsonb_build_object( + 'type', 'integer', + 'title', 'Minimum reason length', + 'description', 'Minimum characters required for moderation justifications', + 'default', 20, + 'minimum', 10, + 'maximum', 500 + ), + 'require_rule_reference', jsonb_build_object( + 'type', 'boolean', + 'title', 'Require rule reference', + 'description', 'Require moderators to cite a specific community rule', + 'default', false + ), + 'snapshot_target_content', jsonb_build_object( + 'type', 'boolean', + 'title', 'Snapshot target content', + 'description', 'Store a snapshot of content before moderation (uses more storage)', + 'default', true + ), + 'public_ledger', jsonb_build_object( + 'type', 'boolean', + 'title', 'Public ledger', + 'description', 'Allow all community members to view the moderation ledger', + 'default', true + ) + ) + ) +) ON CONFLICT (name) DO UPDATE SET + version = EXCLUDED.version, + description = EXCLUDED.description, + is_core = true, + is_active = true; + +-- ============================================================================ +-- COMMENTS +-- ============================================================================ + +COMMENT ON TABLE moderation_ledger IS 'Immutable, cryptographically-chained log of all moderation decisions. Entries cannot be modified or deleted (except for legal compliance).'; +COMMENT ON COLUMN moderation_ledger.entry_hash IS 'SHA-256 hash of entry content, linked to previous entry hash forming a tamper-evident chain.'; +COMMENT ON COLUMN moderation_ledger.previous_hash IS 'Hash of the previous entry in the chain, or genesis hash for first entry.'; +COMMENT ON FUNCTION create_ledger_entry IS 'Creates a new ledger entry with automatic sequence numbering and hash chain maintenance.'; +COMMENT ON FUNCTION verify_ledger_chain IS 'Verifies the cryptographic integrity of the entire ledger chain for a community.'; diff --git a/backend/migrations/20260126260000_decision_workflows.sql b/backend/migrations/20260126260000_decision_workflows.sql new file mode 100644 index 0000000..4bec195 --- /dev/null +++ b/backend/migrations/20260126260000_decision_workflows.sql @@ -0,0 +1,611 @@ +-- ============================================================================ +-- DECISION-MAKING PROCESSES PLUGIN +-- Composable decision-making workflows with configurable phases +-- ============================================================================ + +-- ============================================================================ +-- WORKFLOW TEMPLATES +-- Reusable workflow definitions that can be applied to proposals +-- ============================================================================ + +CREATE TABLE workflow_templates ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID REFERENCES communities(id) ON DELETE CASCADE, + name VARCHAR(100) NOT NULL, + description TEXT, + is_default BOOLEAN NOT NULL DEFAULT FALSE, + is_system BOOLEAN NOT NULL DEFAULT FALSE, + config JSONB NOT NULL DEFAULT '{}', + created_by UUID REFERENCES users(id), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(community_id, name) +); + +CREATE INDEX idx_workflow_templates_community ON workflow_templates(community_id); +CREATE INDEX idx_workflow_templates_default ON 
workflow_templates(community_id, is_default) WHERE is_default = true; + +COMMENT ON TABLE workflow_templates IS 'Reusable workflow definitions for decision-making processes'; + +-- ============================================================================ +-- WORKFLOW PHASES +-- Individual phases within a workflow (discussion, amendment, vote, etc.) +-- ============================================================================ + +CREATE TYPE workflow_phase_type AS ENUM ( + 'discussion', -- Open discussion period + 'amendment', -- Proposal amendment/refinement period + 'review', -- Expert/committee review period + 'vote', -- Active voting period + 'runoff', -- Runoff voting if needed + 'ratification', -- Final ratification/approval + 'implementation', -- Implementation tracking + 'cooldown', -- Waiting period between phases + 'custom' -- Custom phase type +); + +CREATE TABLE workflow_phases ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + template_id UUID NOT NULL REFERENCES workflow_templates(id) ON DELETE CASCADE, + name VARCHAR(100) NOT NULL, + phase_type workflow_phase_type NOT NULL, + sequence_order INT NOT NULL, + description TEXT, + + -- Duration configuration + min_duration_hours INT, + max_duration_hours INT, + default_duration_hours INT NOT NULL DEFAULT 168, -- 1 week + allow_early_completion BOOLEAN NOT NULL DEFAULT FALSE, + + -- Quorum configuration + quorum_type VARCHAR(50) NOT NULL DEFAULT 'percentage', -- percentage, absolute, adaptive + quorum_value DECIMAL(10, 4) NOT NULL DEFAULT 0.10, -- 10% default + quorum_scope VARCHAR(50) NOT NULL DEFAULT 'community', -- community, participants, delegated + + -- Participation requirements + require_reading BOOLEAN NOT NULL DEFAULT FALSE, + require_comment BOOLEAN NOT NULL DEFAULT FALSE, + min_unique_participants INT, + + -- Transition rules + auto_advance BOOLEAN NOT NULL DEFAULT TRUE, + advance_condition JSONB NOT NULL DEFAULT '{"type": "duration"}', + failure_action VARCHAR(50) NOT NULL DEFAULT 'reject', -- reject, extend, escalate, restart + + -- Phase-specific config + phase_config JSONB NOT NULL DEFAULT '{}', + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(template_id, sequence_order) +); + +CREATE INDEX idx_workflow_phases_template ON workflow_phases(template_id); +CREATE INDEX idx_workflow_phases_order ON workflow_phases(template_id, sequence_order); + +COMMENT ON TABLE workflow_phases IS 'Individual phases within a workflow template'; + +-- ============================================================================ +-- PHASE DEPENDENCIES +-- Define dependencies between phases (e.g., vote requires discussion) +-- ============================================================================ + +CREATE TABLE phase_dependencies ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + phase_id UUID NOT NULL REFERENCES workflow_phases(id) ON DELETE CASCADE, + depends_on_phase_id UUID NOT NULL REFERENCES workflow_phases(id) ON DELETE CASCADE, + dependency_type VARCHAR(50) NOT NULL DEFAULT 'completion', -- completion, quorum_met, majority + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(phase_id, depends_on_phase_id), + CHECK (phase_id != depends_on_phase_id) +); + +CREATE INDEX idx_phase_dependencies_phase ON phase_dependencies(phase_id); + +COMMENT ON TABLE phase_dependencies IS 'Dependencies between workflow phases'; + +-- ============================================================================ +-- WORKFLOW INSTANCES +-- Active workflow instances attached to proposals +-- 
============================================================================ + +CREATE TABLE workflow_instances ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + template_id UUID NOT NULL REFERENCES workflow_templates(id), + current_phase_id UUID REFERENCES workflow_phases(id), + status VARCHAR(50) NOT NULL DEFAULT 'active', -- active, paused, completed, failed, cancelled + started_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + completed_at TIMESTAMPTZ, + failure_reason TEXT, + metadata JSONB NOT NULL DEFAULT '{}', + UNIQUE(proposal_id) +); + +CREATE INDEX idx_workflow_instances_proposal ON workflow_instances(proposal_id); +CREATE INDEX idx_workflow_instances_status ON workflow_instances(status); +CREATE INDEX idx_workflow_instances_current_phase ON workflow_instances(current_phase_id); + +COMMENT ON TABLE workflow_instances IS 'Active workflow instances for proposals'; + +-- ============================================================================ +-- PHASE INSTANCES +-- Tracking individual phase executions +-- ============================================================================ + +CREATE TABLE phase_instances ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + workflow_instance_id UUID NOT NULL REFERENCES workflow_instances(id) ON DELETE CASCADE, + phase_id UUID NOT NULL REFERENCES workflow_phases(id), + status VARCHAR(50) NOT NULL DEFAULT 'pending', -- pending, active, completed, skipped, failed + + -- Timing + scheduled_start TIMESTAMPTZ, + actual_start TIMESTAMPTZ, + scheduled_end TIMESTAMPTZ, + actual_end TIMESTAMPTZ, + extended_count INT NOT NULL DEFAULT 0, + + -- Participation tracking + participant_count INT NOT NULL DEFAULT 0, + quorum_reached BOOLEAN NOT NULL DEFAULT FALSE, + quorum_reached_at TIMESTAMPTZ, + + -- Results + result JSONB, + completion_reason VARCHAR(100), -- duration, quorum, manual, early_completion, failure + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(workflow_instance_id, phase_id) +); + +CREATE INDEX idx_phase_instances_workflow ON phase_instances(workflow_instance_id); +CREATE INDEX idx_phase_instances_status ON phase_instances(status); +CREATE INDEX idx_phase_instances_active ON phase_instances(status, scheduled_end) WHERE status = 'active'; + +COMMENT ON TABLE phase_instances IS 'Individual phase execution tracking'; + +-- ============================================================================ +-- PHASE PARTICIPATION +-- Track who participated in each phase +-- ============================================================================ + +CREATE TABLE phase_participation ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + phase_instance_id UUID NOT NULL REFERENCES phase_instances(id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users(id), + participation_type VARCHAR(50) NOT NULL, -- viewed, commented, voted, amended + participated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + metadata JSONB NOT NULL DEFAULT '{}', + UNIQUE(phase_instance_id, user_id, participation_type) +); + +CREATE INDEX idx_phase_participation_instance ON phase_participation(phase_instance_id); +CREATE INDEX idx_phase_participation_user ON phase_participation(user_id); + +COMMENT ON TABLE phase_participation IS 'Participation tracking per phase'; + +-- ============================================================================ +-- WORKFLOW TRANSITIONS +-- Log of all phase transitions +-- ============================================================================ + +CREATE TABLE 
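+-- Illustrative participation record (hypothetical UUIDs; not executed by this migration):
+--   INSERT INTO phase_participation (phase_instance_id, user_id, participation_type)
+--   VALUES ('00000000-0000-0000-0000-0000000000b1',
+--           '00000000-0000-0000-0000-000000000001', 'commented')
+--   ON CONFLICT DO NOTHING;  -- at most one row per (phase instance, user, participation type)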
workflow_transitions ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + workflow_instance_id UUID NOT NULL REFERENCES workflow_instances(id) ON DELETE CASCADE, + from_phase_id UUID REFERENCES workflow_phases(id), + to_phase_id UUID REFERENCES workflow_phases(id), + transition_type VARCHAR(50) NOT NULL, -- advance, skip, restart, fail, complete + triggered_by VARCHAR(50) NOT NULL, -- auto, manual, quorum, timeout + triggered_by_user_id UUID REFERENCES users(id), + reason TEXT, + metadata JSONB NOT NULL DEFAULT '{}', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_workflow_transitions_instance ON workflow_transitions(workflow_instance_id); +CREATE INDEX idx_workflow_transitions_time ON workflow_transitions(created_at); + +COMMENT ON TABLE workflow_transitions IS 'Audit log of workflow phase transitions'; + +-- ============================================================================ +-- QUORUM SNAPSHOTS +-- Track quorum status over time +-- ============================================================================ + +CREATE TABLE quorum_snapshots ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + phase_instance_id UUID NOT NULL REFERENCES phase_instances(id) ON DELETE CASCADE, + snapshot_time TIMESTAMPTZ NOT NULL DEFAULT NOW(), + eligible_count INT NOT NULL, + participant_count INT NOT NULL, + quorum_required DECIMAL(10, 4) NOT NULL, + quorum_current DECIMAL(10, 4) NOT NULL, + is_met BOOLEAN NOT NULL, + calculation_details JSONB NOT NULL DEFAULT '{}' +); + +CREATE INDEX idx_quorum_snapshots_instance ON quorum_snapshots(phase_instance_id); +CREATE INDEX idx_quorum_snapshots_time ON quorum_snapshots(phase_instance_id, snapshot_time); + +COMMENT ON TABLE quorum_snapshots IS 'Historical quorum tracking for transparency'; + +-- ============================================================================ +-- HELPER FUNCTIONS +-- ============================================================================ + +-- Calculate current quorum for a phase instance +CREATE OR REPLACE FUNCTION calculate_phase_quorum(p_phase_instance_id UUID) +RETURNS TABLE ( + eligible_count INT, + participant_count INT, + quorum_required DECIMAL, + quorum_current DECIMAL, + is_met BOOLEAN +) AS $$ +DECLARE + v_phase_id UUID; + v_workflow_instance_id UUID; + v_community_id UUID; + v_quorum_type VARCHAR(50); + v_quorum_value DECIMAL; + v_quorum_scope VARCHAR(50); +BEGIN + -- Get phase configuration + SELECT pi.phase_id, pi.workflow_instance_id, wp.quorum_type, wp.quorum_value, wp.quorum_scope + INTO v_phase_id, v_workflow_instance_id, v_quorum_type, v_quorum_value, v_quorum_scope + FROM phase_instances pi + JOIN workflow_phases wp ON wp.id = pi.phase_id + WHERE pi.id = p_phase_instance_id; + + -- Get community + SELECT p.community_id INTO v_community_id + FROM workflow_instances wi + JOIN proposals p ON p.id = wi.proposal_id + WHERE wi.id = v_workflow_instance_id; + + -- Calculate eligible count based on scope + IF v_quorum_scope = 'community' THEN + SELECT COUNT(*) INTO eligible_count + FROM community_members + WHERE community_id = v_community_id; + ELSIF v_quorum_scope = 'participants' THEN + SELECT COUNT(DISTINCT user_id) INTO eligible_count + FROM phase_participation pp + JOIN phase_instances pi ON pi.id = pp.phase_instance_id + WHERE pi.workflow_instance_id = v_workflow_instance_id; + ELSE + SELECT COUNT(*) INTO eligible_count + FROM community_members + WHERE community_id = v_community_id; + END IF; + + -- Get participant count for this phase + SELECT COUNT(DISTINCT user_id) INTO 
participant_count + FROM phase_participation + WHERE phase_instance_id = p_phase_instance_id; + + -- Calculate quorum + IF v_quorum_type = 'percentage' THEN + quorum_required := v_quorum_value; + IF eligible_count > 0 THEN + quorum_current := participant_count::DECIMAL / eligible_count; + ELSE + quorum_current := 0; + END IF; + ELSIF v_quorum_type = 'absolute' THEN + quorum_required := v_quorum_value; + quorum_current := participant_count; + ELSE + quorum_required := v_quorum_value; + quorum_current := CASE WHEN eligible_count > 0 + THEN participant_count::DECIMAL / eligible_count + ELSE 0 END; + END IF; + + is_met := quorum_current >= quorum_required; + + RETURN NEXT; +END; +$$ LANGUAGE plpgsql; + +-- Advance workflow to next phase +CREATE OR REPLACE FUNCTION advance_workflow_phase( + p_workflow_instance_id UUID, + p_triggered_by VARCHAR(50) DEFAULT 'auto', + p_triggered_by_user_id UUID DEFAULT NULL, + p_reason TEXT DEFAULT NULL +) RETURNS UUID AS $$ +DECLARE + v_current_phase_id UUID; + v_next_phase_id UUID; + v_current_phase_instance_id UUID; + v_new_phase_instance_id UUID; + v_phase_config workflow_phases%ROWTYPE; +BEGIN + -- Get current phase + SELECT current_phase_id INTO v_current_phase_id + FROM workflow_instances + WHERE id = p_workflow_instance_id; + + -- Get current phase instance + SELECT id INTO v_current_phase_instance_id + FROM phase_instances + WHERE workflow_instance_id = p_workflow_instance_id AND phase_id = v_current_phase_id; + + -- Complete current phase + IF v_current_phase_instance_id IS NOT NULL THEN + UPDATE phase_instances + SET status = 'completed', + actual_end = NOW(), + completion_reason = p_triggered_by + WHERE id = v_current_phase_instance_id; + END IF; + + -- Find next phase + SELECT wp.id INTO v_next_phase_id + FROM workflow_phases wp + JOIN workflow_instances wi ON wi.template_id = wp.template_id + WHERE wi.id = p_workflow_instance_id + AND wp.sequence_order > COALESCE( + (SELECT sequence_order FROM workflow_phases WHERE id = v_current_phase_id), -1 + ) + ORDER BY wp.sequence_order + LIMIT 1; + + -- Log transition + INSERT INTO workflow_transitions ( + workflow_instance_id, from_phase_id, to_phase_id, + transition_type, triggered_by, triggered_by_user_id, reason + ) VALUES ( + p_workflow_instance_id, v_current_phase_id, v_next_phase_id, + CASE WHEN v_next_phase_id IS NULL THEN 'complete' ELSE 'advance' END, + p_triggered_by, p_triggered_by_user_id, p_reason + ); + + IF v_next_phase_id IS NULL THEN + -- Workflow complete + UPDATE workflow_instances + SET status = 'completed', + current_phase_id = NULL, + completed_at = NOW() + WHERE id = p_workflow_instance_id; + RETURN NULL; + END IF; + + -- Get next phase config + SELECT * INTO v_phase_config FROM workflow_phases WHERE id = v_next_phase_id; + + -- Create new phase instance + INSERT INTO phase_instances ( + workflow_instance_id, phase_id, status, + scheduled_start, actual_start, scheduled_end + ) VALUES ( + p_workflow_instance_id, v_next_phase_id, 'active', + NOW(), NOW(), + NOW() + (v_phase_config.default_duration_hours || ' hours')::INTERVAL + ) RETURNING id INTO v_new_phase_instance_id; + + -- Update workflow instance + UPDATE workflow_instances + SET current_phase_id = v_next_phase_id + WHERE id = p_workflow_instance_id; + + RETURN v_new_phase_instance_id; +END; +$$ LANGUAGE plpgsql; + +-- Start a workflow for a proposal +CREATE OR REPLACE FUNCTION start_workflow( + p_proposal_id UUID, + p_template_id UUID +) RETURNS UUID AS $$ +DECLARE + v_workflow_instance_id UUID; + v_first_phase_id UUID; + 
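+  -- Illustrative use of the helpers defined above (hypothetical instance UUIDs;
+  -- not executed by this migration):
+  --   SELECT * FROM calculate_phase_quorum('00000000-0000-0000-0000-0000000000c1');
+  --   SELECT advance_workflow_phase('00000000-0000-0000-0000-0000000000c2',
+  --                                 'manual', NULL, 'Quorum reached early');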
v_phase_config workflow_phases%ROWTYPE; +BEGIN + -- Create workflow instance + INSERT INTO workflow_instances (proposal_id, template_id, status) + VALUES (p_proposal_id, p_template_id, 'active') + RETURNING id INTO v_workflow_instance_id; + + -- Get first phase + SELECT id INTO v_first_phase_id + FROM workflow_phases + WHERE template_id = p_template_id + ORDER BY sequence_order + LIMIT 1; + + IF v_first_phase_id IS NOT NULL THEN + SELECT * INTO v_phase_config FROM workflow_phases WHERE id = v_first_phase_id; + + -- Create first phase instance + INSERT INTO phase_instances ( + workflow_instance_id, phase_id, status, + scheduled_start, actual_start, scheduled_end + ) VALUES ( + v_workflow_instance_id, v_first_phase_id, 'active', + NOW(), NOW(), + NOW() + (v_phase_config.default_duration_hours || ' hours')::INTERVAL + ); + + -- Update workflow with current phase + UPDATE workflow_instances + SET current_phase_id = v_first_phase_id + WHERE id = v_workflow_instance_id; + END IF; + + RETURN v_workflow_instance_id; +END; +$$ LANGUAGE plpgsql; + +-- ============================================================================ +-- DEFAULT WORKFLOW TEMPLATES +-- ============================================================================ + +-- Standard governance workflow +INSERT INTO workflow_templates (id, community_id, name, description, is_system, config) +VALUES ( + 'a0000000-0000-0000-0000-000000000001'::UUID, + NULL, -- Global template + 'Standard Governance', + 'Standard 4-phase governance workflow: Discussion → Amendment → Vote → Ratification', + true, + jsonb_build_object( + 'allow_skip_phases', false, + 'require_all_phases', true, + 'notify_on_transition', true + ) +); + +-- Insert phases for standard workflow +INSERT INTO workflow_phases (template_id, name, phase_type, sequence_order, description, default_duration_hours, quorum_value, phase_config) +VALUES + ('a0000000-0000-0000-0000-000000000001'::UUID, 'Discussion', 'discussion', 1, + 'Open discussion period for community input', 168, 0.05, + '{"allow_amendments": false, "min_comments": 3}'::JSONB), + ('a0000000-0000-0000-0000-000000000001'::UUID, 'Amendment', 'amendment', 2, + 'Proposal refinement based on feedback', 72, 0.03, + '{"allow_amendments": true, "require_author_approval": true}'::JSONB), + ('a0000000-0000-0000-0000-000000000001'::UUID, 'Voting', 'vote', 3, + 'Community voting on the proposal', 168, 0.15, + '{"voting_method": "schulze", "allow_abstain": true}'::JSONB), + ('a0000000-0000-0000-0000-000000000001'::UUID, 'Ratification', 'ratification', 4, + 'Final ratification and implementation planning', 48, 0.10, + '{"require_implementation_plan": true}'::JSONB); + +-- Quick decision workflow +INSERT INTO workflow_templates (id, community_id, name, description, is_system, config) +VALUES ( + 'a0000000-0000-0000-0000-000000000002'::UUID, + NULL, + 'Quick Decision', + 'Expedited 2-phase workflow for time-sensitive decisions', + true, + jsonb_build_object( + 'allow_skip_phases', true, + 'max_duration_hours', 72 + ) +); + +INSERT INTO workflow_phases (template_id, name, phase_type, sequence_order, description, default_duration_hours, quorum_value, allow_early_completion) +VALUES + ('a0000000-0000-0000-0000-000000000002'::UUID, 'Discussion & Review', 'discussion', 1, + 'Combined discussion and review period', 24, 0.10, true), + ('a0000000-0000-0000-0000-000000000002'::UUID, 'Voting', 'vote', 2, + 'Community voting', 48, 0.20, true); + +-- Consensus-seeking workflow +INSERT INTO workflow_templates (id, community_id, name, 
description, is_system, config) +VALUES ( + 'a0000000-0000-0000-0000-000000000003'::UUID, + NULL, + 'Consensus Building', + 'Extended workflow focused on achieving broad consensus', + true, + jsonb_build_object( + 'consensus_threshold', 0.75, + 'allow_multiple_rounds', true + ) +); + +INSERT INTO workflow_phases (template_id, name, phase_type, sequence_order, description, default_duration_hours, quorum_value, require_comment) +VALUES + ('a0000000-0000-0000-0000-000000000003'::UUID, 'Initial Discussion', 'discussion', 1, + 'Gather initial perspectives', 168, 0.10, false), + ('a0000000-0000-0000-0000-000000000003'::UUID, 'Synthesis', 'amendment', 2, + 'Synthesize feedback into revised proposal', 72, 0.05, true), + ('a0000000-0000-0000-0000-000000000003'::UUID, 'Consensus Check', 'vote', 3, + 'Check for consensus (75% threshold)', 72, 0.25, false), + ('a0000000-0000-0000-0000-000000000003'::UUID, 'Final Adjustments', 'amendment', 4, + 'Address remaining concerns', 48, 0.05, true), + ('a0000000-0000-0000-0000-000000000003'::UUID, 'Final Vote', 'vote', 5, + 'Final community decision', 168, 0.30, false); + +-- ============================================================================ +-- VIEWS +-- ============================================================================ + +CREATE OR REPLACE VIEW active_workflows AS +SELECT + wi.id AS workflow_instance_id, + wi.proposal_id, + p.title AS proposal_title, + wt.name AS workflow_name, + wp.name AS current_phase_name, + wp.phase_type AS current_phase_type, + pi.scheduled_end AS phase_deadline, + pi.participant_count, + pi.quorum_reached, + wi.started_at, + wi.status +FROM workflow_instances wi +JOIN proposals p ON p.id = wi.proposal_id +JOIN workflow_templates wt ON wt.id = wi.template_id +LEFT JOIN workflow_phases wp ON wp.id = wi.current_phase_id +LEFT JOIN phase_instances pi ON pi.workflow_instance_id = wi.id AND pi.phase_id = wi.current_phase_id +WHERE wi.status = 'active'; + +CREATE OR REPLACE VIEW workflow_progress AS +SELECT + wi.id AS workflow_instance_id, + wi.proposal_id, + wt.name AS workflow_name, + COUNT(wp.id) AS total_phases, + COUNT(pi.id) FILTER (WHERE pi.status = 'completed') AS completed_phases, + ROUND( + COUNT(pi.id) FILTER (WHERE pi.status = 'completed')::DECIMAL / + NULLIF(COUNT(wp.id), 0) * 100, 1 + ) AS progress_percentage +FROM workflow_instances wi +JOIN workflow_templates wt ON wt.id = wi.template_id +JOIN workflow_phases wp ON wp.template_id = wt.id +LEFT JOIN phase_instances pi ON pi.workflow_instance_id = wi.id AND pi.phase_id = wp.id +GROUP BY wi.id, wi.proposal_id, wt.name; + +-- ============================================================================ +-- PLUGIN REGISTRATION +-- ============================================================================ + +INSERT INTO plugins (name, description, version, is_core, is_active, settings_schema) +VALUES ( + 'decision_workflows', + 'Composable decision-making workflows with configurable phases, timeouts, and quorum requirements. 
Foundation for all governance processes.', + '1.0.0', + true, + true, + jsonb_build_object( + 'type', 'object', + 'properties', jsonb_build_object( + 'default_workflow', jsonb_build_object( + 'type', 'string', + 'title', 'Default Workflow', + 'description', 'Default workflow template for new proposals', + 'default', 'Standard Governance' + ), + 'allow_custom_workflows', jsonb_build_object( + 'type', 'boolean', + 'title', 'Allow Custom Workflows', + 'description', 'Allow communities to create custom workflow templates', + 'default', true + ), + 'auto_advance_phases', jsonb_build_object( + 'type', 'boolean', + 'title', 'Auto-advance Phases', + 'description', 'Automatically advance to next phase when conditions are met', + 'default', true + ), + 'quorum_check_interval_minutes', jsonb_build_object( + 'type', 'integer', + 'title', 'Quorum Check Interval', + 'description', 'How often to check and record quorum status (minutes)', + 'default', 60, + 'minimum', 5, + 'maximum', 1440 + ) + ) + ) +) ON CONFLICT (name) DO UPDATE SET + version = EXCLUDED.version, + description = EXCLUDED.description; diff --git a/backend/migrations/20260126270000_self_moderation_rules.sql b/backend/migrations/20260126270000_self_moderation_rules.sql new file mode 100644 index 0000000..bc3b1f1 --- /dev/null +++ b/backend/migrations/20260126270000_self_moderation_rules.sql @@ -0,0 +1,541 @@ +-- ============================================================================ +-- SELF-MODERATION RULES PLUGIN +-- Configurable community rules with automatic escalation and voting on sanctions +-- ============================================================================ + +-- ============================================================================ +-- COMMUNITY RULES +-- Configurable rules that communities can define +-- ============================================================================ + +CREATE TYPE rule_severity AS ENUM ('info', 'warning', 'minor', 'major', 'critical'); +CREATE TYPE rule_scope AS ENUM ('content', 'behavior', 'voting', 'participation', 'meta'); + +CREATE TABLE community_rules ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + + -- Rule definition + code VARCHAR(50) NOT NULL, + title VARCHAR(200) NOT NULL, + description TEXT NOT NULL, + scope rule_scope NOT NULL DEFAULT 'behavior', + severity rule_severity NOT NULL DEFAULT 'warning', + + -- Rule behavior + is_active BOOLEAN NOT NULL DEFAULT TRUE, + requires_human_review BOOLEAN NOT NULL DEFAULT TRUE, + auto_detection_enabled BOOLEAN NOT NULL DEFAULT FALSE, + detection_patterns JSONB, -- For automated detection + + -- Sanctions + default_sanction_type VARCHAR(50), + default_sanction_duration_hours INT, + escalation_multiplier DECIMAL(3,2) NOT NULL DEFAULT 1.5, + max_escalation_level INT NOT NULL DEFAULT 3, + + -- Voting on sanctions + allow_community_vote BOOLEAN NOT NULL DEFAULT FALSE, + vote_threshold DECIMAL(5,4) NOT NULL DEFAULT 0.6667, -- 2/3 majority + vote_quorum DECIMAL(5,4) NOT NULL DEFAULT 0.10, + vote_duration_hours INT NOT NULL DEFAULT 48, + + -- Metadata + created_by UUID REFERENCES users(id), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + version INT NOT NULL DEFAULT 1, + + UNIQUE(community_id, code) +); + +CREATE INDEX idx_community_rules_community ON community_rules(community_id); +CREATE INDEX idx_community_rules_active ON community_rules(community_id, is_active) WHERE is_active = true; +CREATE INDEX 
idx_community_rules_scope ON community_rules(community_id, scope); + +COMMENT ON TABLE community_rules IS 'Community-defined moderation rules'; + +-- ============================================================================ +-- RULE VIOLATIONS +-- Reported or detected rule violations +-- ============================================================================ + +CREATE TYPE violation_status AS ENUM ( + 'reported', -- Initial report + 'under_review', -- Being reviewed by moderator + 'pending_vote', -- Community vote in progress + 'confirmed', -- Violation confirmed + 'dismissed', -- Report dismissed + 'appealed', -- Under appeal + 'resolved' -- Sanction applied and completed +); + +CREATE TABLE rule_violations ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + rule_id UUID NOT NULL REFERENCES community_rules(id), + + -- Violation details + target_user_id UUID NOT NULL REFERENCES users(id), + target_content_id UUID, + target_content_type VARCHAR(50), + + -- Reporter + reported_by UUID REFERENCES users(id), + reported_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + report_reason TEXT, + report_evidence JSONB, + + -- Status tracking + status violation_status NOT NULL DEFAULT 'reported', + escalation_level INT NOT NULL DEFAULT 0, + + -- Review + reviewed_by UUID REFERENCES users(id), + reviewed_at TIMESTAMPTZ, + review_notes TEXT, + + -- Community vote (if applicable) + vote_proposal_id UUID REFERENCES proposals(id), + vote_started_at TIMESTAMPTZ, + vote_ended_at TIMESTAMPTZ, + vote_result JSONB, + + -- Resolution + resolved_at TIMESTAMPTZ, + resolution_type VARCHAR(50), -- sanction_applied, dismissed, appealed_overturned + resolution_notes TEXT, + + -- Audit + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_rule_violations_community ON rule_violations(community_id); +CREATE INDEX idx_rule_violations_target ON rule_violations(target_user_id); +CREATE INDEX idx_rule_violations_status ON rule_violations(status); +CREATE INDEX idx_rule_violations_rule ON rule_violations(rule_id); +CREATE INDEX idx_rule_violations_pending ON rule_violations(community_id, status) + WHERE status IN ('reported', 'under_review', 'pending_vote'); + +COMMENT ON TABLE rule_violations IS 'Tracked rule violations and their resolution'; + +-- ============================================================================ +-- SANCTIONS +-- Applied sanctions for violations +-- ============================================================================ + +CREATE TYPE sanction_type AS ENUM ( + 'warning', -- Official warning + 'content_removal', -- Remove offending content + 'temporary_mute', -- Cannot post/comment temporarily + 'temporary_suspend', -- Cannot access community temporarily + 'voting_suspension', -- Cannot vote temporarily + 'permanent_ban', -- Permanent community ban + 'custom' -- Custom sanction +); + +CREATE TYPE sanction_status AS ENUM ( + 'pending', -- Awaiting application + 'active', -- Currently in effect + 'completed', -- Duration expired + 'lifted', -- Manually lifted early + 'appealed' -- Overturned on appeal +); + +CREATE TABLE sanctions ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + violation_id UUID NOT NULL REFERENCES rule_violations(id), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + target_user_id UUID NOT NULL REFERENCES users(id), + + -- Sanction details + sanction_type sanction_type NOT NULL, + severity_level INT NOT 
NULL DEFAULT 1, + description TEXT, + + -- Duration + starts_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + duration_hours INT, + expires_at TIMESTAMPTZ, + + -- Status + status sanction_status NOT NULL DEFAULT 'active', + + -- Applied by + applied_by UUID REFERENCES users(id), + applied_via VARCHAR(50) NOT NULL DEFAULT 'manual', -- manual, vote, auto + + -- Lifting/Appeal + lifted_at TIMESTAMPTZ, + lifted_by UUID REFERENCES users(id), + lift_reason TEXT, + + -- Audit + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + ledger_entry_id UUID -- Reference to moderation ledger +); + +CREATE INDEX idx_sanctions_violation ON sanctions(violation_id); +CREATE INDEX idx_sanctions_target ON sanctions(target_user_id); +CREATE INDEX idx_sanctions_community ON sanctions(community_id); +CREATE INDEX idx_sanctions_status ON sanctions(status); +CREATE INDEX idx_sanctions_active ON sanctions(target_user_id, status, expires_at) + WHERE status = 'active'; + +COMMENT ON TABLE sanctions IS 'Applied sanctions for rule violations'; + +-- ============================================================================ +-- USER VIOLATION HISTORY +-- Aggregated view of user's violation history +-- ============================================================================ + +CREATE TABLE user_violation_summary ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + user_id UUID NOT NULL REFERENCES users(id), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + + -- Counts + total_violations INT NOT NULL DEFAULT 0, + confirmed_violations INT NOT NULL DEFAULT 0, + dismissed_violations INT NOT NULL DEFAULT 0, + + -- Sanctions + total_sanctions INT NOT NULL DEFAULT 0, + active_sanctions INT NOT NULL DEFAULT 0, + warnings_count INT NOT NULL DEFAULT 0, + mutes_count INT NOT NULL DEFAULT 0, + suspensions_count INT NOT NULL DEFAULT 0, + + -- Escalation tracking + current_escalation_level INT NOT NULL DEFAULT 0, + last_violation_at TIMESTAMPTZ, + last_sanction_at TIMESTAMPTZ, + + -- Good standing calculation + days_since_last_violation INT, + is_in_good_standing BOOLEAN NOT NULL DEFAULT TRUE, + + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(user_id, community_id) +); + +CREATE INDEX idx_user_violation_summary_user ON user_violation_summary(user_id); +CREATE INDEX idx_user_violation_summary_community ON user_violation_summary(community_id); + +COMMENT ON TABLE user_violation_summary IS 'Aggregated violation history per user per community'; + +-- ============================================================================ +-- ESCALATION RULES +-- Define how sanctions escalate with repeated violations +-- ============================================================================ + +CREATE TABLE escalation_rules ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + rule_id UUID REFERENCES community_rules(id) ON DELETE CASCADE, + + -- Escalation level + level INT NOT NULL, + + -- Sanction for this level + sanction_type sanction_type NOT NULL, + duration_hours INT, + additional_actions JSONB, + + -- Conditions + cooldown_days INT NOT NULL DEFAULT 90, -- Days before escalation resets + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(community_id, rule_id, level) +); + +CREATE INDEX idx_escalation_rules_community ON escalation_rules(community_id); +CREATE INDEX idx_escalation_rules_rule ON escalation_rules(rule_id); + +COMMENT ON TABLE escalation_rules IS 'Escalation ladder for repeated violations'; + +-- 
============================================================================ +-- HELPER FUNCTIONS +-- ============================================================================ + +-- Calculate escalation level for a user/rule combination +CREATE OR REPLACE FUNCTION calculate_escalation_level( + p_user_id UUID, + p_community_id UUID, + p_rule_id UUID +) RETURNS INT AS $$ +DECLARE + v_level INT; + v_cooldown_days INT; +BEGIN + -- Get the escalation cooldown + SELECT COALESCE(MAX(cooldown_days), 90) INTO v_cooldown_days + FROM escalation_rules + WHERE community_id = p_community_id AND (rule_id = p_rule_id OR rule_id IS NULL); + + -- Count confirmed violations within cooldown period + SELECT COUNT(*) INTO v_level + FROM rule_violations + WHERE target_user_id = p_user_id + AND community_id = p_community_id + AND rule_id = p_rule_id + AND status = 'confirmed' + AND resolved_at > NOW() - (v_cooldown_days || ' days')::INTERVAL; + + RETURN v_level; +END; +$$ LANGUAGE plpgsql; + +-- Get appropriate sanction for escalation level +CREATE OR REPLACE FUNCTION get_escalated_sanction( + p_community_id UUID, + p_rule_id UUID, + p_level INT +) RETURNS TABLE ( + sanction_type sanction_type, + duration_hours INT +) AS $$ +BEGIN + -- Try rule-specific escalation + RETURN QUERY + SELECT er.sanction_type, er.duration_hours + FROM escalation_rules er + WHERE er.community_id = p_community_id + AND er.rule_id = p_rule_id + AND er.level = p_level + LIMIT 1; + + IF NOT FOUND THEN + -- Try community default escalation + RETURN QUERY + SELECT er.sanction_type, er.duration_hours + FROM escalation_rules er + WHERE er.community_id = p_community_id + AND er.rule_id IS NULL + AND er.level = p_level + LIMIT 1; + END IF; + + IF NOT FOUND THEN + -- Default escalation ladder + RETURN QUERY + SELECT + CASE p_level + WHEN 1 THEN 'warning'::sanction_type + WHEN 2 THEN 'temporary_mute'::sanction_type + WHEN 3 THEN 'temporary_suspend'::sanction_type + ELSE 'permanent_ban'::sanction_type + END, + CASE p_level + WHEN 1 THEN NULL::INT + WHEN 2 THEN 24 + WHEN 3 THEN 168 + ELSE NULL + END; + END IF; +END; +$$ LANGUAGE plpgsql; + +-- Apply a sanction +CREATE OR REPLACE FUNCTION apply_sanction( + p_violation_id UUID, + p_sanction_type sanction_type, + p_duration_hours INT, + p_applied_by UUID, + p_applied_via VARCHAR(50) +) RETURNS UUID AS $$ +DECLARE + v_violation rule_violations%ROWTYPE; + v_sanction_id UUID; + v_expires_at TIMESTAMPTZ; +BEGIN + -- Get violation details + SELECT * INTO v_violation FROM rule_violations WHERE id = p_violation_id; + + -- Calculate expiration + IF p_duration_hours IS NOT NULL THEN + v_expires_at := NOW() + (p_duration_hours || ' hours')::INTERVAL; + END IF; + + -- Create sanction + INSERT INTO sanctions ( + violation_id, community_id, target_user_id, + sanction_type, duration_hours, expires_at, + applied_by, applied_via + ) VALUES ( + p_violation_id, v_violation.community_id, v_violation.target_user_id, + p_sanction_type, p_duration_hours, v_expires_at, + p_applied_by, p_applied_via + ) RETURNING id INTO v_sanction_id; + + -- Update violation status + UPDATE rule_violations + SET status = 'resolved', + resolved_at = NOW(), + resolution_type = 'sanction_applied' + WHERE id = p_violation_id; + + -- Update user summary + INSERT INTO user_violation_summary (user_id, community_id, confirmed_violations, total_sanctions, last_sanction_at) + VALUES (v_violation.target_user_id, v_violation.community_id, 1, 1, NOW()) + ON CONFLICT (user_id, community_id) DO UPDATE SET + confirmed_violations = 
user_violation_summary.confirmed_violations + 1, + total_sanctions = user_violation_summary.total_sanctions + 1, + last_sanction_at = NOW(), + updated_at = NOW(); + + RETURN v_sanction_id; +END; +$$ LANGUAGE plpgsql; + +-- Check and expire sanctions +CREATE OR REPLACE FUNCTION expire_sanctions() RETURNS INT AS $$ +DECLARE + v_count INT; +BEGIN + UPDATE sanctions + SET status = 'completed' + WHERE status = 'active' + AND expires_at IS NOT NULL + AND expires_at < NOW(); + + GET DIAGNOSTICS v_count = ROW_COUNT; + RETURN v_count; +END; +$$ LANGUAGE plpgsql; + +-- Check if user has active sanction +CREATE OR REPLACE FUNCTION user_has_active_sanction( + p_user_id UUID, + p_community_id UUID, + p_sanction_type sanction_type DEFAULT NULL +) RETURNS BOOLEAN AS $$ +BEGIN + RETURN EXISTS ( + SELECT 1 FROM sanctions + WHERE target_user_id = p_user_id + AND community_id = p_community_id + AND status = 'active' + AND (p_sanction_type IS NULL OR sanction_type = p_sanction_type) + AND (expires_at IS NULL OR expires_at > NOW()) + ); +END; +$$ LANGUAGE plpgsql; + +-- ============================================================================ +-- DEFAULT ESCALATION RULES +-- ============================================================================ + +-- These will be created per-community when needed + +-- ============================================================================ +-- VIEWS +-- ============================================================================ + +CREATE OR REPLACE VIEW pending_violations AS +SELECT + rv.id, + rv.community_id, + c.name AS community_name, + cr.code AS rule_code, + cr.title AS rule_title, + cr.severity, + rv.target_user_id, + tu.username AS target_username, + rv.reported_by, + ru.username AS reporter_username, + rv.status, + rv.reported_at, + rv.report_reason +FROM rule_violations rv +JOIN communities c ON c.id = rv.community_id +JOIN community_rules cr ON cr.id = rv.rule_id +JOIN users tu ON tu.id = rv.target_user_id +LEFT JOIN users ru ON ru.id = rv.reported_by +WHERE rv.status IN ('reported', 'under_review', 'pending_vote') +ORDER BY + CASE cr.severity + WHEN 'critical' THEN 1 + WHEN 'major' THEN 2 + WHEN 'minor' THEN 3 + WHEN 'warning' THEN 4 + ELSE 5 + END, + rv.reported_at; + +CREATE OR REPLACE VIEW active_sanctions_view AS +SELECT + s.id, + s.community_id, + c.name AS community_name, + s.target_user_id, + u.username AS target_username, + s.sanction_type::text, + s.severity_level, + s.starts_at, + s.expires_at, + s.status::text, + CASE + WHEN s.expires_at IS NULL THEN NULL + ELSE EXTRACT(EPOCH FROM (s.expires_at - NOW())) / 3600 + END AS hours_remaining +FROM sanctions s +JOIN communities c ON c.id = s.community_id +JOIN users u ON u.id = s.target_user_id +WHERE s.status = 'active' +ORDER BY s.starts_at DESC; + +-- ============================================================================ +-- PLUGIN REGISTRATION +-- ============================================================================ + +INSERT INTO plugins (name, description, version, is_core, is_active, settings_schema) +VALUES ( + 'self_moderation_rules', + 'Community-configurable moderation rules with automatic escalation, voting on sanctions, and transparent governance of governance.', + '1.0.0', + false, + true, + jsonb_build_object( + 'type', 'object', + 'properties', jsonb_build_object( + 'default_vote_duration_hours', jsonb_build_object( + 'type', 'integer', + 'title', 'Default Vote Duration', + 'description', 'Default hours for community votes on sanctions', + 'default', 48, + 'minimum', 
12, + 'maximum', 168 + ), + 'auto_expire_sanctions', jsonb_build_object( + 'type', 'boolean', + 'title', 'Auto-expire Sanctions', + 'description', 'Automatically expire sanctions when duration ends', + 'default', true + ), + 'escalation_cooldown_days', jsonb_build_object( + 'type', 'integer', + 'title', 'Escalation Cooldown', + 'description', 'Days of good behavior before escalation level resets', + 'default', 90, + 'minimum', 30, + 'maximum', 365 + ), + 'require_evidence', jsonb_build_object( + 'type', 'boolean', + 'title', 'Require Evidence', + 'description', 'Require evidence attachment when reporting violations', + 'default', false + ), + 'allow_anonymous_reports', jsonb_build_object( + 'type', 'boolean', + 'title', 'Allow Anonymous Reports', + 'description', 'Allow members to report violations anonymously', + 'default', false + ) + ) + ) +) ON CONFLICT (name) DO UPDATE SET + version = EXCLUDED.version, + description = EXCLUDED.description; diff --git a/backend/migrations/20260126280000_proposal_lifecycle.sql b/backend/migrations/20260126280000_proposal_lifecycle.sql new file mode 100644 index 0000000..2cfd1d5 --- /dev/null +++ b/backend/migrations/20260126280000_proposal_lifecycle.sql @@ -0,0 +1,564 @@ +-- ============================================================================ +-- PROPOSAL LIFECYCLE PLUGIN +-- Draft → Review → Active → Archived with versioning, diffs, and forks +-- ============================================================================ + +-- ============================================================================ +-- PROPOSAL LIFECYCLE STATUS +-- Extended status tracking beyond basic states +-- ============================================================================ + +CREATE TYPE proposal_lifecycle_status AS ENUM ( + 'draft', -- Initial creation, not yet submitted + 'submitted', -- Submitted for review + 'review', -- Under community/moderator review + 'revision', -- Sent back for revision + 'active', -- Open for discussion/voting + 'voting', -- In voting phase + 'passed', -- Voting passed, pending implementation + 'rejected', -- Voting rejected + 'implemented', -- Successfully implemented + 'archived', -- Archived (completed or abandoned) + 'withdrawn' -- Withdrawn by author +); + +-- ============================================================================ +-- PROPOSAL VERSIONS +-- Complete version history with semantic diffing +-- ============================================================================ + +CREATE TABLE proposal_versions ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + version_number INT NOT NULL, + + -- Version content (full snapshot) + title VARCHAR(500) NOT NULL, + content TEXT NOT NULL, + summary TEXT, + + -- Metadata + created_by UUID NOT NULL REFERENCES users(id), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + -- Change tracking + change_type VARCHAR(50) NOT NULL DEFAULT 'edit', -- create, edit, amendment, merge + change_summary TEXT, + + -- Diff from previous version + diff_from_previous JSONB, -- Structured diff data + + -- Status at this version + status_at_version proposal_lifecycle_status NOT NULL, + + -- Review info + reviewed_by UUID REFERENCES users(id), + reviewed_at TIMESTAMPTZ, + review_notes TEXT, + + UNIQUE(proposal_id, version_number) +); + +CREATE INDEX idx_proposal_versions_proposal ON proposal_versions(proposal_id); +CREATE INDEX idx_proposal_versions_created ON proposal_versions(proposal_id, created_at); +CREATE INDEX 
idx_proposal_versions_author ON proposal_versions(created_by); + +COMMENT ON TABLE proposal_versions IS 'Complete version history for proposals'; + +-- ============================================================================ +-- PROPOSAL LIFECYCLE TRACKING +-- Extended lifecycle metadata +-- ============================================================================ + +CREATE TABLE proposal_lifecycle ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE UNIQUE, + + -- Current state + current_status proposal_lifecycle_status NOT NULL DEFAULT 'draft', + current_version INT NOT NULL DEFAULT 1, + + -- Timing + submitted_at TIMESTAMPTZ, + review_started_at TIMESTAMPTZ, + activated_at TIMESTAMPTZ, + voting_started_at TIMESTAMPTZ, + voting_ended_at TIMESTAMPTZ, + resolved_at TIMESTAMPTZ, + archived_at TIMESTAMPTZ, + + -- Review tracking + review_count INT NOT NULL DEFAULT 0, + revision_count INT NOT NULL DEFAULT 0, + + -- Fork tracking + forked_from_id UUID REFERENCES proposals(id), + fork_count INT NOT NULL DEFAULT 0, + + -- Amendment tracking + amendment_count INT NOT NULL DEFAULT 0, + merged_amendments JSONB DEFAULT '[]', + + -- Metadata + lifecycle_config JSONB NOT NULL DEFAULT '{}', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_proposal_lifecycle_proposal ON proposal_lifecycle(proposal_id); +CREATE INDEX idx_proposal_lifecycle_status ON proposal_lifecycle(current_status); +CREATE INDEX idx_proposal_lifecycle_forked ON proposal_lifecycle(forked_from_id); + +COMMENT ON TABLE proposal_lifecycle IS 'Extended lifecycle tracking for proposals'; + +-- ============================================================================ +-- PROPOSAL STATUS TRANSITIONS +-- Audit log of all status changes +-- ============================================================================ + +CREATE TABLE proposal_status_transitions ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + + -- Transition + from_status proposal_lifecycle_status, + to_status proposal_lifecycle_status NOT NULL, + + -- Actor + triggered_by UUID REFERENCES users(id), + trigger_type VARCHAR(50) NOT NULL, -- manual, auto, vote, workflow + + -- Context + reason TEXT, + metadata JSONB NOT NULL DEFAULT '{}', + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_status_transitions_proposal ON proposal_status_transitions(proposal_id); +CREATE INDEX idx_status_transitions_time ON proposal_status_transitions(created_at); + +COMMENT ON TABLE proposal_status_transitions IS 'Audit trail of proposal status changes'; + +-- ============================================================================ +-- PROPOSAL FORKS +-- Track proposal forks (very FLOSS-friendly) +-- ============================================================================ + +CREATE TABLE proposal_forks ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + + -- Source + source_proposal_id UUID NOT NULL REFERENCES proposals(id), + source_version_number INT NOT NULL, + + -- Fork + fork_proposal_id UUID NOT NULL REFERENCES proposals(id) UNIQUE, + + -- Metadata + forked_by UUID NOT NULL REFERENCES users(id), + forked_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + fork_reason TEXT, + + -- Relationship status + is_competing BOOLEAN NOT NULL DEFAULT FALSE, -- Competing alternative + is_merged BOOLEAN NOT NULL DEFAULT FALSE, -- Merged back + merged_at 
TIMESTAMPTZ +); + +CREATE INDEX idx_proposal_forks_source ON proposal_forks(source_proposal_id); +CREATE INDEX idx_proposal_forks_fork ON proposal_forks(fork_proposal_id); + +COMMENT ON TABLE proposal_forks IS 'Proposal fork relationships'; + +-- ============================================================================ +-- PROPOSAL AMENDMENTS +-- Suggested changes from community +-- ============================================================================ + +CREATE TYPE amendment_status AS ENUM ( + 'proposed', -- Suggested by community member + 'under_review', -- Being reviewed by author/moderators + 'accepted', -- Accepted and incorporated + 'rejected', -- Rejected by author + 'withdrawn', -- Withdrawn by proposer + 'superseded' -- Superseded by another amendment +); + +CREATE TABLE proposal_amendments ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + target_version INT NOT NULL, + + -- Amendment content + title VARCHAR(200) NOT NULL, + description TEXT NOT NULL, + suggested_changes JSONB NOT NULL, -- Structured change suggestions + + -- Author + proposed_by UUID NOT NULL REFERENCES users(id), + proposed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + -- Status + status amendment_status NOT NULL DEFAULT 'proposed', + + -- Review + reviewed_by UUID REFERENCES users(id), + reviewed_at TIMESTAMPTZ, + review_response TEXT, + + -- If accepted + incorporated_in_version INT, + + -- Voting (if community votes on amendments) + vote_required BOOLEAN NOT NULL DEFAULT FALSE, + vote_result JSONB, + + -- Support tracking + support_count INT NOT NULL DEFAULT 0, + oppose_count INT NOT NULL DEFAULT 0 +); + +CREATE INDEX idx_amendments_proposal ON proposal_amendments(proposal_id); +CREATE INDEX idx_amendments_status ON proposal_amendments(status); +CREATE INDEX idx_amendments_author ON proposal_amendments(proposed_by); + +COMMENT ON TABLE proposal_amendments IS 'Community-suggested amendments to proposals'; + +-- ============================================================================ +-- AMENDMENT SUPPORT +-- Track support/opposition for amendments +-- ============================================================================ + +CREATE TABLE amendment_support ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + amendment_id UUID NOT NULL REFERENCES proposal_amendments(id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users(id), + support_type VARCHAR(20) NOT NULL, -- support, oppose, neutral + comment TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(amendment_id, user_id) +); + +CREATE INDEX idx_amendment_support_amendment ON amendment_support(amendment_id); + +-- ============================================================================ +-- HELPER FUNCTIONS +-- ============================================================================ + +-- Create a new version of a proposal +CREATE OR REPLACE FUNCTION create_proposal_version( + p_proposal_id UUID, + p_title VARCHAR(500), + p_content TEXT, + p_summary TEXT, + p_created_by UUID, + p_change_type VARCHAR(50), + p_change_summary TEXT +) RETURNS INT AS $$ +DECLARE + v_version_number INT; + v_previous_content TEXT; + v_current_status proposal_lifecycle_status; +BEGIN + -- Get current version number and content + SELECT current_version INTO v_version_number + FROM proposal_lifecycle + WHERE proposal_id = p_proposal_id; + + IF v_version_number IS NULL THEN + v_version_number := 0; + END IF; + + v_version_number := v_version_number + 1; + + -- Get 
previous content for diff + SELECT content INTO v_previous_content + FROM proposal_versions + WHERE proposal_id = p_proposal_id + ORDER BY version_number DESC + LIMIT 1; + + -- Get current status + SELECT COALESCE(current_status, 'draft') INTO v_current_status + FROM proposal_lifecycle + WHERE proposal_id = p_proposal_id; + + -- Create version + INSERT INTO proposal_versions ( + proposal_id, version_number, title, content, summary, + created_by, change_type, change_summary, status_at_version, + diff_from_previous + ) VALUES ( + p_proposal_id, v_version_number, p_title, p_content, p_summary, + p_created_by, p_change_type, p_change_summary, v_current_status, + CASE WHEN v_previous_content IS NOT NULL THEN + jsonb_build_object( + 'type', 'text_diff', + 'previous_length', LENGTH(v_previous_content), + 'new_length', LENGTH(p_content), + 'changed', v_previous_content != p_content + ) + ELSE NULL END + ); + + -- Update lifecycle + INSERT INTO proposal_lifecycle (proposal_id, current_version) + VALUES (p_proposal_id, v_version_number) + ON CONFLICT (proposal_id) DO UPDATE SET + current_version = v_version_number, + revision_count = proposal_lifecycle.revision_count + 1, + updated_at = NOW(); + + RETURN v_version_number; +END; +$$ LANGUAGE plpgsql; + +-- Transition proposal status +CREATE OR REPLACE FUNCTION transition_proposal_status( + p_proposal_id UUID, + p_new_status proposal_lifecycle_status, + p_triggered_by UUID, + p_trigger_type VARCHAR(50), + p_reason TEXT DEFAULT NULL +) RETURNS BOOLEAN AS $$ +DECLARE + v_current_status proposal_lifecycle_status; + v_valid_transition BOOLEAN := FALSE; +BEGIN + -- Get current status + SELECT current_status INTO v_current_status + FROM proposal_lifecycle + WHERE proposal_id = p_proposal_id; + + -- Validate transition (simplified - can be expanded) + v_valid_transition := CASE v_current_status + WHEN 'draft' THEN p_new_status IN ('submitted', 'withdrawn') + WHEN 'submitted' THEN p_new_status IN ('review', 'revision', 'withdrawn') + WHEN 'review' THEN p_new_status IN ('revision', 'active', 'rejected') + WHEN 'revision' THEN p_new_status IN ('submitted', 'withdrawn') + WHEN 'active' THEN p_new_status IN ('voting', 'archived', 'withdrawn') + WHEN 'voting' THEN p_new_status IN ('passed', 'rejected') + WHEN 'passed' THEN p_new_status IN ('implemented', 'archived') + WHEN 'rejected' THEN p_new_status IN ('revision', 'archived') + WHEN 'implemented' THEN p_new_status IN ('archived') + ELSE FALSE + END; + + IF NOT v_valid_transition THEN + RETURN FALSE; + END IF; + + -- Log transition + INSERT INTO proposal_status_transitions ( + proposal_id, from_status, to_status, + triggered_by, trigger_type, reason + ) VALUES ( + p_proposal_id, v_current_status, p_new_status, + p_triggered_by, p_trigger_type, p_reason + ); + + -- Update lifecycle + UPDATE proposal_lifecycle + SET current_status = p_new_status, + updated_at = NOW(), + submitted_at = CASE WHEN p_new_status = 'submitted' THEN NOW() ELSE submitted_at END, + review_started_at = CASE WHEN p_new_status = 'review' THEN NOW() ELSE review_started_at END, + activated_at = CASE WHEN p_new_status = 'active' THEN NOW() ELSE activated_at END, + voting_started_at = CASE WHEN p_new_status = 'voting' THEN NOW() ELSE voting_started_at END, + voting_ended_at = CASE WHEN p_new_status IN ('passed', 'rejected') THEN NOW() ELSE voting_ended_at END, + resolved_at = CASE WHEN p_new_status IN ('implemented', 'rejected', 'withdrawn') THEN NOW() ELSE resolved_at END, + archived_at = CASE WHEN p_new_status = 'archived' THEN NOW() ELSE 
archived_at END + WHERE proposal_id = p_proposal_id; + + RETURN TRUE; +END; +$$ LANGUAGE plpgsql; + +-- Fork a proposal +CREATE OR REPLACE FUNCTION fork_proposal( + p_source_proposal_id UUID, + p_forked_by UUID, + p_fork_reason TEXT, + p_community_id UUID +) RETURNS UUID AS $$ +DECLARE + v_source_version INT; + v_source_title VARCHAR(500); + v_source_content TEXT; + v_new_proposal_id UUID; +BEGIN + -- Get source proposal latest version + SELECT pv.version_number, pv.title, pv.content + INTO v_source_version, v_source_title, v_source_content + FROM proposal_versions pv + JOIN proposal_lifecycle pl ON pl.proposal_id = pv.proposal_id + WHERE pv.proposal_id = p_source_proposal_id + AND pv.version_number = pl.current_version; + + -- Create new proposal + INSERT INTO proposals (community_id, author_id, title, content, status) + VALUES (p_community_id, p_forked_by, + '[Fork] ' || v_source_title, + v_source_content, + 'draft') + RETURNING id INTO v_new_proposal_id; + + -- Initialize lifecycle + INSERT INTO proposal_lifecycle (proposal_id, current_status, forked_from_id) + VALUES (v_new_proposal_id, 'draft', p_source_proposal_id); + + -- Create initial version + PERFORM create_proposal_version( + v_new_proposal_id, + '[Fork] ' || v_source_title, + v_source_content, + 'Forked from proposal ' || p_source_proposal_id, + p_forked_by, + 'create', + 'Fork created' + ); + + -- Record fork relationship + INSERT INTO proposal_forks ( + source_proposal_id, source_version_number, + fork_proposal_id, forked_by, fork_reason + ) VALUES ( + p_source_proposal_id, v_source_version, + v_new_proposal_id, p_forked_by, p_fork_reason + ); + + -- Update source fork count + UPDATE proposal_lifecycle + SET fork_count = fork_count + 1 + WHERE proposal_id = p_source_proposal_id; + + RETURN v_new_proposal_id; +END; +$$ LANGUAGE plpgsql; + +-- ============================================================================ +-- VIEWS +-- ============================================================================ + +CREATE OR REPLACE VIEW proposal_lifecycle_summary AS +SELECT + p.id AS proposal_id, + p.title, + p.community_id, + c.name AS community_name, + pl.current_status::text AS status, + pl.current_version, + pl.submitted_at, + pl.activated_at, + pl.voting_started_at, + pl.resolved_at, + pl.revision_count, + pl.fork_count, + pl.amendment_count, + pl.forked_from_id, + u.username AS author_username, + p.created_at +FROM proposals p +JOIN proposal_lifecycle pl ON pl.proposal_id = p.id +JOIN communities c ON c.id = p.community_id +JOIN users u ON u.id = p.author_id +ORDER BY p.created_at DESC; + +CREATE OR REPLACE VIEW proposal_version_history AS +SELECT + pv.proposal_id, + pv.version_number, + pv.title, + pv.change_type, + pv.change_summary, + pv.status_at_version::text AS status, + u.username AS author_username, + pv.created_at, + pv.reviewed_by IS NOT NULL AS was_reviewed +FROM proposal_versions pv +JOIN users u ON u.id = pv.created_by +ORDER BY pv.proposal_id, pv.version_number DESC; + +-- ============================================================================ +-- TRIGGERS +-- ============================================================================ + +-- Auto-initialize lifecycle when proposal is created +CREATE OR REPLACE FUNCTION auto_init_proposal_lifecycle() +RETURNS TRIGGER AS $$ +BEGIN + INSERT INTO proposal_lifecycle (proposal_id, current_status) + VALUES (NEW.id, 'draft') + ON CONFLICT DO NOTHING; + + -- Create initial version + INSERT INTO proposal_versions ( + proposal_id, version_number, title, content, + 
created_by, change_type, status_at_version + ) VALUES ( + NEW.id, 1, NEW.title, NEW.description, + NEW.author_id, 'create', 'draft' + ) ON CONFLICT DO NOTHING; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trg_auto_init_proposal_lifecycle +AFTER INSERT ON proposals +FOR EACH ROW +EXECUTE FUNCTION auto_init_proposal_lifecycle(); + +-- ============================================================================ +-- PLUGIN REGISTRATION +-- ============================================================================ + +INSERT INTO plugins (name, description, version, is_core, is_active, settings_schema) +VALUES ( + 'proposal_lifecycle', + 'Complete proposal lifecycle management with versioning, semantic diffs, amendments, and FLOSS-style forking. Tracks Draft → Review → Active → Archived transitions.', + '1.0.0', + true, + true, + jsonb_build_object( + 'type', 'object', + 'properties', jsonb_build_object( + 'require_review', jsonb_build_object( + 'type', 'boolean', + 'title', 'Require Review', + 'description', 'Require moderator review before proposals become active', + 'default', true + ), + 'allow_forks', jsonb_build_object( + 'type', 'boolean', + 'title', 'Allow Forks', + 'description', 'Allow community members to fork proposals', + 'default', true + ), + 'allow_amendments', jsonb_build_object( + 'type', 'boolean', + 'title', 'Allow Amendments', + 'description', 'Allow community members to suggest amendments', + 'default', true + ), + 'amendment_vote_threshold', jsonb_build_object( + 'type', 'integer', + 'title', 'Amendment Vote Threshold', + 'description', 'Support count required before amendment is considered', + 'default', 5, + 'minimum', 1 + ), + 'max_revisions', jsonb_build_object( + 'type', 'integer', + 'title', 'Max Revisions', + 'description', 'Maximum times a proposal can be sent back for revision', + 'default', 3, + 'minimum', 1, + 'maximum', 10 + ) + ) + ) +) ON CONFLICT (name) DO UPDATE SET + version = EXCLUDED.version, + description = EXCLUDED.description; diff --git a/backend/migrations/20260126290000_governance_analytics.sql b/backend/migrations/20260126290000_governance_analytics.sql new file mode 100644 index 0000000..833b24c --- /dev/null +++ b/backend/migrations/20260126290000_governance_analytics.sql @@ -0,0 +1,502 @@ +-- ============================================================================ +-- GOVERNANCE ANALYTICS PLUGIN +-- Participation metrics, delegation distribution, decision load analytics +-- No individual ranking - focuses on aggregate health metrics +-- ============================================================================ + +-- ============================================================================ +-- PARTICIPATION SNAPSHOTS +-- Periodic snapshots of participation metrics +-- ============================================================================ + +CREATE TABLE participation_snapshots ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + snapshot_date DATE NOT NULL, + snapshot_type VARCHAR(50) NOT NULL DEFAULT 'daily', -- daily, weekly, monthly + + -- Member counts + total_members INT NOT NULL DEFAULT 0, + active_members INT NOT NULL DEFAULT 0, -- Active in period + new_members INT NOT NULL DEFAULT 0, + churned_members INT NOT NULL DEFAULT 0, + + -- Proposal metrics + proposals_created INT NOT NULL DEFAULT 0, + proposals_passed INT NOT NULL DEFAULT 0, + proposals_rejected INT NOT NULL DEFAULT 0, + proposals_active INT NOT NULL DEFAULT 0, + + -- 
Voting metrics + votes_cast INT NOT NULL DEFAULT 0, + unique_voters INT NOT NULL DEFAULT 0, + avg_votes_per_proposal DECIMAL(10,2), + voter_turnout_rate DECIMAL(5,4), -- percentage + + -- Discussion metrics + comments_created INT NOT NULL DEFAULT 0, + unique_commenters INT NOT NULL DEFAULT 0, + avg_comments_per_proposal DECIMAL(10,2), + + -- Delegation metrics + active_delegations INT NOT NULL DEFAULT 0, + delegation_depth_avg DECIMAL(5,2), + delegation_concentration DECIMAL(5,4), -- Gini-like measure + + -- Engagement score (aggregate, not individual) + engagement_score DECIMAL(5,2), + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(community_id, snapshot_date, snapshot_type) +); + +CREATE INDEX idx_participation_snapshots_community ON participation_snapshots(community_id); +CREATE INDEX idx_participation_snapshots_date ON participation_snapshots(snapshot_date); +CREATE INDEX idx_participation_snapshots_lookup ON participation_snapshots(community_id, snapshot_date DESC); + +COMMENT ON TABLE participation_snapshots IS 'Periodic participation metrics snapshots'; + +-- ============================================================================ +-- DELEGATION ANALYTICS +-- Aggregate delegation network analysis (no individual tracking) +-- ============================================================================ + +CREATE TABLE delegation_analytics ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + topic_id UUID, -- Optional topic filtering + snapshot_date DATE NOT NULL, + + -- Network metrics + total_delegations INT NOT NULL DEFAULT 0, + unique_delegators INT NOT NULL DEFAULT 0, + unique_delegates INT NOT NULL DEFAULT 0, + + -- Chain analysis + max_chain_depth INT NOT NULL DEFAULT 0, + avg_chain_depth DECIMAL(5,2), + delegation_cycles_detected INT NOT NULL DEFAULT 0, + + -- Concentration metrics (no individual data) + top_10_delegate_share DECIMAL(5,4), -- Share of votes held by top 10 delegates + herfindahl_index DECIMAL(7,6), -- Market concentration measure + effective_delegates INT, -- Number of delegates holding 50% of delegated power + + -- Activity + delegations_created INT NOT NULL DEFAULT 0, + delegations_revoked INT NOT NULL DEFAULT 0, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(community_id, topic_id, snapshot_date) +); + +CREATE INDEX idx_delegation_analytics_community ON delegation_analytics(community_id); +CREATE INDEX idx_delegation_analytics_topic ON delegation_analytics(topic_id); + +COMMENT ON TABLE delegation_analytics IS 'Aggregate delegation network metrics'; + +-- ============================================================================ +-- DECISION LOAD METRICS +-- Track governance workload and throughput +-- ============================================================================ + +CREATE TABLE decision_load_metrics ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + period_start DATE NOT NULL, + period_end DATE NOT NULL, + + -- Workload + proposals_in_pipeline INT NOT NULL DEFAULT 0, + proposals_needing_review INT NOT NULL DEFAULT 0, + proposals_in_voting INT NOT NULL DEFAULT 0, + + -- Throughput + decisions_made INT NOT NULL DEFAULT 0, + avg_decision_time_hours DECIMAL(10,2), + median_decision_time_hours DECIMAL(10,2), + + -- Quality indicators + proposals_requiring_revision INT NOT NULL DEFAULT 0, + revision_rate DECIMAL(5,4), + quorum_achievement_rate DECIMAL(5,4), + + -- 
Bottlenecks + stalled_proposals INT NOT NULL DEFAULT 0, + bottleneck_phase VARCHAR(50), + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(community_id, period_start, period_end) +); + +CREATE INDEX idx_decision_load_community ON decision_load_metrics(community_id); + +COMMENT ON TABLE decision_load_metrics IS 'Governance workload and throughput metrics'; + +-- ============================================================================ +-- VOTING METHOD ANALYTICS +-- Compare effectiveness of different voting methods +-- ============================================================================ + +CREATE TABLE voting_method_analytics ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + voting_method VARCHAR(50) NOT NULL, + period_start DATE NOT NULL, + period_end DATE NOT NULL, + + -- Usage + proposals_using_method INT NOT NULL DEFAULT 0, + total_votes_cast INT NOT NULL DEFAULT 0, + + -- Participation + avg_turnout DECIMAL(5,4), + avg_time_to_decide_hours DECIMAL(10,2), + + -- Outcomes + decisive_results INT NOT NULL DEFAULT 0, -- Clear winner + close_results INT NOT NULL DEFAULT 0, -- Narrow margins + tie_results INT NOT NULL DEFAULT 0, + + -- Satisfaction proxy (based on post-vote engagement) + post_decision_engagement_rate DECIMAL(5,4), + appeal_rate DECIMAL(5,4), + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(community_id, voting_method, period_start, period_end) +); + +CREATE INDEX idx_voting_method_analytics_community ON voting_method_analytics(community_id); + +COMMENT ON TABLE voting_method_analytics IS 'Voting method effectiveness comparison'; + +-- ============================================================================ +-- HEALTH INDICATORS +-- Overall governance health dashboard data +-- ============================================================================ + +CREATE TABLE governance_health_indicators ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + calculated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + -- Participation health (0-100) + participation_score DECIMAL(5,2), + participation_trend VARCHAR(20), -- improving, stable, declining + + -- Inclusion health + inclusion_score DECIMAL(5,2), + new_voice_ratio DECIMAL(5,4), -- New participants in decisions + + -- Efficiency health + efficiency_score DECIMAL(5,2), + decision_velocity DECIMAL(10,2), -- Decisions per week + + -- Legitimacy health + legitimacy_score DECIMAL(5,2), + avg_quorum_margin DECIMAL(5,4), -- How much above quorum + + -- Delegation health + delegation_health_score DECIMAL(5,2), + power_concentration_risk VARCHAR(20), -- low, medium, high + + -- Overall + overall_health_score DECIMAL(5,2), + recommendations JSONB, + + UNIQUE(community_id, calculated_at) +); + +CREATE INDEX idx_health_indicators_community ON governance_health_indicators(community_id); +CREATE INDEX idx_health_indicators_time ON governance_health_indicators(community_id, calculated_at DESC); + +COMMENT ON TABLE governance_health_indicators IS 'Composite governance health metrics'; + +-- ============================================================================ +-- HELPER FUNCTIONS +-- ============================================================================ + +-- Calculate participation snapshot for a community +CREATE OR REPLACE FUNCTION calculate_participation_snapshot( + p_community_id UUID, + p_date DATE DEFAULT CURRENT_DATE +) RETURNS UUID AS $$ 
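+-- Usage sketch (hypothetical UUID, normally invoked by a scheduled job rather
+-- than by hand): SELECT calculate_participation_snapshot('<community-uuid>'::UUID);
+-- Re-running for the same community and date upserts the existing daily snapshot.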
+DECLARE + v_snapshot_id UUID; + v_total_members INT; + v_active_members INT; + v_proposals_created INT; + v_votes_cast INT; + v_unique_voters INT; + v_active_delegations INT; +BEGIN + -- Get member counts + SELECT COUNT(*) INTO v_total_members + FROM community_members WHERE community_id = p_community_id; + + -- Active members (any activity in last 30 days) + SELECT COUNT(DISTINCT user_id) INTO v_active_members + FROM ( + SELECT author_id AS user_id FROM proposals + WHERE community_id = p_community_id AND created_at > p_date - INTERVAL '30 days' + UNION + SELECT user_id FROM votes v + JOIN proposals p ON p.id = v.proposal_id + WHERE p.community_id = p_community_id AND v.created_at > p_date - INTERVAL '30 days' + UNION + SELECT author_id FROM comments c + JOIN proposals p ON p.id = c.proposal_id + WHERE p.community_id = p_community_id AND c.created_at > p_date - INTERVAL '30 days' + ) activity; + + -- Proposals in period + SELECT COUNT(*) INTO v_proposals_created + FROM proposals + WHERE community_id = p_community_id + AND created_at::DATE = p_date; + + -- Votes in period + SELECT COUNT(*), COUNT(DISTINCT user_id) + INTO v_votes_cast, v_unique_voters + FROM votes v + JOIN proposals p ON p.id = v.proposal_id + WHERE p.community_id = p_community_id + AND v.created_at::DATE = p_date; + + -- Active delegations + SELECT COUNT(*) INTO v_active_delegations + FROM delegations + WHERE community_id = p_community_id + AND (expires_at IS NULL OR expires_at > NOW()); + + -- Insert snapshot + INSERT INTO participation_snapshots ( + community_id, snapshot_date, snapshot_type, + total_members, active_members, + proposals_created, votes_cast, unique_voters, + active_delegations, + voter_turnout_rate, + engagement_score + ) VALUES ( + p_community_id, p_date, 'daily', + v_total_members, v_active_members, + v_proposals_created, v_votes_cast, v_unique_voters, + v_active_delegations, + CASE WHEN v_total_members > 0 THEN v_unique_voters::DECIMAL / v_total_members ELSE 0 END, + CASE WHEN v_total_members > 0 THEN v_active_members::DECIMAL / v_total_members * 100 ELSE 0 END + ) + ON CONFLICT (community_id, snapshot_date, snapshot_type) DO UPDATE SET + total_members = EXCLUDED.total_members, + active_members = EXCLUDED.active_members, + proposals_created = EXCLUDED.proposals_created, + votes_cast = EXCLUDED.votes_cast, + unique_voters = EXCLUDED.unique_voters, + active_delegations = EXCLUDED.active_delegations, + voter_turnout_rate = EXCLUDED.voter_turnout_rate, + engagement_score = EXCLUDED.engagement_score + RETURNING id INTO v_snapshot_id; + + RETURN v_snapshot_id; +END; +$$ LANGUAGE plpgsql; + +-- Calculate delegation concentration (Herfindahl Index) +CREATE OR REPLACE FUNCTION calculate_delegation_concentration( + p_community_id UUID +) RETURNS DECIMAL AS $$ +DECLARE + v_hhi DECIMAL; + v_total_delegated DECIMAL; +BEGIN + -- Get total delegated voting power + SELECT COALESCE(SUM(voting_power), 0) INTO v_total_delegated + FROM delegations + WHERE community_id = p_community_id + AND (expires_at IS NULL OR expires_at > NOW()); + + IF v_total_delegated = 0 THEN + RETURN 0; + END IF; + + -- Calculate HHI (sum of squared market shares) + SELECT COALESCE(SUM(POWER(delegate_share, 2)), 0) INTO v_hhi + FROM ( + SELECT + delegate_id, + SUM(voting_power)::DECIMAL / v_total_delegated AS delegate_share + FROM delegations + WHERE community_id = p_community_id + AND (expires_at IS NULL OR expires_at > NOW()) + GROUP BY delegate_id + ) shares; + + RETURN v_hhi; +END; +$$ LANGUAGE plpgsql; + +-- Calculate governance health score 
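+-- Combines the 30-day participation average, decision throughput, and the
+-- delegation concentration (Herfindahl index) computed above into a weighted
+-- 0-100 composite (0.4 / 0.3 / 0.3) stored in governance_health_indicators.
+-- Usage sketch (hypothetical UUID): SELECT calculate_governance_health('<community-uuid>'::UUID);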
+CREATE OR REPLACE FUNCTION calculate_governance_health( + p_community_id UUID +) RETURNS UUID AS $$ +DECLARE + v_health_id UUID; + v_participation_score DECIMAL; + v_efficiency_score DECIMAL; + v_delegation_score DECIMAL; + v_overall_score DECIMAL; + v_hhi DECIMAL; + v_power_risk VARCHAR(20); +BEGIN + -- Participation score (based on recent activity) + SELECT COALESCE(AVG(engagement_score), 50) INTO v_participation_score + FROM participation_snapshots + WHERE community_id = p_community_id + AND snapshot_date > CURRENT_DATE - INTERVAL '30 days'; + + -- Efficiency score (based on decision throughput) + SELECT COALESCE( + 100 - (COALESCE(AVG(avg_decision_time_hours), 168) / 168 * 50), -- Penalize slow decisions + 50 + ) INTO v_efficiency_score + FROM decision_load_metrics + WHERE community_id = p_community_id + AND period_end > CURRENT_DATE - INTERVAL '30 days'; + + -- Delegation health + v_hhi := calculate_delegation_concentration(p_community_id); + v_delegation_score := 100 - (v_hhi * 100); -- Lower HHI = better + + -- Determine power concentration risk + v_power_risk := CASE + WHEN v_hhi > 0.25 THEN 'high' + WHEN v_hhi > 0.15 THEN 'medium' + ELSE 'low' + END; + + -- Overall score (weighted average) + v_overall_score := (v_participation_score * 0.4 + v_efficiency_score * 0.3 + v_delegation_score * 0.3); + + -- Insert health record + INSERT INTO governance_health_indicators ( + community_id, + participation_score, + participation_trend, + efficiency_score, + delegation_health_score, + power_concentration_risk, + overall_health_score, + recommendations + ) VALUES ( + p_community_id, + v_participation_score, + 'stable', -- Would need trend calculation + v_efficiency_score, + v_delegation_score, + v_power_risk, + v_overall_score, + jsonb_build_array( + CASE WHEN v_participation_score < 50 THEN 'Consider outreach to increase participation' END, + CASE WHEN v_efficiency_score < 50 THEN 'Review workflow to reduce decision time' END, + CASE WHEN v_power_risk = 'high' THEN 'High delegation concentration - encourage diverse delegation' END + ) + ) + RETURNING id INTO v_health_id; + + RETURN v_health_id; +END; +$$ LANGUAGE plpgsql; + +-- ============================================================================ +-- VIEWS +-- ============================================================================ + +CREATE OR REPLACE VIEW community_health_dashboard AS +SELECT + c.id AS community_id, + c.name AS community_name, + ghi.overall_health_score, + ghi.participation_score, + ghi.efficiency_score, + ghi.delegation_health_score, + ghi.power_concentration_risk, + ghi.calculated_at AS last_calculated, + ps.total_members, + ps.active_members, + ps.voter_turnout_rate +FROM communities c +LEFT JOIN LATERAL ( + SELECT * FROM governance_health_indicators + WHERE community_id = c.id + ORDER BY calculated_at DESC + LIMIT 1 +) ghi ON true +LEFT JOIN LATERAL ( + SELECT * FROM participation_snapshots + WHERE community_id = c.id + ORDER BY snapshot_date DESC + LIMIT 1 +) ps ON true; + +CREATE OR REPLACE VIEW participation_trends AS +SELECT + community_id, + snapshot_date, + total_members, + active_members, + ROUND(active_members::DECIMAL / NULLIF(total_members, 0) * 100, 2) AS active_rate, + votes_cast, + unique_voters, + voter_turnout_rate, + engagement_score, + LAG(engagement_score) OVER (PARTITION BY community_id ORDER BY snapshot_date) AS prev_engagement, + engagement_score - LAG(engagement_score) OVER (PARTITION BY community_id ORDER BY snapshot_date) AS engagement_change +FROM participation_snapshots +WHERE 
snapshot_type = 'daily' +ORDER BY community_id, snapshot_date DESC; + +-- ============================================================================ +-- PLUGIN REGISTRATION +-- ============================================================================ + +INSERT INTO plugins (name, description, version, is_core, is_active, settings_schema) +VALUES ( + 'governance_analytics', + 'Aggregate governance analytics including participation metrics, delegation distribution, and decision load tracking. No individual ranking - focuses on community health.', + '1.0.0', + false, + true, + jsonb_build_object( + 'type', 'object', + 'properties', jsonb_build_object( + 'snapshot_frequency', jsonb_build_object( + 'type', 'string', + 'title', 'Snapshot Frequency', + 'description', 'How often to calculate participation snapshots', + 'enum', ARRAY['hourly', 'daily', 'weekly'], + 'default', 'daily' + ), + 'health_check_frequency', jsonb_build_object( + 'type', 'string', + 'title', 'Health Check Frequency', + 'description', 'How often to calculate governance health', + 'enum', ARRAY['daily', 'weekly'], + 'default', 'weekly' + ), + 'retention_days', jsonb_build_object( + 'type', 'integer', + 'title', 'Data Retention', + 'description', 'Days to retain detailed analytics data', + 'default', 365, + 'minimum', 30 + ), + 'public_dashboard', jsonb_build_object( + 'type', 'boolean', + 'title', 'Public Dashboard', + 'description', 'Make analytics dashboard visible to all members', + 'default', true + ) + ) + ) +) ON CONFLICT (name) DO UPDATE SET + version = EXCLUDED.version, + description = EXCLUDED.description; diff --git a/backend/migrations/20260126300000_conflict_resolution.sql b/backend/migrations/20260126300000_conflict_resolution.sql new file mode 100644 index 0000000..5bea740 --- /dev/null +++ b/backend/migrations/20260126300000_conflict_resolution.sql @@ -0,0 +1,539 @@ +-- ============================================================================ +-- CONFLICT RESOLUTION PLUGIN +-- Structured mediation, compromise proposals, assisted voting +-- Inspired by real-world FLOSS practices +-- ============================================================================ + +-- ============================================================================ +-- CONFLICT CASES +-- Track conflicts requiring resolution +-- ============================================================================ + +CREATE TYPE conflict_status AS ENUM ( + 'reported', -- Initial report + 'acknowledged', -- Acknowledged by moderators + 'mediation', -- Active mediation in progress + 'proposal_phase', -- Compromise proposals being developed + 'voting', -- Community voting on resolution + 'resolved', -- Successfully resolved + 'escalated', -- Escalated to higher authority + 'closed' -- Closed without resolution +); + +CREATE TYPE conflict_type AS ENUM ( + 'interpersonal', -- Between community members + 'technical', -- Technical disagreement + 'governance', -- Governance/policy disagreement + 'code_of_conduct', -- CoC violation dispute + 'proposal', -- Dispute over a proposal + 'moderation', -- Dispute over moderation action + 'other' +); + +CREATE TABLE conflict_cases ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + + -- Conflict details + title VARCHAR(300) NOT NULL, + description TEXT NOT NULL, + conflict_type conflict_type NOT NULL, + status conflict_status NOT NULL DEFAULT 'reported', + + -- Parties involved (anonymized IDs for privacy) + party_a_id UUID NOT 
NULL REFERENCES users(id), + party_b_id UUID REFERENCES users(id), -- Optional for non-interpersonal + affected_parties UUID[] DEFAULT '{}', + + -- Related content + related_proposal_id UUID REFERENCES proposals(id), + related_content_ids UUID[] DEFAULT '{}', + + -- Reporter (can be anonymous) + reported_by UUID REFERENCES users(id), + reported_anonymously BOOLEAN NOT NULL DEFAULT FALSE, + reported_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + -- Severity and urgency + severity_level INT NOT NULL DEFAULT 2, -- 1-5 + is_urgent BOOLEAN NOT NULL DEFAULT FALSE, + + -- Resolution tracking + resolved_at TIMESTAMPTZ, + resolution_type VARCHAR(50), + resolution_summary TEXT, + + -- Metadata + tags VARCHAR(50)[] DEFAULT '{}', + is_public BOOLEAN NOT NULL DEFAULT FALSE, -- Public visibility + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_conflict_cases_community ON conflict_cases(community_id); +CREATE INDEX idx_conflict_cases_status ON conflict_cases(status); +CREATE INDEX idx_conflict_cases_parties ON conflict_cases(party_a_id, party_b_id); +CREATE INDEX idx_conflict_cases_pending ON conflict_cases(community_id, status) + WHERE status NOT IN ('resolved', 'closed'); + +COMMENT ON TABLE conflict_cases IS 'Tracked conflicts requiring resolution'; + +-- ============================================================================ +-- MEDIATORS +-- Assigned mediators for conflict cases +-- ============================================================================ + +CREATE TYPE mediator_role AS ENUM ( + 'lead', -- Lead mediator + 'assistant', -- Assistant mediator + 'observer', -- Observer (learning) + 'specialist' -- Subject matter specialist +); + +CREATE TABLE conflict_mediators ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + conflict_id UUID NOT NULL REFERENCES conflict_cases(id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users(id), + role mediator_role NOT NULL DEFAULT 'assistant', + + -- Assignment + assigned_by UUID REFERENCES users(id), + assigned_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + -- Status + accepted BOOLEAN, + accepted_at TIMESTAMPTZ, + recused BOOLEAN NOT NULL DEFAULT FALSE, + recusal_reason TEXT, + + -- Activity + last_activity_at TIMESTAMPTZ, + notes_count INT NOT NULL DEFAULT 0, + + UNIQUE(conflict_id, user_id) +); + +CREATE INDEX idx_conflict_mediators_conflict ON conflict_mediators(conflict_id); +CREATE INDEX idx_conflict_mediators_user ON conflict_mediators(user_id); + +COMMENT ON TABLE conflict_mediators IS 'Mediators assigned to conflict cases'; + +-- ============================================================================ +-- MEDIATION SESSIONS +-- Scheduled mediation sessions +-- ============================================================================ + +CREATE TYPE session_status AS ENUM ( + 'scheduled', + 'in_progress', + 'completed', + 'cancelled', + 'rescheduled' +); + +CREATE TABLE mediation_sessions ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + conflict_id UUID NOT NULL REFERENCES conflict_cases(id) ON DELETE CASCADE, + + -- Scheduling + session_number INT NOT NULL, + scheduled_at TIMESTAMPTZ NOT NULL, + duration_minutes INT NOT NULL DEFAULT 60, + + -- Status + status session_status NOT NULL DEFAULT 'scheduled', + started_at TIMESTAMPTZ, + ended_at TIMESTAMPTZ, + + -- Participants + attendees UUID[] NOT NULL DEFAULT '{}', + absent_parties UUID[] DEFAULT '{}', + + -- Session content + agenda TEXT, + summary TEXT, + action_items JSONB DEFAULT '[]', + + -- Follow-up + 
next_session_needed BOOLEAN, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_mediation_sessions_conflict ON mediation_sessions(conflict_id); +CREATE INDEX idx_mediation_sessions_scheduled ON mediation_sessions(scheduled_at); + +COMMENT ON TABLE mediation_sessions IS 'Scheduled mediation sessions'; + +-- ============================================================================ +-- MEDIATION NOTES +-- Private notes from mediators (confidential) +-- ============================================================================ + +CREATE TABLE mediation_notes ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + conflict_id UUID NOT NULL REFERENCES conflict_cases(id) ON DELETE CASCADE, + session_id UUID REFERENCES mediation_sessions(id) ON DELETE SET NULL, + author_id UUID NOT NULL REFERENCES users(id), + + -- Note content + content TEXT NOT NULL, + is_confidential BOOLEAN NOT NULL DEFAULT TRUE, + visibility VARCHAR(50) NOT NULL DEFAULT 'mediators', -- mediators, parties, public + + -- Categorization + note_type VARCHAR(50) NOT NULL DEFAULT 'observation', -- observation, concern, progress, action + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_mediation_notes_conflict ON mediation_notes(conflict_id); +CREATE INDEX idx_mediation_notes_session ON mediation_notes(session_id); + +COMMENT ON TABLE mediation_notes IS 'Confidential mediator notes'; + +-- ============================================================================ +-- COMPROMISE PROPOSALS +-- Proposed resolutions/compromises +-- ============================================================================ + +CREATE TYPE compromise_status AS ENUM ( + 'draft', + 'proposed', + 'under_review', + 'accepted', + 'rejected', + 'modified', + 'implemented' +); + +CREATE TABLE compromise_proposals ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + conflict_id UUID NOT NULL REFERENCES conflict_cases(id) ON DELETE CASCADE, + + -- Proposal details + title VARCHAR(300) NOT NULL, + description TEXT NOT NULL, + proposed_actions JSONB NOT NULL DEFAULT '[]', + + -- Author + proposed_by UUID NOT NULL REFERENCES users(id), + proposed_by_role VARCHAR(50) NOT NULL, -- mediator, party_a, party_b, community + proposed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + -- Status + status compromise_status NOT NULL DEFAULT 'draft', + + -- Acceptance tracking + party_a_response VARCHAR(20), -- accept, reject, counter + party_a_response_at TIMESTAMPTZ, + party_a_feedback TEXT, + + party_b_response VARCHAR(20), + party_b_response_at TIMESTAMPTZ, + party_b_feedback TEXT, + + -- Community vote (if needed) + requires_community_vote BOOLEAN NOT NULL DEFAULT FALSE, + vote_proposal_id UUID REFERENCES proposals(id), + + -- Implementation + implementation_deadline TIMESTAMPTZ, + implemented_at TIMESTAMPTZ, + implementation_notes TEXT, + + version INT NOT NULL DEFAULT 1, + parent_proposal_id UUID REFERENCES compromise_proposals(id), + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_compromise_proposals_conflict ON compromise_proposals(conflict_id); +CREATE INDEX idx_compromise_proposals_status ON compromise_proposals(status); + +COMMENT ON TABLE compromise_proposals IS 'Proposed compromise resolutions'; + +-- ============================================================================ +-- CONFLICT HISTORY +-- Audit trail of all actions taken +-- 
============================================================================ + +CREATE TABLE conflict_history ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + conflict_id UUID NOT NULL REFERENCES conflict_cases(id) ON DELETE CASCADE, + + -- Action details + action_type VARCHAR(100) NOT NULL, + action_description TEXT, + + -- Actor + actor_id UUID REFERENCES users(id), + actor_role VARCHAR(50), + + -- Changes + old_state JSONB, + new_state JSONB, + + -- Visibility + is_public BOOLEAN NOT NULL DEFAULT FALSE, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_conflict_history_conflict ON conflict_history(conflict_id); +CREATE INDEX idx_conflict_history_time ON conflict_history(created_at); + +COMMENT ON TABLE conflict_history IS 'Audit trail of conflict resolution actions'; + +-- ============================================================================ +-- MEDIATOR POOL +-- Community members trained/available for mediation +-- ============================================================================ + +CREATE TABLE mediator_pool ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users(id), + + -- Qualification + is_trained BOOLEAN NOT NULL DEFAULT FALSE, + trained_at TIMESTAMPTZ, + certification_level VARCHAR(50), -- basic, intermediate, advanced + specializations VARCHAR(100)[] DEFAULT '{}', + + -- Availability + is_available BOOLEAN NOT NULL DEFAULT TRUE, + max_concurrent_cases INT NOT NULL DEFAULT 2, + + -- Statistics + cases_mediated INT NOT NULL DEFAULT 0, + successful_resolutions INT NOT NULL DEFAULT 0, + avg_resolution_days DECIMAL(5,1), + + -- Feedback (aggregate, not individual) + satisfaction_score DECIMAL(3,2), -- 0-5 + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(community_id, user_id) +); + +CREATE INDEX idx_mediator_pool_community ON mediator_pool(community_id); +CREATE INDEX idx_mediator_pool_available ON mediator_pool(community_id, is_available) WHERE is_available = true; + +COMMENT ON TABLE mediator_pool IS 'Pool of available mediators'; + +-- ============================================================================ +-- HELPER FUNCTIONS +-- ============================================================================ + +-- Assign mediators to a conflict +CREATE OR REPLACE FUNCTION assign_mediators( + p_conflict_id UUID, + p_assigned_by UUID +) RETURNS INT AS $$ +DECLARE + v_community_id UUID; + v_count INT := 0; + v_mediator RECORD; +BEGIN + -- Get community + SELECT community_id INTO v_community_id + FROM conflict_cases WHERE id = p_conflict_id; + + -- Find available mediators (not party to conflict) + FOR v_mediator IN + SELECT mp.user_id + FROM mediator_pool mp + WHERE mp.community_id = v_community_id + AND mp.is_available = true + AND mp.user_id NOT IN ( + SELECT party_a_id FROM conflict_cases WHERE id = p_conflict_id + UNION + SELECT party_b_id FROM conflict_cases WHERE id = p_conflict_id AND party_b_id IS NOT NULL + ) + AND (SELECT COUNT(*) FROM conflict_mediators cm + JOIN conflict_cases cc ON cc.id = cm.conflict_id + WHERE cm.user_id = mp.user_id AND cc.status NOT IN ('resolved', 'closed') + ) < mp.max_concurrent_cases + ORDER BY mp.cases_mediated ASC, RANDOM() + LIMIT 2 + LOOP + INSERT INTO conflict_mediators (conflict_id, user_id, role, assigned_by) + VALUES (p_conflict_id, v_mediator.user_id, + CASE WHEN v_count = 0 THEN 'lead' ELSE 'assistant' 
END::mediator_role, + p_assigned_by) + ON CONFLICT DO NOTHING; + v_count := v_count + 1; + END LOOP; + + -- Update conflict status if mediators assigned + IF v_count > 0 THEN + UPDATE conflict_cases + SET status = 'acknowledged', updated_at = NOW() + WHERE id = p_conflict_id AND status = 'reported'; + + -- Log action + INSERT INTO conflict_history (conflict_id, action_type, action_description, actor_id) + VALUES (p_conflict_id, 'mediators_assigned', + v_count || ' mediator(s) assigned', p_assigned_by); + END IF; + + RETURN v_count; +END; +$$ LANGUAGE plpgsql; + +-- Transition conflict status +CREATE OR REPLACE FUNCTION transition_conflict_status( + p_conflict_id UUID, + p_new_status conflict_status, + p_actor_id UUID, + p_notes TEXT DEFAULT NULL +) RETURNS BOOLEAN AS $$ +DECLARE + v_old_status conflict_status; +BEGIN + SELECT status INTO v_old_status FROM conflict_cases WHERE id = p_conflict_id; + + -- Update status + UPDATE conflict_cases + SET status = p_new_status, + updated_at = NOW(), + resolved_at = CASE WHEN p_new_status IN ('resolved', 'closed') THEN NOW() ELSE resolved_at END + WHERE id = p_conflict_id; + + -- Log transition + INSERT INTO conflict_history (conflict_id, action_type, action_description, actor_id, old_state, new_state) + VALUES (p_conflict_id, 'status_change', p_notes, p_actor_id, + jsonb_build_object('status', v_old_status), + jsonb_build_object('status', p_new_status)); + + RETURN TRUE; +END; +$$ LANGUAGE plpgsql; + +-- Calculate conflict resolution statistics +CREATE OR REPLACE FUNCTION get_conflict_statistics(p_community_id UUID) +RETURNS TABLE ( + total_conflicts BIGINT, + resolved_conflicts BIGINT, + avg_resolution_days DECIMAL, + mediation_success_rate DECIMAL, + active_conflicts BIGINT +) AS $$ +BEGIN + RETURN QUERY + SELECT + COUNT(*)::BIGINT, + COUNT(*) FILTER (WHERE status = 'resolved')::BIGINT, + AVG(EXTRACT(EPOCH FROM (resolved_at - created_at)) / 86400)::DECIMAL(10,2), + (COUNT(*) FILTER (WHERE status = 'resolved')::DECIMAL / NULLIF(COUNT(*) FILTER (WHERE status IN ('resolved', 'closed')), 0))::DECIMAL(5,4), + COUNT(*) FILTER (WHERE status NOT IN ('resolved', 'closed'))::BIGINT + FROM conflict_cases + WHERE community_id = p_community_id; +END; +$$ LANGUAGE plpgsql; + +-- ============================================================================ +-- VIEWS +-- ============================================================================ + +CREATE OR REPLACE VIEW active_conflicts AS +SELECT + cc.id, + cc.community_id, + c.name AS community_name, + cc.title, + cc.conflict_type::text, + cc.status::text, + cc.severity_level, + cc.is_urgent, + cc.reported_at, + ARRAY_AGG(DISTINCT u.username) FILTER (WHERE cm.role = 'lead') AS lead_mediators, + COUNT(DISTINCT cp.id) AS compromise_proposals_count, + COUNT(DISTINCT ms.id) AS sessions_count +FROM conflict_cases cc +JOIN communities c ON c.id = cc.community_id +LEFT JOIN conflict_mediators cm ON cm.conflict_id = cc.id +LEFT JOIN users u ON u.id = cm.user_id +LEFT JOIN compromise_proposals cp ON cp.conflict_id = cc.id +LEFT JOIN mediation_sessions ms ON ms.conflict_id = cc.id +WHERE cc.status NOT IN ('resolved', 'closed') +GROUP BY cc.id, cc.community_id, c.name, cc.title, cc.conflict_type, + cc.status, cc.severity_level, cc.is_urgent, cc.reported_at +ORDER BY cc.is_urgent DESC, cc.severity_level DESC, cc.reported_at; + +CREATE OR REPLACE VIEW mediator_workload AS +SELECT + mp.user_id, + u.username, + mp.community_id, + mp.certification_level, + mp.is_available, + mp.max_concurrent_cases, + COUNT(cm.id) FILTER 
(WHERE cc.status NOT IN ('resolved', 'closed')) AS active_cases, + mp.cases_mediated AS total_cases, + mp.successful_resolutions, + mp.satisfaction_score +FROM mediator_pool mp +JOIN users u ON u.id = mp.user_id +LEFT JOIN conflict_mediators cm ON cm.user_id = mp.user_id +LEFT JOIN conflict_cases cc ON cc.id = cm.conflict_id +GROUP BY mp.user_id, u.username, mp.community_id, mp.certification_level, + mp.is_available, mp.max_concurrent_cases, mp.cases_mediated, + mp.successful_resolutions, mp.satisfaction_score; + +-- ============================================================================ +-- PLUGIN REGISTRATION +-- ============================================================================ + +INSERT INTO plugins (name, description, version, is_core, is_active, settings_schema) +VALUES ( + 'conflict_resolution', + 'Structured conflict resolution with mediation sessions, compromise proposals, and assisted voting. Inspired by real-world FLOSS community practices.', + '1.0.0', + false, + true, + jsonb_build_object( + 'type', 'object', + 'properties', jsonb_build_object( + 'auto_assign_mediators', jsonb_build_object( + 'type', 'boolean', + 'title', 'Auto-assign Mediators', + 'description', 'Automatically assign available mediators to new conflicts', + 'default', true + ), + 'min_mediators', jsonb_build_object( + 'type', 'integer', + 'title', 'Minimum Mediators', + 'description', 'Minimum number of mediators per conflict', + 'default', 1, + 'minimum', 1, + 'maximum', 5 + ), + 'max_resolution_days', jsonb_build_object( + 'type', 'integer', + 'title', 'Max Resolution Days', + 'description', 'Target days for conflict resolution', + 'default', 30, + 'minimum', 7 + ), + 'require_compromise_vote', jsonb_build_object( + 'type', 'boolean', + 'title', 'Require Community Vote', + 'description', 'Require community vote on compromise proposals', + 'default', false + ), + 'anonymous_reporting', jsonb_build_object( + 'type', 'boolean', + 'title', 'Allow Anonymous Reporting', + 'description', 'Allow conflicts to be reported anonymously', + 'default', true + ) + ) + ) +) ON CONFLICT (name) DO UPDATE SET + version = EXCLUDED.version, + description = EXCLUDED.description; diff --git a/backend/migrations/20260126310000_structured_deliberation.sql b/backend/migrations/20260126310000_structured_deliberation.sql new file mode 100644 index 0000000..ac47c4f --- /dev/null +++ b/backend/migrations/20260126310000_structured_deliberation.sql @@ -0,0 +1,451 @@ +-- ============================================================================ +-- STRUCTURED DELIBERATION PLUGIN +-- Pro/con arguments, collaborative summaries, noise reduction +-- ============================================================================ + +-- Enable ltree extension for hierarchical threading +CREATE EXTENSION IF NOT EXISTS ltree; + +-- ============================================================================ +-- ARGUMENT THREADS +-- Structured pro/con arguments linked to proposals +-- ============================================================================ + +CREATE TYPE argument_stance AS ENUM ('pro', 'con', 'neutral', 'question', 'clarification'); + +CREATE TABLE deliberation_arguments ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + parent_id UUID REFERENCES deliberation_arguments(id) ON DELETE CASCADE, + + -- Argument content + stance argument_stance NOT NULL, + title VARCHAR(300) NOT NULL, + content TEXT NOT NULL, + + -- Author + author_id UUID NOT 
NULL REFERENCES users(id), + + -- Engagement metrics + upvotes INT NOT NULL DEFAULT 0, + downvotes INT NOT NULL DEFAULT 0, + reply_count INT NOT NULL DEFAULT 0, + + -- Quality indicators + is_substantive BOOLEAN NOT NULL DEFAULT TRUE, + is_featured BOOLEAN NOT NULL DEFAULT FALSE, + quality_score DECIMAL(5,2) DEFAULT 0, + + -- Moderation + is_hidden BOOLEAN NOT NULL DEFAULT FALSE, + hidden_reason TEXT, + + -- Threading + depth INT NOT NULL DEFAULT 0, + thread_path LTREE, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_deliberation_arguments_proposal ON deliberation_arguments(proposal_id); +CREATE INDEX idx_deliberation_arguments_parent ON deliberation_arguments(parent_id); +CREATE INDEX idx_deliberation_arguments_stance ON deliberation_arguments(proposal_id, stance); +CREATE INDEX idx_deliberation_arguments_featured ON deliberation_arguments(proposal_id, is_featured) WHERE is_featured = true; +CREATE INDEX idx_deliberation_arguments_quality ON deliberation_arguments(proposal_id, quality_score DESC); + +COMMENT ON TABLE deliberation_arguments IS 'Structured pro/con arguments for proposals'; + +-- ============================================================================ +-- ARGUMENT VOTES +-- Community voting on argument quality/relevance +-- ============================================================================ + +CREATE TABLE argument_votes ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + argument_id UUID NOT NULL REFERENCES deliberation_arguments(id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users(id), + vote_type VARCHAR(20) NOT NULL, -- upvote, downvote + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(argument_id, user_id) +); + +CREATE INDEX idx_argument_votes_argument ON argument_votes(argument_id); + +-- ============================================================================ +-- COLLABORATIVE SUMMARIES +-- Community-maintained summaries of deliberation +-- ============================================================================ + +CREATE TYPE summary_type AS ENUM ( + 'executive', -- Brief executive summary + 'pro_arguments', -- Summary of pro arguments + 'con_arguments', -- Summary of con arguments + 'consensus', -- Areas of consensus + 'contention', -- Points of contention + 'questions', -- Open questions + 'full' -- Full deliberation summary +); + +CREATE TABLE deliberation_summaries ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + summary_type summary_type NOT NULL, + + -- Content + content TEXT NOT NULL, + key_points JSONB DEFAULT '[]', + + -- Versioning + version INT NOT NULL DEFAULT 1, + + -- Author/Editor + last_editor_id UUID NOT NULL REFERENCES users(id), + + -- Approval + is_approved BOOLEAN NOT NULL DEFAULT FALSE, + approved_by UUID REFERENCES users(id), + approved_at TIMESTAMPTZ, + + -- Metrics + edit_count INT NOT NULL DEFAULT 1, + view_count INT NOT NULL DEFAULT 0, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(proposal_id, summary_type) +); + +CREATE INDEX idx_deliberation_summaries_proposal ON deliberation_summaries(proposal_id); + +COMMENT ON TABLE deliberation_summaries IS 'Collaborative summaries of deliberation'; + +-- ============================================================================ +-- SUMMARY EDIT HISTORY +-- Track all edits to summaries +-- 
============================================================================ + +CREATE TABLE summary_edit_history ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + summary_id UUID NOT NULL REFERENCES deliberation_summaries(id) ON DELETE CASCADE, + version INT NOT NULL, + content TEXT NOT NULL, + key_points JSONB, + editor_id UUID NOT NULL REFERENCES users(id), + edit_comment TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_summary_edit_history_summary ON summary_edit_history(summary_id); + +-- ============================================================================ +-- READING REQUIREMENTS +-- Track if users have read required content before participating +-- ============================================================================ + +CREATE TABLE deliberation_reading_log ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + user_id UUID NOT NULL REFERENCES users(id), + + -- What was read + read_proposal BOOLEAN NOT NULL DEFAULT FALSE, + read_summaries BOOLEAN NOT NULL DEFAULT FALSE, + read_top_arguments BOOLEAN NOT NULL DEFAULT FALSE, + + -- Timing + first_read_at TIMESTAMPTZ, + reading_time_seconds INT NOT NULL DEFAULT 0, + + -- Can participate + can_comment BOOLEAN GENERATED ALWAYS AS (read_proposal) STORED, + can_vote BOOLEAN GENERATED ALWAYS AS (read_proposal AND read_summaries) STORED, + + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(proposal_id, user_id) +); + +CREATE INDEX idx_reading_log_proposal ON deliberation_reading_log(proposal_id); +CREATE INDEX idx_reading_log_user ON deliberation_reading_log(user_id); + +COMMENT ON TABLE deliberation_reading_log IS 'Track reading before participation'; + +-- ============================================================================ +-- DISCUSSION QUALITY METRICS +-- Track deliberation quality over time +-- ============================================================================ + +CREATE TABLE deliberation_metrics ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + calculated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + -- Argument counts + total_arguments INT NOT NULL DEFAULT 0, + pro_arguments INT NOT NULL DEFAULT 0, + con_arguments INT NOT NULL DEFAULT 0, + neutral_arguments INT NOT NULL DEFAULT 0, + + -- Participation + unique_participants INT NOT NULL DEFAULT 0, + avg_argument_length INT, + + -- Quality indicators + substantive_ratio DECIMAL(5,4), -- Ratio of substantive arguments + engagement_score DECIMAL(5,2), + balance_score DECIMAL(5,4), -- How balanced pro/con + + -- Reading compliance + readers_before_posting DECIMAL(5,4), + + UNIQUE(proposal_id, calculated_at) +); + +CREATE INDEX idx_deliberation_metrics_proposal ON deliberation_metrics(proposal_id); + +-- ============================================================================ +-- FACILITATION PROMPTS +-- AI/moderator prompts to improve deliberation +-- ============================================================================ + +CREATE TABLE facilitation_prompts ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + proposal_id UUID NOT NULL REFERENCES proposals(id) ON DELETE CASCADE, + + -- Prompt content + prompt_type VARCHAR(50) NOT NULL, -- balance_needed, clarification_needed, summary_outdated + message TEXT NOT NULL, + + -- Targeting + target_stance argument_stance, + + -- Status + is_active BOOLEAN NOT NULL DEFAULT TRUE, + addressed_at TIMESTAMPTZ, + addressed_by 
UUID REFERENCES users(id), + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_facilitation_prompts_proposal ON facilitation_prompts(proposal_id); + +-- ============================================================================ +-- HELPER FUNCTIONS +-- ============================================================================ + +-- Add an argument and update counts +CREATE OR REPLACE FUNCTION add_deliberation_argument( + p_proposal_id UUID, + p_parent_id UUID, + p_stance argument_stance, + p_title VARCHAR(300), + p_content TEXT, + p_author_id UUID +) RETURNS UUID AS $$ +DECLARE + v_argument_id UUID; + v_depth INT := 0; + v_parent_path LTREE; +BEGIN + -- Calculate depth and path + IF p_parent_id IS NOT NULL THEN + SELECT depth + 1, thread_path INTO v_depth, v_parent_path + FROM deliberation_arguments WHERE id = p_parent_id; + END IF; + + -- Insert argument + INSERT INTO deliberation_arguments ( + proposal_id, parent_id, stance, title, content, author_id, depth + ) VALUES ( + p_proposal_id, p_parent_id, p_stance, p_title, p_content, p_author_id, v_depth + ) RETURNING id INTO v_argument_id; + + -- Update thread path + UPDATE deliberation_arguments + SET thread_path = COALESCE(v_parent_path || v_argument_id::text, v_argument_id::text::ltree) + WHERE id = v_argument_id; + + -- Update parent reply count + IF p_parent_id IS NOT NULL THEN + UPDATE deliberation_arguments + SET reply_count = reply_count + 1, updated_at = NOW() + WHERE id = p_parent_id; + END IF; + + RETURN v_argument_id; +END; +$$ LANGUAGE plpgsql; + +-- Vote on an argument +CREATE OR REPLACE FUNCTION vote_on_argument( + p_argument_id UUID, + p_user_id UUID, + p_vote_type VARCHAR(20) +) RETURNS VOID AS $$ +DECLARE + v_old_vote VARCHAR(20); +BEGIN + -- Get existing vote + SELECT vote_type INTO v_old_vote + FROM argument_votes + WHERE argument_id = p_argument_id AND user_id = p_user_id; + + -- Remove old vote effect + IF v_old_vote IS NOT NULL THEN + UPDATE deliberation_arguments SET + upvotes = upvotes - CASE WHEN v_old_vote = 'upvote' THEN 1 ELSE 0 END, + downvotes = downvotes - CASE WHEN v_old_vote = 'downvote' THEN 1 ELSE 0 END + WHERE id = p_argument_id; + END IF; + + -- Insert/update vote + INSERT INTO argument_votes (argument_id, user_id, vote_type) + VALUES (p_argument_id, p_user_id, p_vote_type) + ON CONFLICT (argument_id, user_id) DO UPDATE SET + vote_type = p_vote_type, + created_at = NOW(); + + -- Apply new vote + UPDATE deliberation_arguments SET + upvotes = upvotes + CASE WHEN p_vote_type = 'upvote' THEN 1 ELSE 0 END, + downvotes = downvotes + CASE WHEN p_vote_type = 'downvote' THEN 1 ELSE 0 END, + quality_score = (upvotes + CASE WHEN p_vote_type = 'upvote' THEN 1 ELSE 0 END - + downvotes - CASE WHEN p_vote_type = 'downvote' THEN 1 ELSE 0 END)::DECIMAL / + GREATEST(upvotes + downvotes + 1, 1) + WHERE id = p_argument_id; +END; +$$ LANGUAGE plpgsql; + +-- Calculate deliberation metrics +CREATE OR REPLACE FUNCTION calculate_deliberation_metrics(p_proposal_id UUID) +RETURNS UUID AS $$ +DECLARE + v_metric_id UUID; +BEGIN + INSERT INTO deliberation_metrics ( + proposal_id, + total_arguments, + pro_arguments, + con_arguments, + neutral_arguments, + unique_participants, + avg_argument_length, + substantive_ratio, + balance_score + ) + SELECT + p_proposal_id, + COUNT(*), + COUNT(*) FILTER (WHERE stance = 'pro'), + COUNT(*) FILTER (WHERE stance = 'con'), + COUNT(*) FILTER (WHERE stance = 'neutral'), + COUNT(DISTINCT author_id), + AVG(LENGTH(content))::INT, + COUNT(*) FILTER (WHERE is_substantive)::DECIMAL 
/ NULLIF(COUNT(*), 0), + 1 - ABS( + COUNT(*) FILTER (WHERE stance = 'pro')::DECIMAL / NULLIF(COUNT(*) FILTER (WHERE stance IN ('pro', 'con')), 0) - 0.5 + ) * 2 + FROM deliberation_arguments + WHERE proposal_id = p_proposal_id AND NOT is_hidden + RETURNING id INTO v_metric_id; + + RETURN v_metric_id; +END; +$$ LANGUAGE plpgsql; + +-- ============================================================================ +-- VIEWS +-- ============================================================================ + +CREATE OR REPLACE VIEW proposal_deliberation_summary AS +SELECT + p.id AS proposal_id, + p.title AS proposal_title, + COUNT(da.id) AS total_arguments, + COUNT(da.id) FILTER (WHERE da.stance = 'pro') AS pro_count, + COUNT(da.id) FILTER (WHERE da.stance = 'con') AS con_count, + COUNT(DISTINCT da.author_id) AS participants, + COUNT(ds.id) AS summaries_count, + BOOL_OR(ds.is_approved) AS has_approved_summary, + MAX(da.quality_score) AS top_argument_score +FROM proposals p +LEFT JOIN deliberation_arguments da ON da.proposal_id = p.id AND NOT da.is_hidden +LEFT JOIN deliberation_summaries ds ON ds.proposal_id = p.id +GROUP BY p.id, p.title; + +CREATE OR REPLACE VIEW top_arguments AS +SELECT + da.id, + da.proposal_id, + da.stance::text, + da.title, + da.content, + u.username AS author_username, + da.upvotes, + da.downvotes, + da.quality_score, + da.is_featured, + da.reply_count, + da.created_at +FROM deliberation_arguments da +JOIN users u ON u.id = da.author_id +WHERE NOT da.is_hidden AND da.parent_id IS NULL +ORDER BY da.proposal_id, da.quality_score DESC; + +-- ============================================================================ +-- PLUGIN REGISTRATION +-- ============================================================================ + +INSERT INTO plugins (name, description, version, is_core, is_active, settings_schema) +VALUES ( + 'structured_deliberation', + 'Structured deliberation with pro/con arguments, collaborative summaries, and quality metrics. 
Reduces noise and promotes thoughtful discussion.', + '1.0.0', + false, + true, + jsonb_build_object( + 'type', 'object', + 'properties', jsonb_build_object( + 'require_reading', jsonb_build_object( + 'type', 'boolean', + 'title', 'Require Reading', + 'description', 'Require users to read proposal before commenting', + 'default', true + ), + 'min_reading_time_seconds', jsonb_build_object( + 'type', 'integer', + 'title', 'Minimum Reading Time', + 'description', 'Minimum seconds spent reading before can participate', + 'default', 30, + 'minimum', 0 + ), + 'enable_summaries', jsonb_build_object( + 'type', 'boolean', + 'title', 'Enable Summaries', + 'description', 'Enable collaborative summary creation', + 'default', true + ), + 'summary_approval_required', jsonb_build_object( + 'type', 'boolean', + 'title', 'Summary Approval Required', + 'description', 'Require moderator approval for summaries', + 'default', false + ), + 'show_balance_prompts', jsonb_build_object( + 'type', 'boolean', + 'title', 'Show Balance Prompts', + 'description', 'Show prompts when deliberation is unbalanced', + 'default', true + ), + 'featured_argument_threshold', jsonb_build_object( + 'type', 'integer', + 'title', 'Featured Threshold', + 'description', 'Minimum upvotes for auto-featuring', + 'default', 10, + 'minimum', 1 + ) + ) + ) +) ON CONFLICT (name) DO UPDATE SET + version = EXCLUDED.version, + description = EXCLUDED.description; diff --git a/backend/migrations/20260126320000_public_data_export.sql b/backend/migrations/20260126320000_public_data_export.sql new file mode 100644 index 0000000..85863d2 --- /dev/null +++ b/backend/migrations/20260126320000_public_data_export.sql @@ -0,0 +1,253 @@ +-- ============================================================================ +-- PUBLIC DATA EXPORT PLUGIN +-- CSV/JSON exports, complete datasets, privacy-aware, open data by default +-- ============================================================================ + +CREATE TYPE export_format AS ENUM ('json', 'csv', 'jsonl'); +CREATE TYPE export_scope AS ENUM ('community', 'global', 'personal'); +CREATE TYPE export_status AS ENUM ('pending', 'processing', 'completed', 'failed', 'expired'); + +-- ============================================================================ +-- EXPORT CONFIGURATIONS +-- ============================================================================ + +CREATE TABLE export_configurations ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + community_id UUID REFERENCES communities(id) ON DELETE CASCADE, + name VARCHAR(100) NOT NULL, + description TEXT, + export_type VARCHAR(50) NOT NULL, + default_format export_format NOT NULL DEFAULT 'json', + scope export_scope NOT NULL DEFAULT 'community', + anonymize_users BOOLEAN NOT NULL DEFAULT TRUE, + include_deleted BOOLEAN NOT NULL DEFAULT FALSE, + redact_fields VARCHAR(100)[] DEFAULT '{}', + public_access BOOLEAN NOT NULL DEFAULT TRUE, + rate_limit_per_hour INT NOT NULL DEFAULT 10, + max_records INT NOT NULL DEFAULT 100000, + is_active BOOLEAN NOT NULL DEFAULT TRUE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(community_id, name) +); + +CREATE INDEX idx_export_configs_community ON export_configurations(community_id); + +-- ============================================================================ +-- EXPORT JOBS +-- ============================================================================ + +CREATE TABLE export_jobs ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + configuration_id UUID REFERENCES export_configurations(id) ON 
DELETE SET NULL, + community_id UUID REFERENCES communities(id) ON DELETE CASCADE, + export_type VARCHAR(50) NOT NULL, + format export_format NOT NULL DEFAULT 'json', + parameters JSONB DEFAULT '{}', + date_from TIMESTAMPTZ, + date_to TIMESTAMPTZ, + requested_by UUID REFERENCES users(id), + requested_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + status export_status NOT NULL DEFAULT 'pending', + started_at TIMESTAMPTZ, + completed_at TIMESTAMPTZ, + error_message TEXT, + record_count INT, + file_size_bytes BIGINT, + file_path TEXT, + download_url TEXT, + download_expires_at TIMESTAMPTZ, + download_count INT NOT NULL DEFAULT 0 +); + +CREATE INDEX idx_export_jobs_community ON export_jobs(community_id); +CREATE INDEX idx_export_jobs_status ON export_jobs(status); +CREATE INDEX idx_export_jobs_pending ON export_jobs(status, requested_at) WHERE status = 'pending'; + +-- ============================================================================ +-- SCHEDULED EXPORTS (pre-generated datasets) +-- ============================================================================ + +CREATE TABLE scheduled_exports ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + configuration_id UUID NOT NULL REFERENCES export_configurations(id) ON DELETE CASCADE, + community_id UUID REFERENCES communities(id) ON DELETE CASCADE, + dataset_name VARCHAR(200) NOT NULL, + description TEXT, + generated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + data_from TIMESTAMPTZ, + data_to TIMESTAMPTZ, + json_file_path TEXT, + csv_file_path TEXT, + record_count INT NOT NULL, + checksum VARCHAR(64), + is_current BOOLEAN NOT NULL DEFAULT TRUE, + expires_at TIMESTAMPTZ +); + +CREATE INDEX idx_scheduled_exports_current ON scheduled_exports(community_id, is_current) WHERE is_current = true; + +-- ============================================================================ +-- DATA DICTIONARY +-- ============================================================================ + +CREATE TABLE export_data_dictionary ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + export_type VARCHAR(50) NOT NULL, + field_name VARCHAR(100) NOT NULL, + field_type VARCHAR(50) NOT NULL, + description TEXT NOT NULL, + is_pii BOOLEAN NOT NULL DEFAULT FALSE, + anonymization_method VARCHAR(50), + example_value TEXT, + UNIQUE(export_type, field_name) +); + +-- ============================================================================ +-- EXPORT AUDIT LOG +-- ============================================================================ + +CREATE TABLE export_audit_log ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + job_id UUID REFERENCES export_jobs(id), + community_id UUID REFERENCES communities(id) ON DELETE SET NULL, + action_type VARCHAR(50) NOT NULL, + actor_id UUID REFERENCES users(id), + ip_address INET, + user_agent TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_export_audit_community ON export_audit_log(community_id); +CREATE INDEX idx_export_audit_time ON export_audit_log(created_at); + +-- ============================================================================ +-- DEFAULT CONFIGURATIONS +-- ============================================================================ + +INSERT INTO export_data_dictionary (export_type, field_name, field_type, description, is_pii, anonymization_method) VALUES +('proposals', 'id', 'uuid', 'Unique proposal identifier', false, null), +('proposals', 'title', 'string', 'Proposal title', false, null), +('proposals', 'content', 'text', 'Full proposal content', false, null), +('proposals', 
'author_id', 'uuid', 'Author identifier (anonymized)', true, 'hash'), +('proposals', 'status', 'string', 'Current proposal status', false, null), +('proposals', 'created_at', 'timestamp', 'Creation timestamp', false, null), +('proposals', 'vote_count', 'integer', 'Total votes received', false, null), +('votes', 'id', 'uuid', 'Unique vote identifier', false, null), +('votes', 'proposal_id', 'uuid', 'Associated proposal', false, null), +('votes', 'voter_id', 'uuid', 'Voter identifier (anonymized)', true, 'hash'), +('votes', 'vote_value', 'jsonb', 'Vote data (method-specific)', false, null), +('votes', 'created_at', 'timestamp', 'Vote timestamp', false, null), +('analytics', 'date', 'date', 'Metric date', false, null), +('analytics', 'metric_name', 'string', 'Metric identifier', false, null), +('analytics', 'value', 'numeric', 'Metric value', false, null); + +-- ============================================================================ +-- HELPER FUNCTIONS +-- ============================================================================ + +CREATE OR REPLACE FUNCTION create_export_job( + p_community_id UUID, + p_export_type VARCHAR(50), + p_format export_format, + p_requested_by UUID, + p_date_from TIMESTAMPTZ DEFAULT NULL, + p_date_to TIMESTAMPTZ DEFAULT NULL +) RETURNS UUID AS $$ +DECLARE + v_job_id UUID; + v_config export_configurations%ROWTYPE; +BEGIN + SELECT * INTO v_config FROM export_configurations + WHERE community_id = p_community_id AND export_type = p_export_type AND is_active + LIMIT 1; + + INSERT INTO export_jobs ( + configuration_id, community_id, export_type, format, + date_from, date_to, requested_by + ) VALUES ( + v_config.id, p_community_id, p_export_type, p_format, + p_date_from, p_date_to, p_requested_by + ) RETURNING id INTO v_job_id; + + INSERT INTO export_audit_log (job_id, community_id, action_type, actor_id) + VALUES (v_job_id, p_community_id, 'requested', p_requested_by); + + RETURN v_job_id; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION get_exportable_proposals( + p_community_id UUID, + p_anonymize BOOLEAN DEFAULT TRUE, + p_date_from TIMESTAMPTZ DEFAULT NULL, + p_date_to TIMESTAMPTZ DEFAULT NULL +) RETURNS TABLE ( + id UUID, + title VARCHAR, + content TEXT, + author_id TEXT, + status VARCHAR, + created_at TIMESTAMPTZ, + vote_count BIGINT +) AS $$ +BEGIN + RETURN QUERY + SELECT + p.id, + p.title, + p.content, + CASE WHEN p_anonymize THEN encode(sha256(p.author_id::text::bytea), 'hex') ELSE p.author_id::text END, + p.status::VARCHAR, + p.created_at, + COALESCE((SELECT COUNT(*) FROM votes v WHERE v.proposal_id = p.id), 0) + FROM proposals p + WHERE p.community_id = p_community_id + AND (p_date_from IS NULL OR p.created_at >= p_date_from) + AND (p_date_to IS NULL OR p.created_at <= p_date_to) + ORDER BY p.created_at DESC; +END; +$$ LANGUAGE plpgsql; + +-- ============================================================================ +-- VIEWS +-- ============================================================================ + +CREATE OR REPLACE VIEW available_exports AS +SELECT + ec.id AS config_id, + ec.community_id, + c.name AS community_name, + ec.name AS export_name, + ec.description, + ec.export_type, + ec.public_access, + se.id AS latest_export_id, + se.generated_at AS latest_generated, + se.record_count AS latest_record_count +FROM export_configurations ec +JOIN communities c ON c.id = ec.community_id +LEFT JOIN scheduled_exports se ON se.configuration_id = ec.id AND se.is_current = true +WHERE ec.is_active = true; + +-- 
============================================================================ +-- PLUGIN REGISTRATION +-- ============================================================================ + +INSERT INTO plugins (name, description, version, is_core, is_active, settings_schema) +VALUES ( + 'public_data_export', + 'Privacy-aware public data exports in CSV/JSON formats. Supports anonymization, scheduled generation, and complete audit trails.', + '1.0.0', + false, + true, + jsonb_build_object( + 'type', 'object', + 'properties', jsonb_build_object( + 'default_anonymize', jsonb_build_object('type', 'boolean', 'default', true), + 'allow_public_exports', jsonb_build_object('type', 'boolean', 'default', true), + 'max_export_records', jsonb_build_object('type', 'integer', 'default', 100000), + 'export_retention_days', jsonb_build_object('type', 'integer', 'default', 30), + 'auto_generate_daily', jsonb_build_object('type', 'boolean', 'default', false) + ) + ) +) ON CONFLICT (name) DO UPDATE SET version = EXCLUDED.version; diff --git a/backend/migrations/20260126330000_federation.sql b/backend/migrations/20260126330000_federation.sql new file mode 100644 index 0000000..1891bbe --- /dev/null +++ b/backend/migrations/20260126330000_federation.sql @@ -0,0 +1,346 @@ +-- ============================================================================ +-- MULTI-COMMUNITY FEDERATION PLUGIN +-- Cross-instance collaboration, federated decisions, shared governance +-- ============================================================================ + +CREATE TYPE federation_status AS ENUM ('pending', 'active', 'suspended', 'revoked'); +CREATE TYPE sync_direction AS ENUM ('push', 'pull', 'bidirectional'); + +-- ============================================================================ +-- FEDERATED INSTANCES +-- Known remote Likwid instances +-- ============================================================================ + +CREATE TABLE federated_instances ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + instance_url VARCHAR(500) NOT NULL UNIQUE, + instance_name VARCHAR(200) NOT NULL, + instance_description TEXT, + + -- Trust & verification + public_key TEXT, + is_verified BOOLEAN NOT NULL DEFAULT FALSE, + verified_at TIMESTAMPTZ, + trust_level INT NOT NULL DEFAULT 1, -- 1-5 + + -- Status + status federation_status NOT NULL DEFAULT 'pending', + + -- Capabilities + supported_features VARCHAR(100)[] DEFAULT '{}', + protocol_version VARCHAR(20) NOT NULL DEFAULT '1.0', + + -- Statistics + last_sync_at TIMESTAMPTZ, + sync_failures INT NOT NULL DEFAULT 0, + total_syncs INT NOT NULL DEFAULT 0, + + -- Metadata + admin_contact VARCHAR(300), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_federated_instances_status ON federated_instances(status); +CREATE INDEX idx_federated_instances_url ON federated_instances(instance_url); + +-- ============================================================================ +-- COMMUNITY FEDERATIONS +-- Links between local and remote communities +-- ============================================================================ + +CREATE TABLE community_federations ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + local_community_id UUID NOT NULL REFERENCES communities(id) ON DELETE CASCADE, + remote_instance_id UUID NOT NULL REFERENCES federated_instances(id) ON DELETE CASCADE, + remote_community_id UUID NOT NULL, + remote_community_name VARCHAR(200), + + -- Configuration + sync_direction sync_direction NOT NULL DEFAULT 
'bidirectional', + sync_proposals BOOLEAN NOT NULL DEFAULT TRUE, + sync_votes BOOLEAN NOT NULL DEFAULT FALSE, + sync_comments BOOLEAN NOT NULL DEFAULT FALSE, + sync_decisions BOOLEAN NOT NULL DEFAULT TRUE, + + -- Status + status federation_status NOT NULL DEFAULT 'pending', + approved_locally BOOLEAN NOT NULL DEFAULT FALSE, + approved_remotely BOOLEAN NOT NULL DEFAULT FALSE, + + -- Mapping + member_mapping JSONB DEFAULT '{}', + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(local_community_id, remote_instance_id, remote_community_id) +); + +CREATE INDEX idx_community_federations_local ON community_federations(local_community_id); +CREATE INDEX idx_community_federations_remote ON community_federations(remote_instance_id); + +-- ============================================================================ +-- FEDERATED PROPOSALS +-- Proposals shared across federated communities +-- ============================================================================ + +CREATE TABLE federated_proposals ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + federation_id UUID NOT NULL REFERENCES community_federations(id) ON DELETE CASCADE, + + -- Local reference + local_proposal_id UUID REFERENCES proposals(id) ON DELETE SET NULL, + + -- Remote reference + remote_proposal_id UUID NOT NULL, + remote_url VARCHAR(500), + + -- Sync metadata + is_origin_local BOOLEAN NOT NULL, + last_synced_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + sync_hash VARCHAR(64), + + -- Status + sync_status VARCHAR(50) NOT NULL DEFAULT 'synced', + conflict_detected BOOLEAN NOT NULL DEFAULT FALSE, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_federated_proposals_federation ON federated_proposals(federation_id); +CREATE INDEX idx_federated_proposals_local ON federated_proposals(local_proposal_id); + +-- ============================================================================ +-- FEDERATED DECISIONS +-- Cross-community voting results +-- ============================================================================ + +CREATE TABLE federated_decisions ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + federated_proposal_id UUID NOT NULL REFERENCES federated_proposals(id) ON DELETE CASCADE, + + -- Decision details + decision_type VARCHAR(50) NOT NULL, + outcome VARCHAR(50) NOT NULL, + + -- Aggregated results (no individual votes) + participating_communities INT NOT NULL DEFAULT 1, + total_votes INT NOT NULL DEFAULT 0, + approval_percentage DECIMAL(5,2), + + -- Per-community breakdown + community_results JSONB NOT NULL DEFAULT '[]', + + -- Finalization + is_final BOOLEAN NOT NULL DEFAULT FALSE, + finalized_at TIMESTAMPTZ, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_federated_decisions_proposal ON federated_decisions(federated_proposal_id); + +-- ============================================================================ +-- SYNC LOG +-- Track all federation sync operations +-- ============================================================================ + +CREATE TABLE federation_sync_log ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + federation_id UUID REFERENCES community_federations(id) ON DELETE SET NULL, + instance_id UUID REFERENCES federated_instances(id) ON DELETE SET NULL, + + -- Operation + operation_type VARCHAR(50) NOT NULL, + direction sync_direction NOT NULL, + + -- Results + records_sent INT NOT NULL DEFAULT 0, + records_received INT NOT NULL DEFAULT 0, + conflicts_detected INT NOT NULL DEFAULT 0, + 
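+    -- (records_sent / records_received / conflicts_detected are per-run
+    --  counters for this single sync operation; cumulative totals such as
+    --  total_syncs and sync_failures live on federated_instances)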
+ -- Status + success BOOLEAN NOT NULL, + error_message TEXT, + duration_ms INT, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_sync_log_federation ON federation_sync_log(federation_id); +CREATE INDEX idx_sync_log_time ON federation_sync_log(created_at); + +-- ============================================================================ +-- FEDERATION REQUESTS +-- Pending federation requests +-- ============================================================================ + +CREATE TABLE federation_requests ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + + -- Request details + from_instance_url VARCHAR(500) NOT NULL, + from_community_name VARCHAR(200), + to_community_id UUID REFERENCES communities(id) ON DELETE CASCADE, + + -- Message + request_message TEXT, + proposed_config JSONB, + + -- Status + status VARCHAR(50) NOT NULL DEFAULT 'pending', + reviewed_by UUID REFERENCES users(id), + reviewed_at TIMESTAMPTZ, + review_notes TEXT, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + expires_at TIMESTAMPTZ DEFAULT NOW() + INTERVAL '30 days' +); + +CREATE INDEX idx_federation_requests_community ON federation_requests(to_community_id); +CREATE INDEX idx_federation_requests_pending ON federation_requests(status) WHERE status = 'pending'; + +-- ============================================================================ +-- HELPER FUNCTIONS +-- ============================================================================ + +CREATE OR REPLACE FUNCTION register_federated_instance( + p_url VARCHAR(500), + p_name VARCHAR(200), + p_description TEXT DEFAULT NULL, + p_public_key TEXT DEFAULT NULL +) RETURNS UUID AS $$ +DECLARE + v_instance_id UUID; +BEGIN + INSERT INTO federated_instances (instance_url, instance_name, instance_description, public_key) + VALUES (p_url, p_name, p_description, p_public_key) + ON CONFLICT (instance_url) DO UPDATE SET + instance_name = p_name, + instance_description = COALESCE(p_description, federated_instances.instance_description), + public_key = COALESCE(p_public_key, federated_instances.public_key), + updated_at = NOW() + RETURNING id INTO v_instance_id; + + RETURN v_instance_id; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION create_community_federation( + p_local_community_id UUID, + p_remote_instance_id UUID, + p_remote_community_id UUID, + p_remote_community_name VARCHAR(200), + p_sync_direction sync_direction DEFAULT 'bidirectional' +) RETURNS UUID AS $$ +DECLARE + v_federation_id UUID; +BEGIN + INSERT INTO community_federations ( + local_community_id, remote_instance_id, remote_community_id, + remote_community_name, sync_direction + ) VALUES ( + p_local_community_id, p_remote_instance_id, p_remote_community_id, + p_remote_community_name, p_sync_direction + ) + ON CONFLICT (local_community_id, remote_instance_id, remote_community_id) DO UPDATE SET + remote_community_name = p_remote_community_name, + sync_direction = p_sync_direction, + updated_at = NOW() + RETURNING id INTO v_federation_id; + + RETURN v_federation_id; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION get_federation_stats(p_community_id UUID) +RETURNS TABLE ( + total_federations BIGINT, + active_federations BIGINT, + federated_proposals BIGINT, + total_syncs BIGINT, + last_sync TIMESTAMPTZ +) AS $$ +BEGIN + RETURN QUERY + SELECT + COUNT(DISTINCT cf.id)::BIGINT, + COUNT(DISTINCT cf.id) FILTER (WHERE cf.status = 'active')::BIGINT, + COUNT(DISTINCT fp.id)::BIGINT, + SUM(fi.total_syncs)::BIGINT, + MAX(fi.last_sync_at) + FROM community_federations cf + 
LEFT JOIN federated_instances fi ON fi.id = cf.remote_instance_id + LEFT JOIN federated_proposals fp ON fp.federation_id = cf.id + WHERE cf.local_community_id = p_community_id; +END; +$$ LANGUAGE plpgsql; + +-- ============================================================================ +-- VIEWS +-- ============================================================================ + +CREATE OR REPLACE VIEW active_federations AS +SELECT + cf.id, + cf.local_community_id, + lc.name AS local_community_name, + fi.instance_url, + fi.instance_name, + cf.remote_community_id, + cf.remote_community_name, + cf.sync_direction::text, + cf.status::text, + fi.last_sync_at, + COUNT(fp.id) AS shared_proposals +FROM community_federations cf +JOIN communities lc ON lc.id = cf.local_community_id +JOIN federated_instances fi ON fi.id = cf.remote_instance_id +LEFT JOIN federated_proposals fp ON fp.federation_id = cf.id +WHERE cf.status = 'active' +GROUP BY cf.id, cf.local_community_id, lc.name, fi.instance_url, fi.instance_name, + cf.remote_community_id, cf.remote_community_name, cf.sync_direction, cf.status, fi.last_sync_at; + +CREATE OR REPLACE VIEW federation_health AS +SELECT + fi.id AS instance_id, + fi.instance_url, + fi.instance_name, + fi.status::text, + fi.trust_level, + fi.last_sync_at, + fi.sync_failures, + fi.total_syncs, + CASE + WHEN fi.total_syncs = 0 THEN 0 + ELSE ((fi.total_syncs - fi.sync_failures)::DECIMAL / fi.total_syncs * 100) + END AS success_rate, + COUNT(cf.id) AS community_count +FROM federated_instances fi +LEFT JOIN community_federations cf ON cf.remote_instance_id = fi.id AND cf.status = 'active' +GROUP BY fi.id, fi.instance_url, fi.instance_name, fi.status, fi.trust_level, + fi.last_sync_at, fi.sync_failures, fi.total_syncs; + +-- ============================================================================ +-- PLUGIN REGISTRATION +-- ============================================================================ + +INSERT INTO plugins (name, description, version, is_core, is_active, settings_schema) +VALUES ( + 'federation', + 'Multi-community federation enabling cross-instance collaboration, shared proposals, and federated decision-making.', + '1.0.0', + false, + true, + jsonb_build_object( + 'type', 'object', + 'properties', jsonb_build_object( + 'allow_incoming_requests', jsonb_build_object('type', 'boolean', 'default', true), + 'auto_approve_verified', jsonb_build_object('type', 'boolean', 'default', false), + 'min_trust_level', jsonb_build_object('type', 'integer', 'default', 2, 'minimum', 1, 'maximum', 5), + 'sync_interval_minutes', jsonb_build_object('type', 'integer', 'default', 15), + 'share_vote_counts', jsonb_build_object('type', 'boolean', 'default', true), + 'share_individual_votes', jsonb_build_object('type', 'boolean', 'default', false) + ) + ) +) ON CONFLICT (name) DO UPDATE SET version = EXCLUDED.version; diff --git a/backend/migrations/20260127105000_wasm_plugin_runtime.sql b/backend/migrations/20260127105000_wasm_plugin_runtime.sql new file mode 100644 index 0000000..11fbfdf --- /dev/null +++ b/backend/migrations/20260127105000_wasm_plugin_runtime.sql @@ -0,0 +1,12 @@ +-- WASM plugin runtime hardening + +CREATE EXTENSION IF NOT EXISTS pgcrypto; + +ALTER TABLE plugins +ADD COLUMN IF NOT EXISTS default_settings JSONB NOT NULL DEFAULT '{}'::jsonb; + +ALTER TABLE community_plugin_packages +ADD COLUMN IF NOT EXISTS settings JSONB NOT NULL DEFAULT '{}'::jsonb; + +CREATE INDEX IF NOT EXISTS idx_community_plugin_packages_active +ON community_plugin_packages(community_id, 
is_active); diff --git a/backend/migrations/20260127150000_demo_seed_data.sql b/backend/migrations/20260127150000_demo_seed_data.sql new file mode 100644 index 0000000..c8c57b1 --- /dev/null +++ b/backend/migrations/20260127150000_demo_seed_data.sql @@ -0,0 +1,443 @@ +-- Demo Seed Data for Likwid +-- This migration creates realistic governance data for demonstration purposes +-- +-- IMPORTANT: This migration should ONLY be run on demo instances. +-- For production deployments, either: +-- 1. Delete this migration file before running migrations +-- 2. Use a separate database for demo (recommended) +-- +-- The data includes: +-- - 3 demo accounts (contributor, moderator, observer) - password: demo123 +-- - 10 fictional community members +-- - 3 communities (Aurora Framework, Civic Commons, Regional Makers) +-- - 7 proposals in various states +-- - Delegation relationships +-- - Moderation log entries +-- - Discussion comments + +-- ============================================================================ +-- DEMO USERS +-- ============================================================================ + +-- Demo accounts (password: demo123 - bcrypt hash) +-- Hash generated with cost 12: $2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4.S.NtqOGBNPB2Wy +INSERT INTO users (id, username, email, password_hash, display_name, is_active) VALUES + ('d0000001-0000-0000-0000-000000000001', 'contributor', 'contributor@demo.likwid.org', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4.S.NtqOGBNPB2Wy', 'Demo Contributor', true), + ('d0000001-0000-0000-0000-000000000002', 'moderator', 'moderator@demo.likwid.org', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4.S.NtqOGBNPB2Wy', 'Demo Moderator', true), + ('d0000001-0000-0000-0000-000000000003', 'observer', 'observer@demo.likwid.org', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4.S.NtqOGBNPB2Wy', 'Demo Observer', true) +ON CONFLICT (username) DO NOTHING; + +-- Fictional community members +INSERT INTO users (id, username, email, password_hash, display_name, is_active) VALUES + ('d0000002-0000-0000-0000-000000000001', 'alice_dev', 'alice@demo.likwid.org', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4.S.NtqOGBNPB2Wy', 'Alice Chen', true), + ('d0000002-0000-0000-0000-000000000002', 'bob_maintainer', 'bob@demo.likwid.org', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4.S.NtqOGBNPB2Wy', 'Bob Martinez', true), + ('d0000002-0000-0000-0000-000000000003', 'carol_designer', 'carol@demo.likwid.org', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4.S.NtqOGBNPB2Wy', 'Carol Nguyen', true), + ('d0000002-0000-0000-0000-000000000004', 'david_infra', 'david@demo.likwid.org', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4.S.NtqOGBNPB2Wy', 'David Kim', true), + ('d0000002-0000-0000-0000-000000000005', 'elena_community', 'elena@demo.likwid.org', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4.S.NtqOGBNPB2Wy', 'Elena Rossi', true), + ('d0000002-0000-0000-0000-000000000006', 'frank_policy', 'frank@demo.likwid.org', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4.S.NtqOGBNPB2Wy', 'Frank Weber', true), + ('d0000002-0000-0000-0000-000000000007', 'grace_legal', 'grace@demo.likwid.org', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4.S.NtqOGBNPB2Wy', 'Grace Okonkwo', true), + ('d0000002-0000-0000-0000-000000000008', 'henry_docs', 'henry@demo.likwid.org', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4.S.NtqOGBNPB2Wy', 'Henry Liu', true), + ('d0000002-0000-0000-0000-000000000009', 'iris_outreach', 'iris@demo.likwid.org', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4.S.NtqOGBNPB2Wy', 'Iris 
Johansson', true), + ('d0000002-0000-0000-0000-000000000010', 'james_security', 'james@demo.likwid.org', '$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/X4.S.NtqOGBNPB2Wy', 'James Park', true) +ON CONFLICT (username) DO NOTHING; + +-- ============================================================================ +-- DEMO COMMUNITIES +-- ============================================================================ + +-- Aurora Framework - Open Source Project +INSERT INTO communities (id, name, slug, description, settings, is_active) VALUES + ('c0000001-0000-0000-0000-000000000001', 'Aurora Framework', 'aurora', + 'A modern web framework for building scalable applications. This community governs technical decisions, RFC processes, and project direction.', + '{"voting_method": "schulze", "require_read_before_vote": true, "min_discussion_days": 3}', + true) +ON CONFLICT (slug) DO NOTHING; + +-- Civic Commons Network - Political Movement +INSERT INTO communities (id, name, slug, description, settings, is_active) VALUES + ('c0000001-0000-0000-0000-000000000002', 'Civic Commons Network', 'civic-commons', + 'A grassroots civic organization dedicated to participatory democracy and community engagement. We use quadratic voting for budget allocation and liquid delegation for policy decisions.', + '{"voting_method": "quadratic", "delegation_enabled": true, "transparency_level": "full"}', + true) +ON CONFLICT (slug) DO NOTHING; + +-- Regional Makers Collective - Federation +INSERT INTO communities (id, name, slug, description, settings, is_active) VALUES + ('c0000001-0000-0000-0000-000000000003', 'Regional Makers Collective', 'makers', + 'A federation of local makerspaces coordinating on shared resources, equipment purchases, and inter-chapter policies. Demonstrates federated governance across autonomous chapters.', + '{"voting_method": "approval", "federation_enabled": true, "chapter_autonomy": true}', + true) +ON CONFLICT (slug) DO NOTHING; + +-- ============================================================================ +-- COMMUNITY MEMBERSHIPS +-- ============================================================================ + +-- Aurora Framework members +INSERT INTO community_members (user_id, community_id, role) VALUES + ('d0000001-0000-0000-0000-000000000001', 'c0000001-0000-0000-0000-000000000001', 'member'), + ('d0000001-0000-0000-0000-000000000002', 'c0000001-0000-0000-0000-000000000001', 'moderator'), + ('d0000001-0000-0000-0000-000000000003', 'c0000001-0000-0000-0000-000000000001', 'member'), + ('d0000002-0000-0000-0000-000000000001', 'c0000001-0000-0000-0000-000000000001', 'admin'), + ('d0000002-0000-0000-0000-000000000002', 'c0000001-0000-0000-0000-000000000001', 'maintainer'), + ('d0000002-0000-0000-0000-000000000003', 'c0000001-0000-0000-0000-000000000001', 'member'), + ('d0000002-0000-0000-0000-000000000004', 'c0000001-0000-0000-0000-000000000001', 'maintainer'), + ('d0000002-0000-0000-0000-000000000008', 'c0000001-0000-0000-0000-000000000001', 'member'), + ('d0000002-0000-0000-0000-000000000010', 'c0000001-0000-0000-0000-000000000001', 'member') +ON CONFLICT (user_id, community_id) DO NOTHING; + +-- Civic Commons members +INSERT INTO community_members (user_id, community_id, role) VALUES + ('d0000001-0000-0000-0000-000000000001', 'c0000001-0000-0000-0000-000000000002', 'member'), + ('d0000001-0000-0000-0000-000000000002', 'c0000001-0000-0000-0000-000000000002', 'moderator'), + ('d0000001-0000-0000-0000-000000000003', 'c0000001-0000-0000-0000-000000000002', 'member'), + 
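+    -- (the fictional members below include the community admin and one member
+    --  with the dedicated 'delegate' role used by the delegation demo data)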
('d0000002-0000-0000-0000-000000000005', 'c0000001-0000-0000-0000-000000000002', 'admin'), + ('d0000002-0000-0000-0000-000000000006', 'c0000001-0000-0000-0000-000000000002', 'delegate'), + ('d0000002-0000-0000-0000-000000000007', 'c0000001-0000-0000-0000-000000000002', 'member'), + ('d0000002-0000-0000-0000-000000000009', 'c0000001-0000-0000-0000-000000000002', 'member') +ON CONFLICT (user_id, community_id) DO NOTHING; + +-- Makers Collective members +INSERT INTO community_members (user_id, community_id, role) VALUES + ('d0000001-0000-0000-0000-000000000001', 'c0000001-0000-0000-0000-000000000003', 'member'), + ('d0000001-0000-0000-0000-000000000002', 'c0000001-0000-0000-0000-000000000003', 'moderator'), + ('d0000001-0000-0000-0000-000000000003', 'c0000001-0000-0000-0000-000000000003', 'member'), + ('d0000002-0000-0000-0000-000000000003', 'c0000001-0000-0000-0000-000000000003', 'member'), + ('d0000002-0000-0000-0000-000000000004', 'c0000001-0000-0000-0000-000000000003', 'admin'), + ('d0000002-0000-0000-0000-000000000008', 'c0000001-0000-0000-0000-000000000003', 'member'), + ('d0000002-0000-0000-0000-000000000009', 'c0000001-0000-0000-0000-000000000003', 'member') +ON CONFLICT (user_id, community_id) DO NOTHING; + +-- ============================================================================ +-- VOTING IDENTITIES +-- ============================================================================ + +INSERT INTO voting_identities (user_id, community_id, pseudonym) VALUES + ('d0000001-0000-0000-0000-000000000001', 'c0000001-0000-0000-0000-000000000001', 'voter_aurora_001'), + ('d0000001-0000-0000-0000-000000000002', 'c0000001-0000-0000-0000-000000000001', 'voter_aurora_002'), + ('d0000002-0000-0000-0000-000000000001', 'c0000001-0000-0000-0000-000000000001', 'voter_aurora_003'), + ('d0000002-0000-0000-0000-000000000002', 'c0000001-0000-0000-0000-000000000001', 'voter_aurora_004'), + ('d0000002-0000-0000-0000-000000000004', 'c0000001-0000-0000-0000-000000000001', 'voter_aurora_005'), + ('d0000001-0000-0000-0000-000000000001', 'c0000001-0000-0000-0000-000000000002', 'voter_civic_001'), + ('d0000002-0000-0000-0000-000000000005', 'c0000001-0000-0000-0000-000000000002', 'voter_civic_002'), + ('d0000002-0000-0000-0000-000000000006', 'c0000001-0000-0000-0000-000000000002', 'voter_civic_003'), + ('d0000002-0000-0000-0000-000000000007', 'c0000001-0000-0000-0000-000000000002', 'voter_civic_004') +ON CONFLICT (user_id, community_id) DO NOTHING; + +-- ============================================================================ +-- TOPICS (for delegation) +-- ============================================================================ + +INSERT INTO topics (id, community_id, name, slug, description) VALUES + ('a0000001-0000-0000-0000-000000000001', 'c0000001-0000-0000-0000-000000000001', 'Architecture', 'architecture', 'System architecture and design decisions'), + ('a0000001-0000-0000-0000-000000000002', 'c0000001-0000-0000-0000-000000000001', 'API Design', 'api', 'Public API design and breaking changes'), + ('a0000001-0000-0000-0000-000000000003', 'c0000001-0000-0000-0000-000000000001', 'Security', 'security', 'Security policies and vulnerability handling'), + ('a0000001-0000-0000-0000-000000000004', 'c0000001-0000-0000-0000-000000000001', 'Documentation', 'docs', 'Documentation standards and practices'), + ('a0000001-0000-0000-0000-000000000005', 'c0000001-0000-0000-0000-000000000002', 'Budget', 'budget', 'Financial decisions and budget allocation'), + ('a0000001-0000-0000-0000-000000000006', 
'c0000001-0000-0000-0000-000000000002', 'Policy', 'policy', 'Organizational policies and bylaws'), + ('a0000001-0000-0000-0000-000000000007', 'c0000001-0000-0000-0000-000000000002', 'Outreach', 'outreach', 'Community outreach and partnerships'), + ('a0000001-0000-0000-0000-000000000008', 'c0000001-0000-0000-0000-000000000003', 'Equipment', 'equipment', 'Shared equipment purchases and maintenance'), + ('a0000001-0000-0000-0000-000000000009', 'c0000001-0000-0000-0000-000000000003', 'Events', 'events', 'Inter-chapter events and workshops') +ON CONFLICT (community_id, slug) DO NOTHING; + +-- ============================================================================ +-- PROPOSALS - AURORA FRAMEWORK +-- ============================================================================ + +-- Closed proposal: RFC for async runtime +INSERT INTO proposals (id, community_id, author_id, title, description, status, voting_method, topic_id, voting_starts_at, voting_ends_at, created_at) VALUES + ('b0000001-0000-0000-0000-000000000001', + 'c0000001-0000-0000-0000-000000000001', + 'd0000002-0000-0000-0000-000000000001', + 'RFC-001: Adopt Tokio as the default async runtime', + E'## Summary\n\nThis RFC proposes adopting Tokio as Aurora Framework''s default async runtime.\n\n## Motivation\n\nCurrently, Aurora supports multiple async runtimes, which causes:\n- Fragmented ecosystem\n- Duplicate testing effort\n- Confusion for new users\n\n## Proposal\n\nMake Tokio the default, with optional support for async-std via feature flags.\n\n## Alternatives Considered\n\n1. **Status quo** - Continue supporting all runtimes equally\n2. **async-std default** - Less ecosystem support\n3. **No default** - Too confusing for beginners', + 'closed', + 'schulze', + 'a0000001-0000-0000-0000-000000000001', + NOW() - INTERVAL '30 days', + NOW() - INTERVAL '23 days', + NOW() - INTERVAL '45 days') +ON CONFLICT DO NOTHING; + +-- Options for RFC-001 +INSERT INTO proposal_options (id, proposal_id, label, description, sort_order) VALUES + ('e0000001-0000-0000-0000-000000000001', 'b0000001-0000-0000-0000-000000000001', 'Adopt Tokio as default', 'Make Tokio the default runtime with async-std as optional', 1), + ('e0000001-0000-0000-0000-000000000002', 'b0000001-0000-0000-0000-000000000001', 'Keep current multi-runtime support', 'Continue supporting all runtimes equally', 2), + ('e0000001-0000-0000-0000-000000000003', 'b0000001-0000-0000-0000-000000000001', 'Defer decision', 'Postpone until more data is available', 3) +ON CONFLICT DO NOTHING; + +-- Active voting proposal: Breaking change policy +INSERT INTO proposals (id, community_id, author_id, title, description, status, voting_method, topic_id, voting_starts_at, voting_ends_at, created_at) VALUES + ('b0000001-0000-0000-0000-000000000002', + 'c0000001-0000-0000-0000-000000000001', + 'd0000002-0000-0000-0000-000000000002', + 'RFC-002: Semantic versioning and breaking change policy', + E'## Summary\n\nDefine a clear policy for handling breaking changes in Aurora Framework.\n\n## Current Problem\n\nNo formal policy exists for:\n- When breaking changes are allowed\n- How to communicate them\n- Deprecation timelines\n\n## Proposed Policy\n\n1. Breaking changes only in major versions\n2. 6-month deprecation warnings\n3. Migration guides required\n4. 
LTS releases every 2 years', + 'voting', + 'schulze', + 'a0000001-0000-0000-0000-000000000002', + NOW() - INTERVAL '2 days', + NOW() + INTERVAL '5 days', + NOW() - INTERVAL '14 days') +ON CONFLICT DO NOTHING; + +-- Options for RFC-002 +INSERT INTO proposal_options (id, proposal_id, label, description, sort_order) VALUES + ('e0000001-0000-0000-0000-000000000004', 'b0000001-0000-0000-0000-000000000002', 'Strict semver with 6-month deprecation', 'Full proposal as described', 1), + ('e0000001-0000-0000-0000-000000000005', 'b0000001-0000-0000-0000-000000000002', 'Strict semver with 3-month deprecation', 'Faster iteration with shorter warnings', 2), + ('e0000001-0000-0000-0000-000000000006', 'b0000001-0000-0000-0000-000000000002', 'Epoch-based versioning', 'Major redesigns in epochs, minor breaking changes allowed', 3), + ('e0000001-0000-0000-0000-000000000007', 'b0000001-0000-0000-0000-000000000002', 'No formal policy', 'Handle case by case', 4) +ON CONFLICT DO NOTHING; + +-- Discussion proposal: Security disclosure process +INSERT INTO proposals (id, community_id, author_id, title, description, status, voting_method, topic_id, created_at) VALUES + ('b0000001-0000-0000-0000-000000000003', + 'c0000001-0000-0000-0000-000000000001', + 'd0000002-0000-0000-0000-000000000010', + 'RFC-003: Security vulnerability disclosure process', + E'## Summary\n\nEstablish a formal security vulnerability disclosure process for Aurora.\n\n## Background\n\nAs Aurora grows in adoption, we need a clear process for:\n- Receiving security reports\n- Coordinating fixes\n- Communicating with users\n- CVE assignment\n\n## Open Questions\n\n1. Should we have a bug bounty program?\n2. What is the appropriate disclosure timeline?\n3. Who should be on the security team?', + 'discussion', + 'approval', + 'a0000001-0000-0000-0000-000000000003', + NOW() - INTERVAL '5 days') +ON CONFLICT DO NOTHING; + +-- ============================================================================ +-- PROPOSALS - CIVIC COMMONS +-- ============================================================================ + +-- Closed controversial proposal: Meeting format change (rejected) +INSERT INTO proposals (id, community_id, author_id, title, description, status, voting_method, topic_id, voting_starts_at, voting_ends_at, created_at) VALUES + ('b0000001-0000-0000-0000-000000000004', + 'c0000001-0000-0000-0000-000000000002', + 'd0000002-0000-0000-0000-000000000006', + 'Change monthly meetings to quarterly', + E'## Proposal\n\nReduce the frequency of general assembly meetings from monthly to quarterly.\n\n## Rationale\n\nMonthly meetings have low attendance (average 23%). 
Quarterly meetings would:\n- Allow more preparation time\n- Reduce volunteer burnout\n- Enable higher-quality discussions\n\n## Opposition Arguments\n\nSome members argue this reduces democratic participation opportunities.', + 'closed', + 'approval', + 'a0000001-0000-0000-0000-000000000006', + NOW() - INTERVAL '60 days', + NOW() - INTERVAL '53 days', + NOW() - INTERVAL '75 days') +ON CONFLICT DO NOTHING; + +-- Options for quarterly meetings +INSERT INTO proposal_options (id, proposal_id, label, description, sort_order) VALUES + ('e0000001-0000-0000-0000-000000000008', 'b0000001-0000-0000-0000-000000000004', 'Approve change to quarterly', 'Accept the proposal', 1), + ('e0000001-0000-0000-0000-000000000009', 'b0000001-0000-0000-0000-000000000004', 'Reject - keep monthly', 'Maintain current schedule', 2) +ON CONFLICT DO NOTHING; + +-- Active: Budget allocation using quadratic voting +INSERT INTO proposals (id, community_id, author_id, title, description, status, voting_method, topic_id, voting_starts_at, voting_ends_at, created_at) VALUES + ('b0000001-0000-0000-0000-000000000005', + 'c0000001-0000-0000-0000-000000000002', + 'd0000002-0000-0000-0000-000000000005', + '2026 Q2 Budget Allocation', + E'## Budget Overview\n\nTotal available: €15,000\n\n## Proposed Allocations\n\nUse quadratic voting to allocate budget across these initiatives:\n\n1. **Community Outreach** - Flyers, events, social media\n2. **Legal Fund** - Lawyer retainer for policy advocacy\n3. **Digital Infrastructure** - Website, tools, hosting\n4. **Training Programs** - Workshops for members\n5. **Emergency Reserve** - Unallocated contingency\n\nEach member receives 100 voice credits to allocate.', + 'voting', + 'quadratic', + 'a0000001-0000-0000-0000-000000000005', + NOW() - INTERVAL '1 day', + NOW() + INTERVAL '6 days', + NOW() - INTERVAL '10 days') +ON CONFLICT DO NOTHING; + +-- Options for budget allocation +INSERT INTO proposal_options (id, proposal_id, label, description, sort_order) VALUES + ('e0000001-0000-0000-0000-000000000010', 'b0000001-0000-0000-0000-000000000005', 'Community Outreach', 'Marketing, events, awareness campaigns', 1), + ('e0000001-0000-0000-0000-000000000011', 'b0000001-0000-0000-0000-000000000005', 'Legal Fund', 'Policy advocacy and legal support', 2), + ('e0000001-0000-0000-0000-000000000012', 'b0000001-0000-0000-0000-000000000005', 'Digital Infrastructure', 'Websites, tools, and hosting', 3), + ('e0000001-0000-0000-0000-000000000013', 'b0000001-0000-0000-0000-000000000005', 'Training Programs', 'Member education and workshops', 4), + ('e0000001-0000-0000-0000-000000000014', 'b0000001-0000-0000-0000-000000000005', 'Emergency Reserve', 'Contingency fund', 5) +ON CONFLICT DO NOTHING; + +-- ============================================================================ +-- PROPOSALS - MAKERS COLLECTIVE +-- ============================================================================ + +-- Closed: Shared laser cutter purchase +INSERT INTO proposals (id, community_id, author_id, title, description, status, voting_method, topic_id, voting_starts_at, voting_ends_at, created_at) VALUES + ('b0000001-0000-0000-0000-000000000006', + 'c0000001-0000-0000-0000-000000000003', + 'd0000002-0000-0000-0000-000000000004', + 'Shared laser cutter purchase - Glowforge Pro', + E'## Proposal\n\nPurchase a Glowforge Pro laser cutter (€6,500) for shared use across chapters.\n\n## Hosting\n\nThe machine would be hosted at the Central Chapter, with a booking system for all members.\n\n## Cost Sharing\n\n- 40% Central Chapter\n- 
20% each for North, South, and East chapters\n\n## Maintenance\n\nMonthly maintenance fund of €50 contributed equally.', + 'closed', + 'approval', + 'a0000001-0000-0000-0000-000000000008', + NOW() - INTERVAL '45 days', + NOW() - INTERVAL '38 days', + NOW() - INTERVAL '60 days') +ON CONFLICT DO NOTHING; + +-- Options for laser cutter +INSERT INTO proposal_options (id, proposal_id, label, description, sort_order) VALUES + ('e0000001-0000-0000-0000-000000000015', 'b0000001-0000-0000-0000-000000000006', 'Approve purchase', 'Buy the Glowforge Pro with proposed cost sharing', 1), + ('e0000001-0000-0000-0000-000000000016', 'b0000001-0000-0000-0000-000000000006', 'Reject', 'Do not proceed with purchase', 2) +ON CONFLICT DO NOTHING; + +-- Active: Inter-chapter workshop series +INSERT INTO proposals (id, community_id, author_id, title, description, status, voting_method, topic_id, voting_starts_at, voting_ends_at, created_at) VALUES + ('b0000001-0000-0000-0000-000000000007', + 'c0000001-0000-0000-0000-000000000003', + 'd0000002-0000-0000-0000-000000000009', + 'Summer 2026 Inter-Chapter Workshop Series', + E'## Proposal\n\nOrganize a series of traveling workshops where experts from each chapter teach at other locations.\n\n## Proposed Topics\n\n1. CNC routing basics (North Chapter)\n2. Electronics prototyping (Central Chapter)\n3. 3D printing materials (South Chapter)\n4. Woodworking joinery (East Chapter)\n\n## Schedule\n\nOne weekend per month, June-September 2026.', + 'voting', + 'approval', + 'a0000001-0000-0000-0000-000000000009', + NOW() - INTERVAL '3 days', + NOW() + INTERVAL '4 days', + NOW() - INTERVAL '12 days') +ON CONFLICT DO NOTHING; + +-- Options for workshop series +INSERT INTO proposal_options (id, proposal_id, label, description, sort_order) VALUES + ('e0000001-0000-0000-0000-000000000017', 'b0000001-0000-0000-0000-000000000007', 'Approve full series', 'Run all 4 workshops as proposed', 1), + ('e0000001-0000-0000-0000-000000000018', 'b0000001-0000-0000-0000-000000000007', 'Pilot with 2 workshops', 'Start with 2 workshops to test format', 2), + ('e0000001-0000-0000-0000-000000000019', 'b0000001-0000-0000-0000-000000000007', 'Defer to fall', 'Better timing after summer holidays', 3) +ON CONFLICT DO NOTHING; + +-- ============================================================================ +-- DELEGATIONS +-- ============================================================================ + +-- Alice delegates to Bob on architecture topics +INSERT INTO delegations (id, delegator_id, delegate_id, scope, community_id, topic_id, is_active, created_at) VALUES + ('de000001-0000-0000-0000-000000000001', + 'd0000001-0000-0000-0000-000000000001', + 'd0000002-0000-0000-0000-000000000002', + 'topic', + 'c0000001-0000-0000-0000-000000000001', + 'a0000001-0000-0000-0000-000000000001', + true, + NOW() - INTERVAL '90 days') +ON CONFLICT DO NOTHING; + +-- Contributor delegates to Frank on policy topics in Civic Commons +INSERT INTO delegations (id, delegator_id, delegate_id, scope, community_id, topic_id, is_active, created_at) VALUES + ('de000001-0000-0000-0000-000000000002', + 'd0000001-0000-0000-0000-000000000001', + 'd0000002-0000-0000-0000-000000000006', + 'topic', + 'c0000001-0000-0000-0000-000000000002', + 'a0000001-0000-0000-0000-000000000006', + true, + NOW() - INTERVAL '60 days') +ON CONFLICT DO NOTHING; + +-- Observer delegates to Elena globally in Civic Commons +INSERT INTO delegations (id, delegator_id, delegate_id, scope, community_id, is_active, created_at) VALUES + 
('de000001-0000-0000-0000-000000000003', + 'd0000001-0000-0000-0000-000000000003', + 'd0000002-0000-0000-0000-000000000005', + 'community', + 'c0000001-0000-0000-0000-000000000002', + true, + NOW() - INTERVAL '45 days') +ON CONFLICT DO NOTHING; + +-- Revoked delegation example +INSERT INTO delegations (id, delegator_id, delegate_id, scope, community_id, is_active, revoked_at, created_at) VALUES + ('de000001-0000-0000-0000-000000000004', + 'd0000002-0000-0000-0000-000000000003', + 'd0000002-0000-0000-0000-000000000001', + 'community', + 'c0000001-0000-0000-0000-000000000001', + false, + NOW() - INTERVAL '20 days', + NOW() - INTERVAL '100 days') +ON CONFLICT DO NOTHING; + +-- ============================================================================ +-- MODERATION LOG +-- ============================================================================ + +-- Example moderation actions +INSERT INTO moderation_log (community_id, moderator_id, target_user_id, action_type, reason, details, created_at) VALUES + ('c0000001-0000-0000-0000-000000000001', + 'd0000001-0000-0000-0000-000000000002', + NULL, + 'content_edit', + 'Updated RFC-001 description for clarity', + '{"proposal_id": "b0000001-0000-0000-0000-000000000001", "field": "description", "change_type": "formatting"}', + NOW() - INTERVAL '40 days'), + + ('c0000001-0000-0000-0000-000000000002', + 'd0000001-0000-0000-0000-000000000002', + 'd0000002-0000-0000-0000-000000000009', + 'warning', + 'Off-topic discussion in budget proposal thread', + '{"rule": "community_guidelines_3", "comment_id": "comment_example_001"}', + NOW() - INTERVAL '8 days'), + + ('c0000001-0000-0000-0000-000000000003', + 'd0000001-0000-0000-0000-000000000002', + NULL, + 'proposal_extended', + 'Extended voting deadline by 48 hours due to technical issues', + '{"proposal_id": "b0000001-0000-0000-0000-000000000006", "original_end": "2026-01-10T00:00:00Z", "new_end": "2026-01-12T00:00:00Z"}', + NOW() - INTERVAL '39 days'); + +-- ============================================================================ +-- COMMENTS (for deliberation history) +-- ============================================================================ + +INSERT INTO comments (id, proposal_id, author_id, content, created_at) VALUES + ('cc000001-0000-0000-0000-000000000001', + 'b0000001-0000-0000-0000-000000000001', + 'd0000002-0000-0000-0000-000000000002', + 'I support this RFC. Tokio has the best ecosystem support and most active maintenance.', + NOW() - INTERVAL '42 days'), + + ('cc000001-0000-0000-0000-000000000002', + 'b0000001-0000-0000-0000-000000000001', + 'd0000002-0000-0000-0000-000000000004', + 'Concerned about async-std users being left behind. Can we ensure the migration path is clear?', + NOW() - INTERVAL '41 days'), + + ('cc000001-0000-0000-0000-000000000003', + 'b0000001-0000-0000-0000-000000000001', + 'd0000002-0000-0000-0000-000000000001', + 'Good point, David. I will add a migration guide section to the RFC.', + NOW() - INTERVAL '40 days'), + + ('cc000001-0000-0000-0000-000000000004', + 'b0000001-0000-0000-0000-000000000002', + 'd0000002-0000-0000-0000-000000000010', + 'From a security perspective, I strongly prefer the 6-month deprecation window. 
It gives downstream users adequate time to update.', + NOW() - INTERVAL '10 days'), + + ('cc000001-0000-0000-0000-000000000005', + 'b0000001-0000-0000-0000-000000000005', + 'd0000002-0000-0000-0000-000000000007', + 'I think we should prioritize the legal fund given the upcoming policy changes at the regional level.', + NOW() - INTERVAL '5 days') +ON CONFLICT DO NOTHING; + +-- ============================================================================ +-- DELEGATE PROFILES +-- ============================================================================ + +INSERT INTO delegate_profiles (user_id, display_name, bio, accepting_delegations, delegation_policy, total_delegators) VALUES + ('d0000002-0000-0000-0000-000000000002', + 'Bob Martinez', + 'Aurora core maintainer since 2023. Focused on runtime and async systems.', + true, + 'I vote based on technical merit and long-term maintainability. I prioritize backwards compatibility.', + 3), + + ('d0000002-0000-0000-0000-000000000006', + 'Frank Weber', + 'Policy researcher and community organizer. 10 years experience in participatory democracy.', + true, + 'I consult with delegators before major votes. I prioritize inclusive decision-making processes.', + 5), + + ('d0000002-0000-0000-0000-000000000005', + 'Elena Rossi', + 'Community manager for Civic Commons. Focused on member engagement and organizational health.', + true, + 'I vote to maximize member participation and organizational sustainability.', + 2) +ON CONFLICT (user_id) DO NOTHING; diff --git a/backend/src/api/analytics.rs b/backend/src/api/analytics.rs new file mode 100644 index 0000000..fd40f04 --- /dev/null +++ b/backend/src/api/analytics.rs @@ -0,0 +1,147 @@ +//! Governance Analytics API endpoints. + +use axum::{ + extract::{Path, Query, State}, + http::StatusCode, + routing::get, + Json, Router, +}; +use chrono::NaiveDate; +use serde::Deserialize; +use serde_json::Value; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::auth::AuthUser; +use crate::plugins::builtin::governance_analytics::{ + AnalyticsService, GovernanceHealth, ParticipationSnapshot, +}; + +// ============================================================================ +// Query Parameters +// ============================================================================ + +#[derive(Debug, Deserialize)] +pub struct TrendsQuery { + pub days: Option, +} + +#[derive(Debug, Deserialize)] +pub struct ExportQuery { + pub start_date: NaiveDate, + pub end_date: NaiveDate, +} + +// ============================================================================ +// Handlers +// ============================================================================ + +/// Get participation trends for a community +async fn get_participation_trends( + _auth: AuthUser, + Path(community_id): Path, + Query(query): Query, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let days = query.days.unwrap_or(30); + let trends = AnalyticsService::get_participation_trends(&pool, community_id, days) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(trends)) +} + +/// Get current governance health for a community +async fn get_health( + _auth: AuthUser, + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let health = AnalyticsService::get_health(&pool, community_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(health)) +} + +/// Get delegation analytics for a community +async fn get_delegation_analytics( + _auth: AuthUser, + 
Path(community_id): Path<Uuid>,
+    State(pool): State<PgPool>,
+) -> Result<Json<Value>, (StatusCode, String)> {
+    let analytics = AnalyticsService::get_delegation_analytics(&pool, community_id)
+        .await
+        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
+
+    Ok(Json(analytics))
+}
+
+/// Get decision load metrics for a community
+async fn get_decision_load(
+    _auth: AuthUser,
+    Path(community_id): Path<Uuid>,
+    State(pool): State<PgPool>,
+) -> Result<Json<Value>, (StatusCode, String)> {
+    let load = AnalyticsService::get_decision_load(&pool, community_id)
+        .await
+        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
+
+    Ok(Json(load))
+}
+
+/// Get voting method comparison for a community
+async fn get_voting_method_comparison(
+    _auth: AuthUser,
+    Path(community_id): Path<Uuid>,
+    State(pool): State<PgPool>,
+) -> Result<Json<Vec<Value>>, (StatusCode, String)> {
+    let comparison = AnalyticsService::get_voting_method_comparison(&pool, community_id)
+        .await
+        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
+
+    Ok(Json(comparison))
+}
+
+/// Get full analytics dashboard for a community
+async fn get_dashboard(
+    _auth: AuthUser,
+    Path(community_id): Path<Uuid>,
+    State(pool): State<PgPool>,
+) -> Result<Json<Value>, (StatusCode, String)> {
+    let dashboard = AnalyticsService::get_dashboard(&pool, community_id)
+        .await
+        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
+
+    Ok(Json(dashboard))
+}
+
+/// Export analytics data for a date range
+async fn export_data(
+    _auth: AuthUser,
+    Path(community_id): Path<Uuid>,
+    Query(query): Query<ExportQuery>,
+    State(pool): State<PgPool>,
+) -> Result<Json<Value>, (StatusCode, String)> {
+    let data = AnalyticsService::export_data(&pool, community_id, query.start_date, query.end_date)
+        .await
+        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
+
+    Ok(Json(data))
+}
+
+// ============================================================================
+// Router
+// ============================================================================
+
+pub fn router(pool: PgPool) -> Router {
+    Router::new()
+        .route("/api/communities/{community_id}/analytics/dashboard", get(get_dashboard))
+        .route("/api/communities/{community_id}/analytics/health", get(get_health))
+        .route("/api/communities/{community_id}/analytics/participation", get(get_participation_trends))
+        .route("/api/communities/{community_id}/analytics/delegation", get(get_delegation_analytics))
+        .route("/api/communities/{community_id}/analytics/decision-load", get(get_decision_load))
+        .route("/api/communities/{community_id}/analytics/voting-methods", get(get_voting_method_comparison))
+        .route("/api/communities/{community_id}/analytics/export", get(export_data))
+        .with_state(pool)
+}
diff --git a/backend/src/api/approvals.rs b/backend/src/api/approvals.rs
new file mode 100644
index 0000000..d679a36
--- /dev/null
+++ b/backend/src/api/approvals.rs
@@ -0,0 +1,266 @@
+//! Approval workflow API endpoints for user registration and community creation.
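+//!
+//! Rough request flow, as wired up in `router` below (the JSON bodies are
+//! illustrative, not a formal schema; each handler first calls
+//! `require_permission`: USER_MANAGE for registrations, PLATFORM_ADMIN for
+//! community requests):
+//!
+//!   GET  /api/approvals/registrations        -> list pending sign-ups
+//!   POST /api/approvals/registrations/{id}   -> {"approve": true} or {"approve": false, "reason": "..."}
+//!   GET  /api/approvals/communities          -> list pending community requests
+//!   POST /api/approvals/communities/{id}     -> same review body; approval runs approve_community() in the database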
+ +use axum::{ + extract::{Path, Query, State}, + http::StatusCode, + routing::{get, post}, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use sqlx::PgPool; +use uuid::Uuid; +use chrono::{DateTime, Utc}; + +use crate::auth::AuthUser; +use super::permissions::{require_permission, perms}; + +// ============================================================================ +// Types +// ============================================================================ + +#[derive(Debug, Serialize)] +pub struct PendingRegistration { + pub id: Uuid, + pub username: String, + pub email: String, + pub display_name: Option, + pub status: String, + pub created_at: DateTime, + pub expires_at: Option>, +} + +#[derive(Debug, Serialize)] +pub struct PendingCommunity { + pub id: Uuid, + pub name: String, + pub slug: String, + pub description: Option, + pub requested_by: Uuid, + pub requested_by_username: Option, + pub status: String, + pub created_at: DateTime, +} + +#[derive(Debug, Deserialize)] +pub struct ListPendingQuery { + pub status: Option, +} + +#[derive(Debug, Deserialize)] +pub struct ReviewRequest { + pub approve: bool, + pub reason: Option, +} + +#[derive(Debug, Serialize)] +pub struct ReviewResponse { + pub success: bool, + pub created_id: Option, + pub message: String, +} + +// ============================================================================ +// Registration Approval Handlers +// ============================================================================ + +/// List pending registrations (admin only) +async fn list_pending_registrations( + auth: AuthUser, + State(pool): State, + Query(query): Query, +) -> Result>, (StatusCode, String)> { + require_permission(&pool, auth.user_id, perms::USER_MANAGE, None).await?; + + let status_filter = query.status.unwrap_or_else(|| "pending".to_string()); + + let registrations = sqlx::query!( + r#"SELECT id, username, email, display_name, status, created_at, expires_at + FROM pending_registrations + WHERE status = $1 + ORDER BY created_at DESC + LIMIT 100"#, + status_filter + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(registrations.into_iter().map(|r| PendingRegistration { + id: r.id, + username: r.username, + email: r.email, + display_name: r.display_name, + status: r.status.unwrap_or_default(), + created_at: r.created_at.unwrap_or_else(Utc::now), + expires_at: r.expires_at, + }).collect())) +} + +/// Review a pending registration (approve or reject) +async fn review_registration( + auth: AuthUser, + State(pool): State, + Path(pending_id): Path, + Json(req): Json, +) -> Result, (StatusCode, String)> { + require_permission(&pool, auth.user_id, perms::USER_MANAGE, None).await?; + + if req.approve { + // Approve registration + let result = sqlx::query_scalar!( + "SELECT approve_registration($1, $2)", + pending_id, + auth.user_id + ) + .fetch_one(&pool) + .await + .map_err(|e| { + let msg = e.to_string(); + if msg.contains("not found") || msg.contains("already processed") { + (StatusCode::NOT_FOUND, "Pending registration not found or already processed".to_string()) + } else if msg.contains("expired") { + (StatusCode::GONE, "Registration request has expired".to_string()) + } else { + (StatusCode::INTERNAL_SERVER_ERROR, msg) + } + })?; + + Ok(Json(ReviewResponse { + success: true, + created_id: result, + message: "Registration approved, user created".to_string(), + })) + } else { + // Reject registration + let success = sqlx::query_scalar!( + "SELECT reject_registration($1, 
$2, $3)", + pending_id, + auth.user_id, + req.reason + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .unwrap_or(false); + + if success { + Ok(Json(ReviewResponse { + success: true, + created_id: None, + message: "Registration rejected".to_string(), + })) + } else { + Err((StatusCode::NOT_FOUND, "Pending registration not found or already processed".to_string())) + } + } +} + +// ============================================================================ +// Community Approval Handlers +// ============================================================================ + +/// List pending community requests (admin only) +async fn list_pending_communities( + auth: AuthUser, + State(pool): State, + Query(query): Query, +) -> Result>, (StatusCode, String)> { + require_permission(&pool, auth.user_id, perms::PLATFORM_ADMIN, None).await?; + + let status_filter = query.status.unwrap_or_else(|| "pending".to_string()); + + let communities = sqlx::query!( + r#"SELECT pc.id, pc.name, pc.slug, pc.description, pc.requested_by, + pc.status, pc.created_at, u.username as requester_username + FROM pending_communities pc + LEFT JOIN users u ON u.id = pc.requested_by + WHERE pc.status = $1 + ORDER BY pc.created_at DESC + LIMIT 100"#, + status_filter + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(communities.into_iter().map(|c| PendingCommunity { + id: c.id, + name: c.name, + slug: c.slug, + description: c.description, + requested_by: c.requested_by, + requested_by_username: Some(c.requester_username), + status: c.status.unwrap_or_default(), + created_at: c.created_at.unwrap_or_else(Utc::now), + }).collect())) +} + +/// Review a pending community request (approve or reject) +async fn review_community( + auth: AuthUser, + State(pool): State, + Path(pending_id): Path, + Json(req): Json, +) -> Result, (StatusCode, String)> { + require_permission(&pool, auth.user_id, perms::PLATFORM_ADMIN, None).await?; + + if req.approve { + // Approve community + let result = sqlx::query_scalar!( + "SELECT approve_community($1, $2)", + pending_id, + auth.user_id + ) + .fetch_one(&pool) + .await + .map_err(|e| { + let msg = e.to_string(); + if msg.contains("not found") || msg.contains("already processed") { + (StatusCode::NOT_FOUND, "Pending community not found or already processed".to_string()) + } else { + (StatusCode::INTERNAL_SERVER_ERROR, msg) + } + })?; + + Ok(Json(ReviewResponse { + success: true, + created_id: result, + message: "Community approved and created".to_string(), + })) + } else { + // Reject community + let success = sqlx::query_scalar!( + "SELECT reject_community($1, $2, $3)", + pending_id, + auth.user_id, + req.reason + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
+ .unwrap_or(false); + + if success { + Ok(Json(ReviewResponse { + success: true, + created_id: None, + message: "Community request rejected".to_string(), + })) + } else { + Err((StatusCode::NOT_FOUND, "Pending community not found or already processed".to_string())) + } + } +} + +// ============================================================================ +// Router +// ============================================================================ + +pub fn router(pool: PgPool) -> Router { + Router::new() + .route("/api/approvals/registrations", get(list_pending_registrations)) + .route("/api/approvals/registrations/{id}", post(review_registration)) + .route("/api/approvals/communities", get(list_pending_communities)) + .route("/api/approvals/communities/{id}", post(review_community)) + .with_state(pool) +} diff --git a/backend/src/api/auth.rs b/backend/src/api/auth.rs new file mode 100644 index 0000000..38b137a --- /dev/null +++ b/backend/src/api/auth.rs @@ -0,0 +1,297 @@ +use axum::{extract::State, http::StatusCode, routing::post, Extension, Json, Router}; +use serde::{Deserialize, Serialize}; +use sqlx::PgPool; +use std::sync::Arc; +use uuid::Uuid; + +use crate::auth::{create_token, hash_password, verify_password, AuthUser}; +use crate::config::Config; +use crate::demo; + +#[derive(Debug, Deserialize)] +pub struct RegisterRequest { + pub username: String, + pub email: String, + pub password: String, + pub display_name: Option, + pub invitation_code: Option, +} + +#[derive(Debug, Deserialize)] +pub struct LoginRequest { + pub username: String, + pub password: String, +} + +#[derive(Debug, Serialize)] +pub struct AuthResponse { + pub token: String, + pub user: UserInfo, +} + +#[derive(Debug, Serialize)] +pub struct UserInfo { + pub id: Uuid, + pub username: String, + pub email: String, + pub display_name: Option, +} + +#[derive(Debug, Serialize)] +pub struct MeResponse { + pub id: Uuid, + pub username: String, +} + +pub fn router(pool: PgPool) -> Router { + Router::new() + .route("/api/auth/register", post(register)) + .route("/api/auth/login", post(login)) + .route("/api/auth/me", post(me)) + .with_state(pool) +} + +async fn register( + State(pool): State, + Extension(config): Extension>, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Check registration settings + let settings = sqlx::query!( + "SELECT registration_enabled, registration_mode FROM instance_settings LIMIT 1" + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let registration_mode = if let Some(s) = &settings { + if !s.registration_enabled { + return Err((StatusCode::FORBIDDEN, "Registration is currently disabled".to_string())); + } + if s.registration_mode == "invite_only" && req.invitation_code.is_none() { + return Err((StatusCode::FORBIDDEN, "Registration requires an invitation code".to_string())); + } + s.registration_mode.clone() + } else { + "open".to_string() + }; + + // Validate invitation code if provided + let invitation_community_id: Option = if let Some(code) = &req.invitation_code { + let validation = sqlx::query!( + r#"SELECT id, email, community_id, is_active, expires_at, max_uses, uses_count + FROM invitations WHERE code = $1"#, + code + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + match validation { + None => return Err((StatusCode::BAD_REQUEST, "Invalid invitation code".to_string())), + Some(inv) => { + if !inv.is_active.unwrap_or(false) { + return 
Err((StatusCode::BAD_REQUEST, "Invitation is no longer active".to_string())); + } + if let Some(exp) = inv.expires_at { + if exp < chrono::Utc::now() { + return Err((StatusCode::BAD_REQUEST, "Invitation has expired".to_string())); + } + } + if let Some(max) = inv.max_uses { + if inv.uses_count.unwrap_or(0) >= max { + return Err((StatusCode::BAD_REQUEST, "Invitation has reached maximum uses".to_string())); + } + } + if let Some(email) = &inv.email { + if email != &req.email { + return Err((StatusCode::BAD_REQUEST, "This invitation is for a different email address".to_string())); + } + } + inv.community_id + } + } + } else { + None + }; + + let password_hash = hash_password(&req.password) + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // First user becomes admin (bypass approval for first user) + let user_count = sqlx::query_scalar!("SELECT COUNT(*) FROM users") + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .unwrap_or(0); + let is_first_user = user_count == 0; + + // Get invitation ID if code provided + let invitation_id: Option = if let Some(code) = &req.invitation_code { + sqlx::query_scalar!("SELECT id FROM invitations WHERE code = $1", code) + .fetch_optional(&pool) + .await + .ok() + .flatten() + } else { + None + }; + + // Handle approval mode (except for first user who becomes admin) + if registration_mode == "approval" && !is_first_user { + // Create pending registration instead of user + sqlx::query!( + r#"INSERT INTO pending_registrations (username, email, password_hash, display_name, invitation_id) + VALUES ($1, $2, $3, $4, $5)"#, + req.username, + req.email, + password_hash, + req.display_name, + invitation_id + ) + .execute(&pool) + .await + .map_err(|e| { + if e.to_string().contains("duplicate key") { + (StatusCode::CONFLICT, "Username or email already exists or pending approval".to_string()) + } else { + (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()) + } + })?; + + // Return a special response indicating pending approval + return Err((StatusCode::ACCEPTED, "Registration submitted for approval. 
You will be notified when approved.".to_string())); + } + + // Direct registration (open mode, invite_only with valid invite, or first user) + let user = sqlx::query_as!( + crate::models::User, + r#" + INSERT INTO users (username, email, password_hash, display_name, is_admin) + VALUES ($1, $2, $3, $4, $5) + RETURNING * + "#, + req.username, + req.email, + password_hash, + req.display_name, + is_first_user + ) + .fetch_one(&pool) + .await + .map_err(|e| { + if e.to_string().contains("duplicate key") { + (StatusCode::CONFLICT, "Username or email already exists".to_string()) + } else { + (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()) + } + })?; + + // Use invitation if provided (records usage and links user) + if let Some(code) = &req.invitation_code { + sqlx::query!("SELECT use_invitation($1, $2, $3)", code, user.id, req.email.as_str()) + .fetch_one(&pool) + .await + .ok(); // Ignore errors - user is already created + } + + // If invitation was for a specific community, add user as member + if let Some(community_id) = invitation_community_id { + sqlx::query!( + "INSERT INTO community_members (user_id, community_id, role) VALUES ($1, $2, 'member') ON CONFLICT DO NOTHING", + user.id, + community_id + ) + .execute(&pool) + .await + .ok(); + } + + let token = create_token(user.id, &user.username, &config.jwt_secret) + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(AuthResponse { + token, + user: UserInfo { + id: user.id, + username: user.username, + email: user.email, + display_name: user.display_name, + }, + })) +} + +async fn login( + State(pool): State, + Extension(config): Extension>, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // In demo mode, check if this is a demo account first + if config.is_demo() && demo::is_demo_account(&req.username) { + if demo::verify_demo_password(&req.username, &req.password) { + // Fetch the demo user from DB + let user = sqlx::query_as!( + crate::models::User, + "SELECT * FROM users WHERE username = $1 AND is_active = true", + req.username + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::UNAUTHORIZED, "Demo account not found".to_string()))?; + + let token = create_token(user.id, &user.username, &config.jwt_secret) + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + return Ok(Json(AuthResponse { + token, + user: UserInfo { + id: user.id, + username: user.username, + email: user.email, + display_name: user.display_name, + }, + })); + } else { + return Err((StatusCode::UNAUTHORIZED, "Invalid credentials".to_string())); + } + } + + // Standard authentication flow + let user = sqlx::query_as!( + crate::models::User, + "SELECT * FROM users WHERE username = $1 AND is_active = true", + req.username + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
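+    // An unknown or deactivated account produces the same generic 401 as a
+    // wrong password, so the endpoint does not reveal which usernames exist.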
+ .ok_or((StatusCode::UNAUTHORIZED, "Invalid credentials".to_string()))?; + + let valid = verify_password(&req.password, &user.password_hash) + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + if !valid { + return Err((StatusCode::UNAUTHORIZED, "Invalid credentials".to_string())); + } + + let token = create_token(user.id, &user.username, &config.jwt_secret) + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(AuthResponse { + token, + user: UserInfo { + id: user.id, + username: user.username, + email: user.email, + display_name: user.display_name, + }, + })) +} + +async fn me(auth: AuthUser) -> Json { + Json(MeResponse { + id: auth.user_id, + username: auth.username, + }) +} diff --git a/backend/src/api/comments.rs b/backend/src/api/comments.rs new file mode 100644 index 0000000..f67a88f --- /dev/null +++ b/backend/src/api/comments.rs @@ -0,0 +1,180 @@ +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::get, + Extension, + Json, Router, +}; +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use sqlx::PgPool; +use std::sync::Arc; +use uuid::Uuid; + +use crate::auth::AuthUser; +use crate::plugins::HookContext; +use crate::plugins::PluginManager; +use crate::plugins::PluginError; + +#[derive(Debug, Serialize)] +pub struct Comment { + pub id: Uuid, + pub proposal_id: Uuid, + pub author_id: Uuid, + pub author_name: String, + pub content: String, + pub parent_id: Option, + pub created_at: DateTime, +} + +#[derive(Debug, Deserialize)] +pub struct CreateComment { + pub content: String, + pub parent_id: Option, +} + +pub fn router(pool: PgPool) -> Router { + Router::new() + .route("/api/proposals/{proposal_id}/comments", get(list_comments).post(create_comment)) + .with_state(pool) +} + +async fn list_comments( + Path(proposal_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let comments = sqlx::query!( + r#" + SELECT c.id, c.proposal_id, c.author_id, c.content, c.parent_id, c.created_at, + u.username as author_name + FROM comments c + JOIN users u ON c.author_id = u.id + WHERE c.proposal_id = $1 + ORDER BY c.created_at ASC + "#, + proposal_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let result = comments + .into_iter() + .map(|c| Comment { + id: c.id, + proposal_id: c.proposal_id, + author_id: c.author_id, + author_name: c.author_name, + content: c.content, + parent_id: c.parent_id, + created_at: c.created_at, + }) + .collect(); + + Ok(Json(result)) +} + +async fn create_comment( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, + Extension(plugins): Extension>, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Get proposal author for notification + let proposal = sqlx::query!( + "SELECT author_id, community_id, title FROM proposals WHERE id = $1", + proposal_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
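+    // 404 if the target proposal no longer exists; its author, community and
+    // title feed the plugin hook context and the comment.created payload below.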
+ .ok_or((StatusCode::NOT_FOUND, "Proposal not found".to_string()))?; + + let filtered = plugins + .apply_filters( + "comment.create", + HookContext { + pool: pool.clone(), + community_id: Some(proposal.community_id), + actor_user_id: Some(auth.user_id), + }, + json!({ + "proposal_id": proposal_id, + "content": req.content, + "parent_id": req.parent_id, + }), + ) + .await + .map_err(|e| match e { + PluginError::Message(m) => (StatusCode::BAD_REQUEST, m), + PluginError::Sqlx(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()), + })?; + + let content = filtered + .get("content") + .and_then(|v| v.as_str()) + .ok_or(( + StatusCode::BAD_REQUEST, + "Invalid comment.create filter output".to_string(), + ))? + .to_string(); + + let parent_id = match filtered.get("parent_id") { + Some(v) if v.is_null() => None, + Some(v) => v + .as_str() + .and_then(|s| Uuid::parse_str(s).ok()) + .ok_or(( + StatusCode::BAD_REQUEST, + "Invalid comment.create filter output".to_string(), + )) + .map(Some)?, + None => None, + }; + + let comment = sqlx::query!( + r#" + INSERT INTO comments (proposal_id, author_id, content, parent_id) + VALUES ($1, $2, $3, $4) + RETURNING id, created_at + "#, + proposal_id, + auth.user_id, + content, + parent_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + plugins + .do_action( + "comment.created", + HookContext { + pool: pool.clone(), + community_id: Some(proposal.community_id), + actor_user_id: Some(auth.user_id), + }, + serde_json::json!({ + "proposal_id": proposal_id, + "proposal_title": proposal.title, + "proposal_author_id": proposal.author_id, + "commenter_id": auth.user_id, + "commenter_name": auth.username, + "content": content, + }), + ) + .await; + + Ok(Json(Comment { + id: comment.id, + proposal_id, + author_id: auth.user_id, + author_name: auth.username.clone(), + content, + parent_id, + created_at: comment.created_at, + })) +} diff --git a/backend/src/api/communities.rs b/backend/src/api/communities.rs new file mode 100644 index 0000000..bc08e4a --- /dev/null +++ b/backend/src/api/communities.rs @@ -0,0 +1,541 @@ +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::{get, post}, + Extension, + Json, Router, +}; +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use sqlx::PgPool; +use std::sync::Arc; +use uuid::Uuid; + +use crate::auth::AuthUser; +use crate::models::community::CommunityResponse; +use crate::plugins::{HookContext, PluginManager}; +use super::permissions::{user_has_permission, perms}; + +#[derive(Debug, Deserialize)] +pub struct CreateCommunityRequest { + pub name: String, + pub slug: String, + pub description: Option, +} + +#[derive(Debug, Serialize)] +pub struct CommunityDetail { + #[serde(flatten)] + pub community: CommunityResponse, + pub member_count: i64, + pub proposal_count: i64, +} + +use axum::routing::put; + +pub fn router(pool: PgPool) -> Router { + Router::new() + .route("/api/communities", get(list_communities).post(create_community)) + .route("/api/communities/{id}", put(update_community)) + .route("/api/communities/{id}/details", get(get_community_details)) + .route("/api/communities/{id}/join", post(join_community)) + .route("/api/communities/{id}/leave", post(leave_community)) + .route("/api/communities/{id}/members", get(list_members)) + .route("/api/communities/{id}/membership", get(check_membership)) + .route("/api/users/me/communities", get(my_communities)) + .route("/api/activity/recent", get(recent_activity)) + .with_state(pool) +} + 
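+// Mutating handlers below either consult `instance_settings.platform_mode`
+// (community creation) or pass their payloads through plugin filter hooks such
+// as `member.join.validate` before writing, so plugins can veto or adjust the
+// change; read-only listings skip those checks.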
+async fn list_communities( + State(pool): State, +) -> Result>, String> { + let communities = sqlx::query_as!( + crate::models::Community, + "SELECT * FROM communities WHERE is_active = true ORDER BY created_at DESC LIMIT 100" + ) + .fetch_all(&pool) + .await + .map_err(|e| e.to_string())?; + + Ok(Json(communities.into_iter().map(CommunityResponse::from).collect())) +} + +async fn create_community( + auth: AuthUser, + State(pool): State, + Extension(plugins): Extension>, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Check platform mode for community creation permissions + let settings = sqlx::query!( + "SELECT platform_mode FROM instance_settings LIMIT 1" + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + if let Some(s) = settings { + match s.platform_mode.as_str() { + "single_community" => { + return Err((StatusCode::FORBIDDEN, "This platform is dedicated to a single community".to_string())); + } + "admin_only" => { + // Check platform admin or community create permission + let can_create = user_has_permission(&pool, auth.user_id, perms::COMMUNITY_CREATE, None).await?; + if !can_create { + return Err((StatusCode::FORBIDDEN, "Only administrators can create communities".to_string())); + } + } + "approval" => { + // Check if user has direct create permission (admins bypass approval) + let can_create = user_has_permission(&pool, auth.user_id, perms::COMMUNITY_CREATE, None).await?; + if !can_create { + // Create pending community request instead + sqlx::query!( + r#"INSERT INTO pending_communities (name, slug, description, requested_by) + VALUES ($1, $2, $3, $4)"#, + req.name, + req.slug, + req.description, + auth.user_id + ) + .execute(&pool) + .await + .map_err(|e| { + if e.to_string().contains("duplicate key") { + (StatusCode::CONFLICT, "A community with this slug already exists or is pending approval".to_string()) + } else { + (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()) + } + })?; + + return Err((StatusCode::ACCEPTED, "Community request submitted for approval".to_string())); + } + } + _ => {} // "open" mode - anyone can create + } + } + + let community = sqlx::query_as!( + crate::models::Community, + r#" + INSERT INTO communities (name, slug, description) + VALUES ($1, $2, $3) + RETURNING * + "#, + req.name, + req.slug, + req.description + ) + .fetch_one(&pool) + .await + .map_err(|e| { + if e.to_string().contains("duplicate key") { + (StatusCode::CONFLICT, "Community name or slug already exists".to_string()) + } else { + (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()) + } + })?; + + // Add creator as admin member + sqlx::query!( + "INSERT INTO community_members (user_id, community_id, role) VALUES ($1, $2, 'admin')", + auth.user_id, + community.id + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + plugins + .ensure_default_community_plugins(community.id, Some(auth.user_id)) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + tracing::info!("Community '{}' created by user {}", community.name, auth.username); + + Ok(Json(CommunityResponse::from(community))) +} + +async fn get_community_details( + Path(community_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let community = sqlx::query_as!( + crate::models::Community, + "SELECT * FROM communities WHERE id = $1 AND is_active = true", + community_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, 
e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "Community not found".to_string()))?; + + let member_count = sqlx::query_scalar!( + "SELECT COUNT(*) FROM community_members WHERE community_id = $1", + community_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .unwrap_or(0); + + let proposal_count = sqlx::query_scalar!( + "SELECT COUNT(*) FROM proposals WHERE community_id = $1", + community_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .unwrap_or(0); + + Ok(Json(CommunityDetail { + community: CommunityResponse::from(community), + member_count, + proposal_count, + })) +} + +async fn join_community( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Extension(plugins): Extension>, +) -> Result, (StatusCode, String)> { + let existing = sqlx::query!( + "SELECT id FROM community_members WHERE user_id = $1 AND community_id = $2", + auth.user_id, + community_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + if existing.is_some() { + return Ok(Json(serde_json::json!({"status": "already_member"}))); + } + + let join_payload = serde_json::json!({ + "community_id": community_id.to_string(), + "user_id": auth.user_id.to_string(), + "username": auth.username.clone(), + "role": "member", + }); + + let ctx = HookContext { + pool: pool.clone(), + community_id: Some(community_id), + actor_user_id: Some(auth.user_id), + }; + let filtered = plugins + .apply_filters("member.join.validate", ctx, join_payload) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + if let Some(err) = filtered.get("_error").and_then(|v| v.as_str()) { + return Err((StatusCode::BAD_REQUEST, err.to_string())); + } + + let role = filtered.get("role").and_then(|v| v.as_str()).unwrap_or("member"); + + sqlx::query!( + "INSERT INTO community_members (user_id, community_id, role) VALUES ($1, $2, $3)", + auth.user_id, + community_id, + role + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let ctx = HookContext { + pool: pool.clone(), + community_id: Some(community_id), + actor_user_id: Some(auth.user_id), + }; + let _ = plugins.do_action("member.join", ctx, serde_json::json!({ + "community_id": community_id.to_string(), + "user_id": auth.user_id.to_string(), + "username": auth.username.clone(), + "role": role, + })).await; + + tracing::info!("User {} joined community {}", auth.username, community_id); + Ok(Json(serde_json::json!({"status": "joined"}))) +} + +async fn leave_community( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Extension(plugins): Extension>, +) -> Result, (StatusCode, String)> { + let membership = sqlx::query!( + "SELECT role FROM community_members WHERE user_id = $1 AND community_id = $2", + auth.user_id, + community_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + if membership.is_none() { + return Ok(Json(serde_json::json!({"status": "not_member"}))); + } + + let role = membership.unwrap().role.clone(); + + if role == "admin" { + let admin_count = sqlx::query_scalar!( + "SELECT COUNT(*) FROM community_members WHERE community_id = $1 AND role = 'admin'", + community_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
+ .unwrap_or(0); + + if admin_count <= 1 { + return Err((StatusCode::BAD_REQUEST, "Cannot leave: you are the only admin".to_string())); + } + } + + let leave_payload = serde_json::json!({ + "community_id": community_id.to_string(), + "user_id": auth.user_id.to_string(), + "username": auth.username.clone(), + "role": role.clone(), + }); + + let ctx = HookContext { + pool: pool.clone(), + community_id: Some(community_id), + actor_user_id: Some(auth.user_id), + }; + let filtered = plugins + .apply_filters("member.leave.validate", ctx, leave_payload) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + if let Some(err) = filtered.get("_error").and_then(|v| v.as_str()) { + return Err((StatusCode::BAD_REQUEST, err.to_string())); + } + + sqlx::query!( + "DELETE FROM community_members WHERE user_id = $1 AND community_id = $2", + auth.user_id, + community_id + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let ctx = HookContext { + pool: pool.clone(), + community_id: Some(community_id), + actor_user_id: Some(auth.user_id), + }; + let _ = plugins.do_action("member.leave", ctx, serde_json::json!({ + "community_id": community_id.to_string(), + "user_id": auth.user_id.to_string(), + "username": auth.username.clone(), + "role": role, + })).await; + + tracing::info!("User {} left community {}", auth.username, community_id); + Ok(Json(serde_json::json!({"status": "left"}))) +} + +async fn check_membership( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let membership = sqlx::query!( + "SELECT role FROM community_members WHERE user_id = $1 AND community_id = $2", + auth.user_id, + community_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + match membership { + Some(m) => Ok(Json(serde_json::json!({"is_member": true, "role": m.role}))), + None => Ok(Json(serde_json::json!({"is_member": false}))), + } +} + +#[derive(Debug, Serialize)] +pub struct MemberInfo { + pub user_id: Uuid, + pub username: String, + pub display_name: Option, + pub role: String, + pub joined_at: DateTime, +} + +async fn list_members( + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let members = sqlx::query!( + r#" + SELECT cm.user_id, cm.role, cm.joined_at, u.username, u.display_name + FROM community_members cm + JOIN users u ON cm.user_id = u.id + WHERE cm.community_id = $1 + ORDER BY cm.joined_at + "#, + community_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let result = members + .into_iter() + .map(|m| MemberInfo { + user_id: m.user_id, + username: m.username, + display_name: m.display_name, + role: m.role, + joined_at: m.joined_at, + }) + .collect(); + + Ok(Json(result)) +} + +async fn my_communities( + auth: AuthUser, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let communities = sqlx::query_as!( + crate::models::Community, + r#" + SELECT c.* FROM communities c + JOIN community_members cm ON c.id = cm.community_id + WHERE cm.user_id = $1 AND c.is_active = true + ORDER BY cm.joined_at DESC + "#, + auth.user_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(communities.into_iter().map(CommunityResponse::from).collect())) +} + +#[derive(Debug, Serialize)] +pub struct ActivityItem { + pub activity_type: String, + pub title: String, + pub 
description: String, + pub link: String, + pub created_at: DateTime, +} + +async fn recent_activity( + State(pool): State, +) -> Result>, (StatusCode, String)> { + let proposals = sqlx::query!( + r#" + SELECT p.id, p.title, p.status as "status: String", p.created_at, c.slug as community_slug + FROM proposals p + JOIN communities c ON p.community_id = c.id + WHERE c.is_active = true + ORDER BY p.created_at DESC + LIMIT 10 + "# + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let mut activities: Vec = proposals.into_iter().map(|p| { + let desc = match p.status.as_str() { + "voting" => "Now open for voting", + "discussion" => "Open for discussion", + "closed" => "Voting completed", + _ => "New proposal created", + }; + ActivityItem { + activity_type: "proposal".to_string(), + title: p.title, + description: desc.to_string(), + link: format!("/proposals/{}", p.id), + created_at: p.created_at, + } + }).collect(); + + let communities = sqlx::query!( + "SELECT name, slug, created_at FROM communities WHERE is_active = true ORDER BY created_at DESC LIMIT 5" + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + for c in communities { + activities.push(ActivityItem { + activity_type: "community".to_string(), + title: c.name, + description: "New community created".to_string(), + link: format!("/communities/{}", c.slug), + created_at: c.created_at, + }); + } + + activities.sort_by(|a, b| b.created_at.cmp(&a.created_at)); + activities.truncate(15); + + Ok(Json(activities)) +} + +#[derive(Debug, Deserialize)] +pub struct UpdateCommunity { + pub name: Option, + pub description: Option, +} + +async fn update_community( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Json(payload): Json, +) -> Result, (StatusCode, String)> { + // Check if user is admin + let membership = sqlx::query!( + "SELECT role FROM community_members WHERE community_id = $1 AND user_id = $2", + community_id, + auth.user_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::FORBIDDEN, "Not a member of this community".to_string()))?; + + if membership.role != "admin" { + return Err((StatusCode::FORBIDDEN, "Only admins can edit the community".to_string())); + } + + let updated = sqlx::query_as!( + crate::models::Community, + r#"UPDATE communities + SET name = COALESCE($1, name), + description = COALESCE($2, description) + WHERE id = $3 AND is_active = true + RETURNING *"#, + payload.name, + payload.description, + community_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + tracing::info!("Community '{}' updated", updated.name); + Ok(Json(CommunityResponse::from(updated))) +} diff --git a/backend/src/api/conflicts.rs b/backend/src/api/conflicts.rs new file mode 100644 index 0000000..0dc4604 --- /dev/null +++ b/backend/src/api/conflicts.rs @@ -0,0 +1,284 @@ +//! Conflict Resolution API endpoints. 
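Before the request types are defined, here is a rough sketch of the payload a client might POST to `/api/communities/{community_id}/conflicts` (handled by `report_conflict` and wired up in the router further down). Field names mirror `ReportConflictRequest`; the conflict-type string, severity value, and wording are invented for illustration and are not defined by this commit.

```rust
use serde_json::{json, Value};
use uuid::Uuid;

// Purely illustrative request body for reporting a conflict between two members.
// party_b_id may be null when the second party is unknown or unnamed.
fn example_report_conflict_body(party_a: Uuid, party_b: Option<Uuid>) -> Value {
    json!({
        "conflict_type": "interpersonal", // assumed value; the set of valid types is not shown here
        "title": "Disagreement over a moderation decision",
        "description": "Two members dispute how a removed comment was handled.",
        "party_a_id": party_a,
        "party_b_id": party_b,
        "anonymous": false,
        "severity": 2 // assumed scale
    })
}
```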
+ +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::{get, post}, + Json, Router, +}; +use serde::Deserialize; +use serde_json::{json, Value}; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::auth::AuthUser; +use crate::plugins::builtin::conflict_resolution::{ConflictCase, ConflictService}; + +// ============================================================================ +// Request Types +// ============================================================================ + +#[derive(Debug, Deserialize)] +pub struct ReportConflictRequest { + pub conflict_type: String, + pub title: String, + pub description: String, + pub party_a_id: Uuid, + pub party_b_id: Option, + pub anonymous: bool, + pub severity: i32, +} + +#[derive(Debug, Deserialize)] +pub struct TransitionStatusRequest { + pub new_status: String, + pub notes: Option, +} + +#[derive(Debug, Deserialize)] +pub struct ProposeCompromiseRequest { + pub title: String, + pub description: String, + pub proposed_actions: Value, + pub proposed_by_role: String, +} + +#[derive(Debug, Deserialize)] +pub struct RespondToCompromiseRequest { + pub party: String, + pub response: String, + pub feedback: Option, +} + +#[derive(Debug, Deserialize)] +pub struct ScheduleSessionRequest { + pub scheduled_for: chrono::DateTime, + pub duration_minutes: i32, + pub agenda: Option, +} + +#[derive(Debug, Deserialize)] +pub struct AddNoteRequest { + pub content: String, + pub note_type: String, + pub session_id: Option, + pub is_confidential: bool, +} + +#[derive(Debug, Deserialize)] +pub struct AddMediatorRequest { + pub user_id: Uuid, + pub certification_level: Option, +} + +// ============================================================================ +// Handlers +// ============================================================================ + +/// Get active conflicts in a community +async fn get_active_conflicts( + _auth: AuthUser, + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let conflicts = ConflictService::get_active_conflicts(&pool, community_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(conflicts)) +} + +/// Get a specific conflict +async fn get_conflict( + _auth: AuthUser, + Path(conflict_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let conflict = ConflictService::get_conflict(&pool, conflict_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(conflict)) +} + +/// Report a new conflict +async fn report_conflict( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let conflict_id = ConflictService::report_conflict( + &pool, + community_id, + &req.title, + &req.description, + &req.conflict_type, + req.party_a_id, + req.party_b_id, + Some(auth.user_id), + req.anonymous, + req.severity, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": conflict_id}))) +} + +/// Transition conflict status +async fn transition_status( + auth: AuthUser, + Path(conflict_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + ConflictService::transition_status( + &pool, + conflict_id, + &req.new_status, + auth.user_id, + req.notes.as_deref(), + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"success": true}))) +} + +/// Propose a compromise +async fn propose_compromise( + auth: 
AuthUser, + Path(conflict_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let proposal_id = ConflictService::propose_compromise( + &pool, + conflict_id, + &req.title, + &req.description, + req.proposed_actions, + auth.user_id, + &req.proposed_by_role, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": proposal_id}))) +} + +/// Respond to a compromise proposal +async fn respond_to_compromise( + _auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + ConflictService::respond_to_compromise( + &pool, + proposal_id, + &req.party, + &req.response, + req.feedback.as_deref(), + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"success": true}))) +} + +/// Schedule a mediation session +async fn schedule_session( + _auth: AuthUser, + Path(conflict_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let session_id = ConflictService::schedule_session( + &pool, + conflict_id, + req.scheduled_for, + req.duration_minutes, + req.agenda.as_deref(), + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": session_id}))) +} + +/// Add a note to a conflict +async fn add_note( + auth: AuthUser, + Path(conflict_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let note_id = ConflictService::add_note( + &pool, + conflict_id, + req.session_id, + auth.user_id, + &req.content, + &req.note_type, + req.is_confidential, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": note_id}))) +} + +/// Get conflict statistics for a community +async fn get_statistics( + _auth: AuthUser, + Path(community_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let stats = ConflictService::get_statistics(&pool, community_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(stats)) +} + +/// Add user to mediator pool +async fn add_to_mediator_pool( + _auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let mediator_id = ConflictService::add_to_mediator_pool( + &pool, + community_id, + req.user_id, + req.certification_level.as_deref(), + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": mediator_id}))) +} + +// ============================================================================ +// Router +// ============================================================================ + +pub fn router(pool: PgPool) -> Router { + Router::new() + // Community conflicts + .route("/api/communities/{community_id}/conflicts", get(get_active_conflicts).post(report_conflict)) + .route("/api/communities/{community_id}/conflicts/stats", get(get_statistics)) + .route("/api/communities/{community_id}/mediators", post(add_to_mediator_pool)) + // Individual conflict operations + .route("/api/conflicts/{conflict_id}", get(get_conflict)) + .route("/api/conflicts/{conflict_id}/status", post(transition_status)) + .route("/api/conflicts/{conflict_id}/compromise", post(propose_compromise)) + .route("/api/conflicts/{conflict_id}/session", post(schedule_session)) + .route("/api/conflicts/{conflict_id}/note", post(add_note)) + // Compromise responses + 
.route("/api/compromises/{proposal_id}/respond", post(respond_to_compromise)) + .with_state(pool) +} diff --git a/backend/src/api/delegation.rs b/backend/src/api/delegation.rs new file mode 100644 index 0000000..688e836 --- /dev/null +++ b/backend/src/api/delegation.rs @@ -0,0 +1,565 @@ +//! Liquid Delegation API +//! +//! Implements fluid, reversible, topic-based vote delegation as described +//! in the Democracy Design manifesto. + +use axum::{ + extract::{Path, Query, State}, + http::StatusCode, + routing::{delete, get}, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use sqlx::PgPool; +use uuid::Uuid; +use chrono::{DateTime, Utc}; + +use crate::auth::AuthUser; + +// ============================================================================ +// Types +// ============================================================================ + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, sqlx::Type)] +#[sqlx(type_name = "delegation_scope", rename_all = "lowercase")] +pub enum DelegationScope { + Global, + Community, + Topic, + Proposal, +} + +#[derive(Debug, Serialize)] +pub struct Delegation { + pub id: Uuid, + pub delegator_id: Uuid, + pub delegate_id: Uuid, + pub delegate_username: Option, + pub scope: DelegationScope, + pub community_id: Option, + pub topic_id: Option, + pub proposal_id: Option, + pub weight: f64, + pub is_active: bool, + pub created_at: DateTime, +} + +#[derive(Debug, Deserialize)] +pub struct CreateDelegationRequest { + pub delegate_id: Uuid, + pub scope: DelegationScope, + pub community_id: Option, + pub topic_id: Option, + pub proposal_id: Option, + #[serde(default = "default_weight")] + pub weight: f64, +} + +fn default_weight() -> f64 { 1.0 } + +#[derive(Debug, Serialize)] +pub struct DelegateProfile { + pub user_id: Uuid, + pub username: String, + pub display_name: Option, + pub bio: Option, + pub accepting_delegations: bool, + pub delegation_policy: Option, + pub total_delegators: i32, + pub total_votes_cast: i32, +} + +#[derive(Debug, Deserialize)] +pub struct UpdateProfileRequest { + pub display_name: Option, + pub bio: Option, + pub accepting_delegations: Option, + pub delegation_policy: Option, +} + +/// Represents a delegation chain from original delegator to final delegate. +/// Used for delegation chain visualization. 
+#[allow(dead_code)] +#[derive(Debug, Serialize)] +pub struct DelegationChain { + pub original_delegator_id: Uuid, + pub final_delegate_id: Uuid, + pub chain: Vec, + pub effective_weight: f64, +} + +#[allow(dead_code)] +#[derive(Debug, Serialize)] +pub struct ChainLink { + pub user_id: Uuid, + pub username: String, +} + +#[derive(Debug, Deserialize)] +pub struct ListDelegationsQuery { + #[allow(dead_code)] + pub scope: Option, + pub community_id: Option, + pub active_only: Option, +} + +#[derive(Debug, Serialize)] +pub struct Topic { + pub id: Uuid, + pub community_id: Uuid, + pub name: String, + pub slug: String, + pub description: Option, + pub parent_id: Option, +} + +#[derive(Debug, Deserialize)] +pub struct CreateTopicRequest { + pub name: String, + pub slug: String, + pub description: Option, + pub parent_id: Option, +} + +// ============================================================================ +// Delegation Handlers +// ============================================================================ + +/// Create a new delegation +async fn create_delegation( + auth: AuthUser, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Validate weight + if req.weight <= 0.0 || req.weight > 1.0 { + return Err((StatusCode::BAD_REQUEST, "Weight must be between 0 and 1".to_string())); + } + + // Check delegate exists + let delegate = sqlx::query!("SELECT username FROM users WHERE id = $1", req.delegate_id) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "Delegate not found".to_string()))?; + + // Check if delegate is accepting delegations + let profile = sqlx::query!( + "SELECT accepting_delegations FROM delegate_profiles WHERE user_id = $1", + req.delegate_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + if let Some(p) = profile { + if !p.accepting_delegations { + return Err((StatusCode::BAD_REQUEST, "Delegate is not accepting delegations".to_string())); + } + } + + // Deactivate existing delegation with same scope + sqlx::query!( + r#"UPDATE delegations + SET is_active = FALSE, revoked_at = NOW() + WHERE delegator_id = $1 + AND scope = $2::delegation_scope + AND is_active = TRUE + AND (community_id = $3 OR ($3 IS NULL AND community_id IS NULL)) + AND (topic_id = $4 OR ($4 IS NULL AND topic_id IS NULL)) + AND (proposal_id = $5 OR ($5 IS NULL AND proposal_id IS NULL))"#, + auth.user_id, + req.scope.clone() as DelegationScope, + req.community_id, + req.topic_id, + req.proposal_id + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Create new delegation (weight stored as default, simplified for now) + let delegation = sqlx::query!( + r#"INSERT INTO delegations (delegator_id, delegate_id, scope, community_id, topic_id, proposal_id) + VALUES ($1, $2, $3::delegation_scope, $4, $5, $6) + RETURNING id, delegator_id, delegate_id, scope as "scope: DelegationScope", + community_id, topic_id, proposal_id, is_active, created_at"#, + auth.user_id, + req.delegate_id, + req.scope as DelegationScope, + req.community_id, + req.topic_id, + req.proposal_id + ) + .fetch_one(&pool) + .await + .map_err(|e: sqlx::Error| { + if e.to_string().contains("cycle") { + (StatusCode::BAD_REQUEST, "Delegation would create a cycle".to_string()) + } else { + (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()) + } + })?; + + // Log the delegation + sqlx::query!( + r#"INSERT INTO delegation_log 
(delegation_id, delegator_id, delegate_id, action, scope, community_id, topic_id, proposal_id) + VALUES ($1, $2, $3, 'created', $4::delegation_scope, $5, $6, $7)"#, + delegation.id, + auth.user_id, + req.delegate_id, + req.scope as DelegationScope, + req.community_id, + req.topic_id, + req.proposal_id + ) + .execute(&pool) + .await + .ok(); + + // Update delegate's delegator count + sqlx::query!( + r#"INSERT INTO delegate_profiles (user_id, total_delegators) + VALUES ($1, 1) + ON CONFLICT (user_id) DO UPDATE SET total_delegators = delegate_profiles.total_delegators + 1"#, + req.delegate_id + ) + .execute(&pool) + .await + .ok(); + + Ok(Json(Delegation { + id: delegation.id, + delegator_id: delegation.delegator_id, + delegate_id: delegation.delegate_id, + delegate_username: Some(delegate.username), + scope: delegation.scope, + community_id: delegation.community_id, + topic_id: delegation.topic_id, + proposal_id: delegation.proposal_id, + weight: req.weight, + is_active: delegation.is_active, + created_at: delegation.created_at, + })) +} + +/// List user's outgoing delegations +async fn list_my_delegations( + auth: AuthUser, + State(pool): State, + Query(query): Query, +) -> Result>, (StatusCode, String)> { + let active_only = query.active_only.unwrap_or(true); + + let delegations = sqlx::query!( + r#"SELECT d.id, d.delegator_id, d.delegate_id, u.username as delegate_username, + d.scope as "scope: DelegationScope", d.community_id, d.topic_id, + d.proposal_id, d.is_active, d.created_at + FROM delegations d + JOIN users u ON d.delegate_id = u.id + WHERE d.delegator_id = $1 + AND ($2 = FALSE OR d.is_active = TRUE) + AND ($3::uuid IS NULL OR d.community_id = $3) + ORDER BY d.created_at DESC"#, + auth.user_id, + active_only, + query.community_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(delegations.into_iter().map(|d| { + Delegation { + id: d.id, + delegator_id: d.delegator_id, + delegate_id: d.delegate_id, + delegate_username: Some(d.delegate_username), + scope: d.scope, + community_id: d.community_id, + topic_id: d.topic_id, + proposal_id: d.proposal_id, + weight: 1.0, + is_active: d.is_active, + created_at: d.created_at, + } + }).collect())) +} + +/// List delegations TO a user (they are the delegate) +async fn list_delegations_to_me( + auth: AuthUser, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let delegations = sqlx::query!( + r#"SELECT d.id, d.delegator_id, d.delegate_id, u.username as delegator_username, + d.scope as "scope: DelegationScope", d.community_id, d.topic_id, + d.proposal_id, d.is_active, d.created_at + FROM delegations d + JOIN users u ON d.delegator_id = u.id + WHERE d.delegate_id = $1 AND d.is_active = TRUE + ORDER BY d.created_at DESC"#, + auth.user_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(delegations.into_iter().map(|d| { + Delegation { + id: d.id, + delegator_id: d.delegator_id, + delegate_id: d.delegate_id, + delegate_username: Some(d.delegator_username), + scope: d.scope, + community_id: d.community_id, + topic_id: d.topic_id, + proposal_id: d.proposal_id, + weight: 1.0, + is_active: d.is_active, + created_at: d.created_at, + } + }).collect())) +} + +/// Revoke a delegation +async fn revoke_delegation( + auth: AuthUser, + Path(delegation_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let result = sqlx::query!( + r#"UPDATE delegations + SET is_active = FALSE, revoked_at = NOW() + 
WHERE id = $1 AND delegator_id = $2 AND is_active = TRUE + RETURNING delegate_id, scope as "scope: DelegationScope", community_id, topic_id, proposal_id"#, + delegation_id, + auth.user_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "Delegation not found or already revoked".to_string()))?; + + // Log revocation + sqlx::query!( + r#"INSERT INTO delegation_log (delegation_id, delegator_id, delegate_id, action, scope, community_id, topic_id, proposal_id) + VALUES ($1, $2, $3, 'revoked', $4::delegation_scope, $5, $6, $7)"#, + delegation_id, + auth.user_id, + result.delegate_id, + result.scope as DelegationScope, + result.community_id, + result.topic_id, + result.proposal_id + ) + .execute(&pool) + .await + .ok(); + + // Update delegate's delegator count + sqlx::query!( + "UPDATE delegate_profiles SET total_delegators = GREATEST(0, total_delegators - 1) WHERE user_id = $1", + result.delegate_id + ) + .execute(&pool) + .await + .ok(); + + Ok(Json(serde_json::json!({"success": true}))) +} + +// ============================================================================ +// Delegate Profile Handlers +// ============================================================================ + +/// Get or create delegate profile +async fn get_my_profile( + auth: AuthUser, + State(pool): State, +) -> Result, (StatusCode, String)> { + let user = sqlx::query!("SELECT username FROM users WHERE id = $1", auth.user_id) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let profile = sqlx::query!( + r#"INSERT INTO delegate_profiles (user_id) + VALUES ($1) + ON CONFLICT (user_id) DO UPDATE SET user_id = $1 + RETURNING display_name, bio, accepting_delegations, delegation_policy, + total_delegators, total_votes_cast"#, + auth.user_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(DelegateProfile { + user_id: auth.user_id, + username: user.username, + display_name: profile.display_name, + bio: profile.bio, + accepting_delegations: profile.accepting_delegations, + delegation_policy: profile.delegation_policy, + total_delegators: profile.total_delegators, + total_votes_cast: profile.total_votes_cast, + })) +} + +/// Update delegate profile +async fn update_my_profile( + auth: AuthUser, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let user = sqlx::query!("SELECT username FROM users WHERE id = $1", auth.user_id) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let profile = sqlx::query!( + r#"INSERT INTO delegate_profiles (user_id, display_name, bio, accepting_delegations, delegation_policy) + VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (user_id) DO UPDATE SET + display_name = COALESCE($2, delegate_profiles.display_name), + bio = COALESCE($3, delegate_profiles.bio), + accepting_delegations = COALESCE($4, delegate_profiles.accepting_delegations), + delegation_policy = COALESCE($5, delegate_profiles.delegation_policy), + updated_at = NOW() + RETURNING display_name, bio, accepting_delegations, delegation_policy, + total_delegators, total_votes_cast"#, + auth.user_id, + req.display_name, + req.bio, + req.accepting_delegations, + req.delegation_policy + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(DelegateProfile { + user_id: auth.user_id, + username: user.username, + 
display_name: profile.display_name, + bio: profile.bio, + accepting_delegations: profile.accepting_delegations, + delegation_policy: profile.delegation_policy, + total_delegators: profile.total_delegators, + total_votes_cast: profile.total_votes_cast, + })) +} + +/// List delegates (users accepting delegations) +async fn list_delegates( + State(pool): State, + Query(_query): Query, +) -> Result>, (StatusCode, String)> { + let profiles = sqlx::query!( + r#"SELECT dp.user_id, u.username, dp.display_name, dp.bio, + dp.accepting_delegations, dp.delegation_policy, + dp.total_delegators, dp.total_votes_cast + FROM delegate_profiles dp + JOIN users u ON dp.user_id = u.id + WHERE dp.accepting_delegations = TRUE + ORDER BY dp.total_delegators DESC + LIMIT 50"# + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(profiles.into_iter().map(|p| DelegateProfile { + user_id: p.user_id, + username: p.username, + display_name: p.display_name, + bio: p.bio, + accepting_delegations: p.accepting_delegations, + delegation_policy: p.delegation_policy, + total_delegators: p.total_delegators, + total_votes_cast: p.total_votes_cast, + }).collect())) +} + +// ============================================================================ +// Topic Handlers +// ============================================================================ + +/// List topics for a community +async fn list_topics( + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let topics = sqlx::query_as!( + Topic, + r#"SELECT id, community_id, name, slug, description, parent_id + FROM topics + WHERE community_id = $1 + ORDER BY name"#, + community_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(topics)) +} + +/// Create a topic +async fn create_topic( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Check user is community admin/moderator + let membership = sqlx::query!( + "SELECT role FROM community_members WHERE community_id = $1 AND user_id = $2", + community_id, + auth.user_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
+ .ok_or((StatusCode::FORBIDDEN, "Not a member of this community".to_string()))?; + + if membership.role != "admin" && membership.role != "moderator" { + return Err((StatusCode::FORBIDDEN, "Only admins/moderators can create topics".to_string())); + } + + let topic = sqlx::query_as!( + Topic, + r#"INSERT INTO topics (community_id, name, slug, description, parent_id) + VALUES ($1, $2, $3, $4, $5) + RETURNING id, community_id, name, slug, description, parent_id"#, + community_id, + req.name, + req.slug, + req.description, + req.parent_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(topic)) +} + +// ============================================================================ +// Router +// ============================================================================ + +pub fn router(pool: PgPool) -> Router { + Router::new() + // Delegations + .route("/api/delegations", get(list_my_delegations).post(create_delegation)) + .route("/api/delegations/to-me", get(list_delegations_to_me)) + .route("/api/delegations/{delegation_id}", delete(revoke_delegation)) + // Delegate profiles + .route("/api/delegates", get(list_delegates)) + .route("/api/delegates/me", get(get_my_profile).put(update_my_profile)) + // Topics + .route("/api/communities/{community_id}/topics", get(list_topics).post(create_topic)) + .with_state(pool) +} diff --git a/backend/src/api/deliberation.rs b/backend/src/api/deliberation.rs new file mode 100644 index 0000000..8bb9710 --- /dev/null +++ b/backend/src/api/deliberation.rs @@ -0,0 +1,592 @@ +//! Deliberation API endpoints for structured democratic discourse. + +use axum::{ + extract::{Path, Query, State}, + http::StatusCode, + routing::{get, post}, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; +use sqlx::PgPool; +use uuid::Uuid; +use chrono::{DateTime, Utc}; + +use crate::auth::AuthUser; +use crate::plugins::builtin::structured_deliberation::{Argument, Summary, DeliberationService}; + +// ============================================================================ +// Types +// ============================================================================ + +#[derive(Debug, Serialize)] +pub struct ProposalResource { + pub id: Uuid, + pub proposal_id: Uuid, + pub title: String, + pub resource_type: String, + pub content: Option, + pub url: Option, + pub author_name: Option, + pub sort_order: i32, + pub created_at: DateTime, +} + +#[derive(Debug, Deserialize)] +pub struct CreateResourceRequest { + pub title: String, + pub resource_type: String, + pub content: Option, + pub url: Option, + pub author_name: Option, +} + +#[derive(Debug, Serialize)] +pub struct ResourceReadStatus { + pub resource_id: Uuid, + pub has_read: bool, + pub read_at: Option>, +} + +#[derive(Debug, Serialize)] +pub struct ProposalPosition { + pub id: Uuid, + pub proposal_id: Uuid, + pub user_id: Uuid, + pub position: String, + pub reasoning: Option, + pub created_at: DateTime, +} + +#[derive(Debug, Deserialize)] +pub struct SetPositionRequest { + pub position: String, + pub reasoning: Option, +} + +#[derive(Debug, Serialize)] +pub struct PositionSummary { + pub strongly_support: i64, + pub support: i64, + pub neutral: i64, + pub oppose: i64, + pub strongly_oppose: i64, + pub total: i64, +} + +#[derive(Debug, Deserialize)] +pub struct ReactToCommentRequest { + pub reaction_type: String, +} + +#[derive(Debug, Serialize)] +pub struct CommentReactions { + pub agree: i64, + pub disagree: i64, + pub 
insightful: i64, + pub off_topic: i64, + pub constructive: i64, +} + +#[derive(Debug, Deserialize)] +pub struct AddArgumentRequest { + pub stance: String, + pub title: String, + pub content: String, + pub parent_id: Option, +} + +#[derive(Debug, Deserialize)] +pub struct VoteArgumentRequest { + pub vote_type: String, +} + +#[derive(Debug, Deserialize)] +pub struct ArgumentsQuery { + pub stance: Option, + pub limit: Option, +} + +#[derive(Debug, Deserialize)] +pub struct UpsertSummaryRequest { + pub summary_type: String, + pub content: String, + pub key_points: Option, +} + +#[derive(Debug, Deserialize)] +pub struct RecordReadingRequest { + pub read_type: String, + pub time_seconds: i32, +} + +#[derive(Debug, Serialize)] +pub struct CanParticipateResponse { + pub can_comment: bool, + pub can_vote: bool, +} + +// ============================================================================ +// Handlers +// ============================================================================ + +/// List resources for a proposal's inform phase +async fn list_resources( + Path(proposal_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let resources = sqlx::query_as!( + ProposalResource, + r#"SELECT id, proposal_id, title, resource_type, content, url, author_name, sort_order, created_at + FROM proposal_resources + WHERE proposal_id = $1 + ORDER BY sort_order"#, + proposal_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(resources)) +} + +/// Add a resource to a proposal +async fn add_resource( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Check if user is proposal author or facilitator + let proposal = sqlx::query!( + "SELECT author_id, facilitator_id FROM proposals WHERE id = $1", + proposal_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
+ .ok_or((StatusCode::NOT_FOUND, "Proposal not found".to_string()))?; + + let can_add = proposal.author_id == auth.user_id + || proposal.facilitator_id == Some(auth.user_id); + + if !can_add { + return Err((StatusCode::FORBIDDEN, "Not authorized to add resources".to_string())); + } + + let resource = sqlx::query_as!( + ProposalResource, + r#"INSERT INTO proposal_resources (proposal_id, title, resource_type, content, url, author_name, created_by) + VALUES ($1, $2, $3, $4, $5, $6, $7) + RETURNING id, proposal_id, title, resource_type, content, url, author_name, sort_order, created_at"#, + proposal_id, + req.title, + req.resource_type, + req.content, + req.url, + req.author_name, + auth.user_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(resource)) +} + +/// Mark a resource as read +async fn mark_resource_read( + auth: AuthUser, + Path(resource_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + sqlx::query!( + r#"INSERT INTO proposal_resource_reads (resource_id, user_id) + VALUES ($1, $2) + ON CONFLICT (resource_id, user_id) DO NOTHING"#, + resource_id, + auth.user_id + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let read = sqlx::query!( + "SELECT read_at FROM proposal_resource_reads WHERE resource_id = $1 AND user_id = $2", + resource_id, + auth.user_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(ResourceReadStatus { + resource_id, + has_read: read.is_some(), + read_at: read.map(|r| r.read_at), + })) +} + +/// Get user's read status for all resources of a proposal +async fn get_read_status( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let statuses = sqlx::query!( + r#"SELECT r.id as resource_id, rr.read_at as "read_at?" 
+ FROM proposal_resources r + LEFT JOIN proposal_resource_reads rr ON r.id = rr.resource_id AND rr.user_id = $2 + WHERE r.proposal_id = $1 + ORDER BY r.sort_order"#, + proposal_id, + auth.user_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(statuses.into_iter().map(|s| ResourceReadStatus { + resource_id: s.resource_id, + has_read: s.read_at.is_some(), + read_at: s.read_at, + }).collect())) +} + +/// Set user's position on a proposal +async fn set_position( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let position = sqlx::query_as!( + ProposalPosition, + r#"INSERT INTO proposal_positions (proposal_id, user_id, position, reasoning) + VALUES ($1, $2, $3, $4) + ON CONFLICT (proposal_id, user_id) + DO UPDATE SET position = $3, reasoning = $4, updated_at = NOW() + RETURNING id, proposal_id, user_id, position, reasoning, created_at"#, + proposal_id, + auth.user_id, + req.position, + req.reasoning + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(position)) +} + +/// Get position summary for a proposal (agreement visualization) +async fn get_position_summary( + Path(proposal_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let counts = sqlx::query!( + r#"SELECT + COUNT(*) FILTER (WHERE position = 'strongly_support') as strongly_support, + COUNT(*) FILTER (WHERE position = 'support') as support, + COUNT(*) FILTER (WHERE position = 'neutral') as neutral, + COUNT(*) FILTER (WHERE position = 'oppose') as oppose, + COUNT(*) FILTER (WHERE position = 'strongly_oppose') as strongly_oppose, + COUNT(*) as total + FROM proposal_positions + WHERE proposal_id = $1"#, + proposal_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(PositionSummary { + strongly_support: counts.strongly_support.unwrap_or(0), + support: counts.support.unwrap_or(0), + neutral: counts.neutral.unwrap_or(0), + oppose: counts.oppose.unwrap_or(0), + strongly_oppose: counts.strongly_oppose.unwrap_or(0), + total: counts.total.unwrap_or(0), + })) +} + +/// React to a comment +async fn react_to_comment( + auth: AuthUser, + Path(comment_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Add reaction (toggle - remove if exists, add if not) + let existing = sqlx::query!( + "SELECT id FROM comment_reactions WHERE comment_id = $1 AND user_id = $2 AND reaction_type = $3", + comment_id, + auth.user_id, + req.reaction_type + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + if existing.is_some() { + sqlx::query!( + "DELETE FROM comment_reactions WHERE comment_id = $1 AND user_id = $2 AND reaction_type = $3", + comment_id, + auth.user_id, + req.reaction_type + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + } else { + sqlx::query!( + "INSERT INTO comment_reactions (comment_id, user_id, reaction_type) VALUES ($1, $2, $3)", + comment_id, + auth.user_id, + req.reaction_type + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + } + + // Return updated counts + get_comment_reactions_internal(&pool, comment_id).await +} + +/// Get reactions for a comment +async fn get_comment_reactions( + Path(comment_id): Path, + State(pool): State, +) -> Result, (StatusCode, 
String)> { + get_comment_reactions_internal(&pool, comment_id).await +} + +async fn get_comment_reactions_internal( + pool: &PgPool, + comment_id: Uuid, +) -> Result, (StatusCode, String)> { + let counts = sqlx::query!( + r#"SELECT + COUNT(*) FILTER (WHERE reaction_type = 'agree') as agree, + COUNT(*) FILTER (WHERE reaction_type = 'disagree') as disagree, + COUNT(*) FILTER (WHERE reaction_type = 'insightful') as insightful, + COUNT(*) FILTER (WHERE reaction_type = 'off_topic') as off_topic, + COUNT(*) FILTER (WHERE reaction_type = 'constructive') as constructive + FROM comment_reactions + WHERE comment_id = $1"#, + comment_id + ) + .fetch_one(pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(CommentReactions { + agree: counts.agree.unwrap_or(0), + disagree: counts.disagree.unwrap_or(0), + insightful: counts.insightful.unwrap_or(0), + off_topic: counts.off_topic.unwrap_or(0), + constructive: counts.constructive.unwrap_or(0), + })) +} + +// ============================================================================ +// Argument Handlers (wired to DeliberationService) +// ============================================================================ + +/// Get arguments for a proposal +async fn list_arguments( + Path(proposal_id): Path, + Query(query): Query, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let limit = query.limit.unwrap_or(50); + let arguments = DeliberationService::get_arguments( + &pool, + proposal_id, + query.stance.as_deref(), + limit, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(arguments)) +} + +/// Add an argument to a proposal +async fn add_argument( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Check if user can participate + let can = DeliberationService::check_can_participate(&pool, proposal_id, auth.user_id, "comment") + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + if !can { + return Err((StatusCode::FORBIDDEN, "Must read proposal content before participating".to_string())); + } + + let argument_id = DeliberationService::add_argument( + &pool, + proposal_id, + req.parent_id, + &req.stance, + &req.title, + &req.content, + auth.user_id, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": argument_id}))) +} + +/// Vote on an argument +async fn vote_argument( + auth: AuthUser, + Path(argument_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + DeliberationService::vote_argument(&pool, argument_id, auth.user_id, &req.vote_type) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"success": true}))) +} + +// ============================================================================ +// Summary Handlers (wired to DeliberationService) +// ============================================================================ + +/// Get summaries for a proposal +async fn list_summaries( + Path(proposal_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let summaries = DeliberationService::get_summaries(&pool, proposal_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(summaries)) +} + +/// Create or update a summary +async fn upsert_summary( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, 
String)> { + let key_points = req.key_points.unwrap_or(json!([])); + let summary_id = DeliberationService::upsert_summary( + &pool, + proposal_id, + &req.summary_type, + &req.content, + key_points, + auth.user_id, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": summary_id}))) +} + +/// Approve a summary +async fn approve_summary( + auth: AuthUser, + Path(summary_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + DeliberationService::approve_summary(&pool, summary_id, auth.user_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"success": true}))) +} + +// ============================================================================ +// Reading/Participation Handlers (wired to DeliberationService) +// ============================================================================ + +/// Record reading progress +async fn record_reading( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + DeliberationService::record_reading( + &pool, + proposal_id, + auth.user_id, + &req.read_type, + req.time_seconds, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"success": true}))) +} + +/// Check if user can participate +async fn check_can_participate( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let can_comment = DeliberationService::check_can_participate(&pool, proposal_id, auth.user_id, "comment") + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let can_vote = DeliberationService::check_can_participate(&pool, proposal_id, auth.user_id, "vote") + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(CanParticipateResponse { can_comment, can_vote })) +} + +/// Get deliberation overview (metrics + top arguments + summaries) +async fn get_overview( + Path(proposal_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let overview = DeliberationService::get_overview(&pool, proposal_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(overview)) +} + +// ============================================================================ +// Router +// ============================================================================ + +pub fn router(pool: PgPool) -> Router { + Router::new() + // Resources (inform phase) + .route("/api/proposals/{proposal_id}/resources", get(list_resources).post(add_resource)) + .route("/api/proposals/{proposal_id}/resources/read-status", get(get_read_status)) + .route("/api/resources/{resource_id}/read", post(mark_resource_read)) + // Positions (agreement visualization) + .route("/api/proposals/{proposal_id}/positions", post(set_position)) + .route("/api/proposals/{proposal_id}/positions/summary", get(get_position_summary)) + // Comment reactions (quality scoring) + .route("/api/comments/{comment_id}/reactions", get(get_comment_reactions).post(react_to_comment)) + // Arguments (structured debate) - wired to DeliberationService + .route("/api/proposals/{proposal_id}/arguments", get(list_arguments).post(add_argument)) + .route("/api/arguments/{argument_id}/vote", post(vote_argument)) + // Summaries (collaborative summaries) - wired to DeliberationService + .route("/api/proposals/{proposal_id}/summaries", get(list_summaries).post(upsert_summary)) + 
.route("/api/summaries/{summary_id}/approve", post(approve_summary)) + // Reading/participation tracking - wired to DeliberationService + .route("/api/proposals/{proposal_id}/reading", post(record_reading)) + .route("/api/proposals/{proposal_id}/can-participate", get(check_can_participate)) + // Overview (combined metrics) + .route("/api/proposals/{proposal_id}/deliberation", get(get_overview)) + .with_state(pool) +} diff --git a/backend/src/api/demo.rs b/backend/src/api/demo.rs new file mode 100644 index 0000000..95b1e1b --- /dev/null +++ b/backend/src/api/demo.rs @@ -0,0 +1,137 @@ +//! Demo API endpoints + +use axum::{ + extract::State, + http::StatusCode, + response::IntoResponse, + routing::{get, post}, + Json, Router, +}; +use serde_json::json; +use sqlx::PgPool; +use std::sync::Arc; + +use crate::config::Config; +use crate::demo::{self, DEMO_ACCOUNTS}; + +/// Combined state for demo endpoints +#[derive(Clone)] +pub struct DemoState { + pub pool: PgPool, + pub config: Arc, +} + +/// Get demo mode status and available accounts +async fn get_demo_status( + State(state): State, +) -> impl IntoResponse { + Json(json!({ + "demo_mode": state.config.is_demo(), + "accounts": if state.config.is_demo() { + DEMO_ACCOUNTS.iter().map(|(u, _, d)| json!({ + "username": u, + "display_name": d, + "password": "demo123" + })).collect::>() + } else { + vec![] + }, + "restrictions": if state.config.is_demo() { + vec![ + "Cannot delete communities", + "Cannot delete users", + "Cannot modify instance settings", + "Data resets periodically" + ] + } else { + vec![] + } + })) +} + +/// Reset demo data to initial state (only in demo mode) +async fn reset_demo( + State(state): State, +) -> impl IntoResponse { + if !state.config.is_demo() { + return ( + StatusCode::FORBIDDEN, + Json(json!({"error": "Demo mode not enabled"})) + ).into_response(); + } + + match demo::reset_demo_data(&state.pool).await { + Ok(_) => ( + StatusCode::OK, + Json(json!({"success": true, "message": "Demo data has been reset to initial state"})) + ).into_response(), + Err(e) => { + tracing::error!("Failed to reset demo data: {}", e); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": "Failed to reset demo data"})) + ).into_response() + } + } +} + +/// Get demo communities summary +async fn get_demo_communities( + State(state): State, +) -> impl IntoResponse { + if !state.config.is_demo() { + return ( + StatusCode::OK, + Json(json!({"communities": []})) + ).into_response(); + } + + let communities = sqlx::query_as::<_, (String, String, String, i64, i64)>( + r#" + SELECT + c.name, + c.slug, + COALESCE(c.description, '') as description, + (SELECT COUNT(*) FROM community_members WHERE community_id = c.id) as member_count, + (SELECT COUNT(*) FROM proposals WHERE community_id = c.id) as proposal_count + FROM communities c + WHERE c.slug IN ('aurora', 'civic-commons', 'makers') + ORDER BY c.name + "# + ) + .fetch_all(&state.pool) + .await; + + match communities { + Ok(rows) => { + let communities: Vec<_> = rows.iter().map(|(name, slug, desc, members, proposals)| { + json!({ + "name": name, + "slug": slug, + "description": desc, + "member_count": members, + "proposal_count": proposals + }) + }).collect(); + + (StatusCode::OK, Json(json!({"communities": communities}))).into_response() + } + Err(e) => { + tracing::error!("Failed to fetch demo communities: {}", e); + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": "Failed to fetch communities"})) + ).into_response() + } + } +} + +pub fn router(pool: PgPool, config: Arc) -> 
Router { + let state = DemoState { pool, config }; + + Router::new() + .route("/api/demo/status", get(get_demo_status)) + .route("/api/demo/reset", post(reset_demo)) + .route("/api/demo/communities", get(get_demo_communities)) + .with_state(state) +} diff --git a/backend/src/api/exports.rs b/backend/src/api/exports.rs new file mode 100644 index 0000000..5d79da6 --- /dev/null +++ b/backend/src/api/exports.rs @@ -0,0 +1,91 @@ +//! Public Data Export API endpoints. + +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::get, + Json, Router, +}; +use chrono::{DateTime, Utc}; +use serde::Deserialize; +use serde_json::{json, Value}; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::auth::AuthUser; +use crate::plugins::builtin::public_data_export::{ExportConfig, ExportJob, ExportService}; + +// ============================================================================ +// Request Types +// ============================================================================ + +#[derive(Debug, Deserialize)] +pub struct CreateExportRequest { + pub export_type: String, + pub format: String, + pub date_from: Option>, + pub date_to: Option>, +} + +// ============================================================================ +// Handlers +// ============================================================================ + +/// Get available export configurations for a community +async fn get_available( + _auth: AuthUser, + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let configs = ExportService::get_available(&pool, community_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(configs)) +} + +/// Create a new export job +async fn create_job( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let job_id = ExportService::create_job( + &pool, + community_id, + &req.export_type, + &req.format, + Some(auth.user_id), + req.date_from, + req.date_to, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": job_id}))) +} + +/// Get export job status +async fn get_job( + _auth: AuthUser, + Path(job_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let job = ExportService::get_job(&pool, job_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(job)) +} + +// ============================================================================ +// Router +// ============================================================================ + +pub fn router(pool: PgPool) -> Router { + Router::new() + .route("/api/communities/{community_id}/exports", get(get_available).post(create_job)) + .route("/api/exports/{job_id}", get(get_job)) + .with_state(pool) +} diff --git a/backend/src/api/federation.rs b/backend/src/api/federation.rs new file mode 100644 index 0000000..a82627d --- /dev/null +++ b/backend/src/api/federation.rs @@ -0,0 +1,210 @@ +//! Federation API endpoints. 
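Federation requests arrive server-to-server, so the unauthenticated `receive_federation_request` handler below only records a pending entry that a community admin must approve. A hypothetical payload a remote instance might POST to `/api/communities/{community_id}/federation/request` is sketched here; the field names mirror `IncomingFederationRequest`, while the instance URL, community name, and message text are made up.

```rust
use serde_json::{json, Value};

// Illustrative body of an incoming federation request from another instance.
fn example_incoming_federation_body() -> Value {
    json!({
        "from_instance_url": "https://likwid.example.org",
        "from_community_name": "Transition Town Assembly",
        "message": "We would like to share proposals on regional transport."
    })
}
```

As the TODO on the handler notes, rate limiting and instance signature verification would still be needed before exposing this endpoint in production.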
+ +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::{get, post}, + Json, Router, +}; +use serde::Deserialize; +use serde_json::{json, Value}; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::auth::AuthUser; +use crate::plugins::builtin::federation::{ + CommunityFederation, FederatedInstance, FederationService, +}; + +// ============================================================================ +// Request Types +// ============================================================================ + +#[derive(Debug, Deserialize)] +pub struct RegisterInstanceRequest { + pub url: String, + pub name: String, + pub description: Option, + pub public_key: Option, +} + +#[derive(Debug, Deserialize)] +pub struct CreateFederationRequest { + pub remote_instance_id: Uuid, + pub remote_community_id: Uuid, + pub remote_community_name: String, + pub sync_direction: String, +} + +#[derive(Debug, Deserialize)] +pub struct SetTrustLevelRequest { + pub trust_level: i32, +} + +#[derive(Debug, Deserialize)] +pub struct IncomingFederationRequest { + pub from_instance_url: String, + pub from_community_name: String, + pub message: Option, +} + +// ============================================================================ +// Handlers +// ============================================================================ + +/// Get all federated instances +async fn get_instances( + _auth: AuthUser, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let instances = FederationService::get_instances(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(instances)) +} + +/// Register a new federated instance +async fn register_instance( + _auth: AuthUser, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let instance_id = FederationService::register_instance( + &pool, + &req.url, + &req.name, + req.description.as_deref(), + req.public_key.as_deref(), + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": instance_id}))) +} + +/// Get federation statistics +async fn get_stats( + _auth: AuthUser, + Path(community_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let stats = FederationService::get_stats(&pool, community_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(stats)) +} + +/// Get community federations +async fn get_community_federations( + _auth: AuthUser, + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let federations = FederationService::get_community_federations(&pool, community_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(federations)) +} + +/// Request federation with another community +async fn request_federation( + _auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let federation_id = FederationService::create_federation( + &pool, + community_id, + req.remote_instance_id, + req.remote_community_id, + &req.remote_community_name, + &req.sync_direction, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": federation_id}))) +} + +/// Approve a federation request +async fn approve_federation( + auth: AuthUser, + Path(federation_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + FederationService::approve_federation(&pool, federation_id, auth.user_id) + 
.await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"success": true}))) +} + +/// Set trust level for an instance +async fn set_trust_level( + _auth: AuthUser, + Path(instance_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + FederationService::set_trust_level(&pool, instance_id, req.trust_level) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"success": true}))) +} + +/// Handle incoming federation request from another instance +/// Note: This endpoint is intentionally unauthenticated for server-to-server federation. +/// The request creates a PENDING entry that must be approved by a community admin. +/// TODO: Add rate limiting and instance signature verification for production. +async fn receive_federation_request( + Path(community_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Validate the community exists and accepts federation requests + let community_exists = sqlx::query_scalar!( + "SELECT EXISTS(SELECT 1 FROM communities WHERE id = $1 AND is_active = true) as \"exists!\"", + community_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + if !community_exists { + return Err((StatusCode::NOT_FOUND, "Community not found or inactive".to_string())); + } + + let request_id = FederationService::request_federation( + &pool, + &req.from_instance_url, + &req.from_community_name, + community_id, + req.message.as_deref(), + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": request_id}))) +} + +// ============================================================================ +// Router +// ============================================================================ + +pub fn router(pool: PgPool) -> Router { + Router::new() + // Instances + .route("/api/federation/instances", get(get_instances).post(register_instance)) + .route("/api/federation/instances/{instance_id}/trust", post(set_trust_level)) + // Community federations + .route("/api/communities/{community_id}/federation", get(get_community_federations).post(request_federation)) + .route("/api/communities/{community_id}/federation/stats", get(get_stats)) + .route("/api/communities/{community_id}/federation/request", post(receive_federation_request)) + .route("/api/federation/{federation_id}/approve", post(approve_federation)) + .with_state(pool) +} diff --git a/backend/src/api/gitlab.rs b/backend/src/api/gitlab.rs new file mode 100644 index 0000000..36c5de9 --- /dev/null +++ b/backend/src/api/gitlab.rs @@ -0,0 +1,348 @@ +//! GitLab Integration API +//! +//! Enables linking communities to GitLab projects for governance workflows +//! including issue tracking and merge request voting. 
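+//!
+//! A minimal configuration sketch (field names follow `CreateConnectionRequest`
+//! below; the GitLab host and project path are placeholders, not a real setup):
+//!
+//! ```ignore
+//! // POST /api/communities/{community_id}/gitlab  (community admins only)
+//! let body = serde_json::json!({
+//!     "gitlab_url": "https://gitlab.example.org",  // must use HTTPS
+//!     "project_path": "group/project",             // placeholder path
+//!     "sync_issues": true,
+//!     "sync_merge_requests": true,
+//!     "auto_create_proposals": false
+//! });
+//! ```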
+ +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::{get, post}, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use sqlx::PgPool; +use uuid::Uuid; +use chrono::{DateTime, Utc}; + +use crate::auth::AuthUser; + +// ============================================================================ +// Types +// ============================================================================ + +#[derive(Debug, Serialize)] +pub struct GitLabConnection { + pub id: Uuid, + pub community_id: Uuid, + pub gitlab_url: String, + pub project_path: String, + pub is_active: bool, + pub sync_issues: bool, + pub sync_merge_requests: bool, + pub auto_create_proposals: bool, + pub last_synced_at: Option>, +} + +#[derive(Debug, Deserialize)] +pub struct CreateConnectionRequest { + pub gitlab_url: String, + pub project_path: String, + #[allow(dead_code)] + pub access_token: Option, + pub sync_issues: Option, + pub sync_merge_requests: Option, + pub auto_create_proposals: Option, +} + +/// Request to update a GitLab connection. Designed for PUT endpoint. +#[allow(dead_code)] +#[derive(Debug, Deserialize)] +pub struct UpdateConnectionRequest { + pub is_active: Option, + pub sync_issues: Option, + pub sync_merge_requests: Option, + pub auto_create_proposals: Option, +} + +#[derive(Debug, Serialize)] +pub struct GitLabIssue { + pub id: Uuid, + pub gitlab_iid: i32, + pub title: String, + pub description: Option, + pub state: String, + pub author_username: Option, + pub labels: Vec, + pub proposal_id: Option, + pub gitlab_created_at: Option>, +} + +#[derive(Debug, Serialize)] +pub struct GitLabMergeRequest { + pub id: Uuid, + pub gitlab_iid: i32, + pub title: String, + pub description: Option, + pub state: String, + pub author_username: Option, + pub source_branch: Option, + pub target_branch: Option, + pub labels: Vec, + pub proposal_id: Option, + pub gitlab_created_at: Option>, +} + +// ============================================================================ +// Handlers +// ============================================================================ + +/// Get GitLab connection for a community +async fn get_connection( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + // Check membership + let membership = sqlx::query!( + "SELECT role FROM community_members WHERE community_id = $1 AND user_id = $2", + community_id, + auth.user_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + if membership.is_none() { + return Err((StatusCode::FORBIDDEN, "Not a community member".to_string())); + } + + let connection = sqlx::query!( + r#"SELECT id, community_id, gitlab_url, project_path, is_active, + sync_issues, sync_merge_requests, auto_create_proposals, last_synced_at + FROM gitlab_connections WHERE community_id = $1"#, + community_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(connection.map(|c| GitLabConnection { + id: c.id, + community_id: c.community_id, + gitlab_url: c.gitlab_url, + project_path: c.project_path, + is_active: c.is_active, + sync_issues: c.sync_issues, + sync_merge_requests: c.sync_merge_requests, + auto_create_proposals: c.auto_create_proposals, + last_synced_at: c.last_synced_at, + }))) +} + +/// Create or update GitLab connection +async fn create_connection( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // 
Check admin role + let membership = sqlx::query!( + "SELECT role FROM community_members WHERE community_id = $1 AND user_id = $2", + community_id, + auth.user_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::FORBIDDEN, "Not a community member".to_string()))?; + + if membership.role != "admin" { + return Err((StatusCode::FORBIDDEN, "Only admins can configure GitLab".to_string())); + } + + // Validate GitLab URL + if !req.gitlab_url.starts_with("https://") { + return Err((StatusCode::BAD_REQUEST, "GitLab URL must use HTTPS".to_string())); + } + + let connection = sqlx::query!( + r#"INSERT INTO gitlab_connections + (community_id, gitlab_url, project_path, sync_issues, sync_merge_requests, auto_create_proposals) + VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT (community_id) DO UPDATE SET + gitlab_url = $2, + project_path = $3, + sync_issues = COALESCE($4, gitlab_connections.sync_issues), + sync_merge_requests = COALESCE($5, gitlab_connections.sync_merge_requests), + auto_create_proposals = COALESCE($6, gitlab_connections.auto_create_proposals), + updated_at = NOW() + RETURNING id, community_id, gitlab_url, project_path, is_active, + sync_issues, sync_merge_requests, auto_create_proposals, last_synced_at"#, + community_id, + req.gitlab_url, + req.project_path, + req.sync_issues, + req.sync_merge_requests, + req.auto_create_proposals + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(GitLabConnection { + id: connection.id, + community_id: connection.community_id, + gitlab_url: connection.gitlab_url, + project_path: connection.project_path, + is_active: connection.is_active, + sync_issues: connection.sync_issues, + sync_merge_requests: connection.sync_merge_requests, + auto_create_proposals: connection.auto_create_proposals, + last_synced_at: connection.last_synced_at, + })) +} + +/// List GitLab issues for a community +async fn list_issues( + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let issues = sqlx::query!( + r#"SELECT gi.id, gi.gitlab_iid, gi.title, gi.description, gi.state, + gi.author_username, gi.labels, gi.proposal_id, gi.gitlab_created_at + FROM gitlab_issues gi + JOIN gitlab_connections gc ON gi.connection_id = gc.id + WHERE gc.community_id = $1 + ORDER BY gi.gitlab_iid DESC + LIMIT 100"#, + community_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(issues.into_iter().map(|i| GitLabIssue { + id: i.id, + gitlab_iid: i.gitlab_iid, + title: i.title, + description: i.description, + state: i.state, + author_username: i.author_username, + labels: i.labels.unwrap_or_default(), + proposal_id: i.proposal_id, + gitlab_created_at: i.gitlab_created_at, + }).collect())) +} + +/// List GitLab merge requests for a community +async fn list_merge_requests( + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let mrs = sqlx::query!( + r#"SELECT gmr.id, gmr.gitlab_iid, gmr.title, gmr.description, gmr.state, + gmr.author_username, gmr.source_branch, gmr.target_branch, + gmr.labels, gmr.proposal_id, gmr.gitlab_created_at + FROM gitlab_merge_requests gmr + JOIN gitlab_connections gc ON gmr.connection_id = gc.id + WHERE gc.community_id = $1 + ORDER BY gmr.gitlab_iid DESC + LIMIT 100"#, + community_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + 
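+    // Map DB rows into the API response shape; NULL label arrays default to an
+    // empty list, mirroring the issue listing above.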
Ok(Json(mrs.into_iter().map(|m| GitLabMergeRequest { + id: m.id, + gitlab_iid: m.gitlab_iid, + title: m.title, + description: m.description, + state: m.state, + author_username: m.author_username, + source_branch: m.source_branch, + target_branch: m.target_branch, + labels: m.labels.unwrap_or_default(), + proposal_id: m.proposal_id, + gitlab_created_at: m.gitlab_created_at, + }).collect())) +} + +/// Create proposal from GitLab issue +async fn create_proposal_from_issue( + auth: AuthUser, + Path((community_id, issue_id)): Path<(Uuid, Uuid)>, + State(pool): State, +) -> Result, (StatusCode, String)> { + // Check membership + let _membership = sqlx::query!( + "SELECT role FROM community_members WHERE community_id = $1 AND user_id = $2", + community_id, + auth.user_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::FORBIDDEN, "Not a community member".to_string()))?; + + // Get the issue + let issue = sqlx::query!( + r#"SELECT gi.id, gi.title, gi.description, gi.proposal_id + FROM gitlab_issues gi + JOIN gitlab_connections gc ON gi.connection_id = gc.id + WHERE gi.id = $1 AND gc.community_id = $2"#, + issue_id, + community_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "Issue not found".to_string()))?; + + if issue.proposal_id.is_some() { + return Err((StatusCode::CONFLICT, "Issue already linked to a proposal".to_string())); + } + + // Create proposal + let proposal = sqlx::query!( + r#"INSERT INTO proposals (community_id, author_id, title, description, status, voting_method) + VALUES ($1, $2, $3, $4, 'draft', 'approval') + RETURNING id"#, + community_id, + auth.user_id, + issue.title, + issue.description.unwrap_or_default() + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Link issue to proposal + sqlx::query!( + "UPDATE gitlab_issues SET proposal_id = $1 WHERE id = $2", + proposal.id, + issue_id + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Create default options + sqlx::query!( + "INSERT INTO proposal_options (proposal_id, label, sort_order) VALUES ($1, 'Approve', 1), ($1, 'Reject', 2)", + proposal.id + ) + .execute(&pool) + .await + .ok(); + + Ok(Json(serde_json::json!({ + "proposal_id": proposal.id, + "message": "Proposal created from GitLab issue" + }))) +} + +// ============================================================================ +// Router +// ============================================================================ + +pub fn router(pool: PgPool) -> Router { + Router::new() + .route("/api/communities/{community_id}/gitlab", get(get_connection).post(create_connection)) + .route("/api/communities/{community_id}/gitlab/issues", get(list_issues)) + .route("/api/communities/{community_id}/gitlab/merge-requests", get(list_merge_requests)) + .route("/api/communities/{community_id}/gitlab/issues/{issue_id}/create-proposal", post(create_proposal_from_issue)) + .with_state(pool) +} diff --git a/backend/src/api/health.rs b/backend/src/api/health.rs new file mode 100644 index 0000000..e3564fb --- /dev/null +++ b/backend/src/api/health.rs @@ -0,0 +1,25 @@ +use axum::{routing::get, Json, Router}; +use serde::Serialize; + +#[derive(Serialize)] +pub struct HealthResponse { + pub status: String, + pub version: String, +} + +pub fn router() -> Router { + Router::new() + .route("/health", 
get(health_check)) + .route("/", get(root)) +} + +async fn root() -> &'static str { + "Likwid - Modular Governance Platform" +} + +async fn health_check() -> Json { + Json(HealthResponse { + status: "healthy".to_string(), + version: env!("CARGO_PKG_VERSION").to_string(), + }) +} diff --git a/backend/src/api/invitations.rs b/backend/src/api/invitations.rs new file mode 100644 index 0000000..d2f43b7 --- /dev/null +++ b/backend/src/api/invitations.rs @@ -0,0 +1,280 @@ +//! Invitation API endpoints for invite-only registration. + +use axum::{ + extract::{Path, Query, State}, + http::StatusCode, + routing::{get, delete}, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use sqlx::PgPool; +use uuid::Uuid; +use chrono::{DateTime, Utc}; + +use crate::auth::AuthUser; +use super::permissions::{require_permission, user_has_permission, perms}; + +// ============================================================================ +// Types +// ============================================================================ + +#[derive(Debug, Serialize)] +pub struct Invitation { + pub id: Uuid, + pub code: String, + pub created_by: Uuid, + pub created_by_username: Option, + pub email: Option, + pub community_id: Option, + pub community_name: Option, + pub max_uses: Option, + pub uses_count: i32, + pub expires_at: Option>, + pub is_active: bool, + pub created_at: DateTime, +} + +#[derive(Debug, Deserialize)] +pub struct CreateInvitationRequest { + pub email: Option, + pub community_id: Option, + #[serde(default = "default_max_uses")] + pub max_uses: Option, + pub expires_in_hours: Option, +} + +fn default_max_uses() -> Option { Some(1) } + +#[derive(Debug, Deserialize)] +pub struct ListInvitationsQuery { + pub community_id: Option, + pub active_only: Option, +} + +#[derive(Debug, Serialize)] +pub struct InvitationValidation { + pub valid: bool, + pub email_restricted: bool, + pub community_id: Option, + pub community_name: Option, + pub expires_at: Option>, + pub error: Option, +} + +// ============================================================================ +// Handlers +// ============================================================================ + +/// Create a new invitation (requires invite permission) +async fn create_invitation( + auth: AuthUser, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Check permission: platform invite or community invite + if let Some(community_id) = req.community_id { + require_permission(&pool, auth.user_id, perms::USER_INVITE, Some(community_id)).await?; + } else { + require_permission(&pool, auth.user_id, perms::USER_INVITE, None).await?; + } + + // Generate code using database function + let code: String = sqlx::query_scalar!("SELECT generate_invitation_code()") + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
+ .ok_or((StatusCode::INTERNAL_SERVER_ERROR, "Failed to generate invitation code".to_string()))?; + + // Calculate expiration + let expires_at = req.expires_in_hours.map(|h| { + Utc::now() + chrono::Duration::hours(h as i64) + }); + + let invite = sqlx::query!( + r#"INSERT INTO invitations (code, created_by, email, community_id, max_uses, expires_at) + VALUES ($1, $2, $3, $4, $5, $6) + RETURNING id, code, created_by, email, community_id, max_uses, uses_count, + expires_at, is_active, created_at"#, + code, + auth.user_id, + req.email, + req.community_id, + req.max_uses, + expires_at + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Get creator username and community name + let creator = sqlx::query!("SELECT username FROM users WHERE id = $1", auth.user_id) + .fetch_optional(&pool) + .await + .ok() + .flatten(); + + let community = if let Some(cid) = req.community_id { + sqlx::query!("SELECT name FROM communities WHERE id = $1", cid) + .fetch_optional(&pool) + .await + .ok() + .flatten() + } else { + None + }; + + Ok(Json(Invitation { + id: invite.id, + code: invite.code, + created_by: invite.created_by, + created_by_username: creator.map(|c| c.username), + email: invite.email, + community_id: invite.community_id, + community_name: community.map(|c| c.name), + max_uses: invite.max_uses, + uses_count: invite.uses_count.unwrap_or(0), + expires_at: invite.expires_at, + is_active: invite.is_active.unwrap_or(true), + created_at: invite.created_at.unwrap_or_else(Utc::now), + })) +} + +/// List invitations created by current user or all (admin) +async fn list_invitations( + auth: AuthUser, + State(pool): State, + Query(query): Query, +) -> Result>, (StatusCode, String)> { + let is_admin = user_has_permission(&pool, auth.user_id, perms::PLATFORM_ADMIN, None).await?; + + // Use a single query with conditional filtering + let invites = sqlx::query!( + r#"SELECT i.*, u.username as creator_username, c.name as community_name + FROM invitations i + LEFT JOIN users u ON u.id = i.created_by + LEFT JOIN communities c ON c.id = i.community_id + WHERE ($1::boolean = true OR i.created_by = $2) + AND ($3::uuid IS NULL OR i.community_id = $3) + AND ($4::boolean IS NULL OR i.is_active = $4) + ORDER BY i.created_at DESC + LIMIT 100"#, + is_admin, + auth.user_id, + query.community_id, + query.active_only + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(invites.into_iter().map(|i| Invitation { + id: i.id, + code: i.code, + created_by: i.created_by, + created_by_username: Some(i.creator_username), + email: i.email, + community_id: i.community_id, + community_name: Some(i.community_name), + max_uses: i.max_uses, + uses_count: i.uses_count.unwrap_or(0), + expires_at: i.expires_at, + is_active: i.is_active.unwrap_or(true), + created_at: i.created_at.unwrap_or_else(Utc::now), + }).collect())) +} + +/// Validate an invitation code (public endpoint for registration) +async fn validate_invitation( + State(pool): State, + Path(code): Path, +) -> Result, (StatusCode, String)> { + let invite = sqlx::query!( + r#"SELECT i.*, c.name as community_name + FROM invitations i + LEFT JOIN communities c ON c.id = i.community_id + WHERE i.code = $1"#, + code + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + match invite { + None => Ok(Json(InvitationValidation { + valid: false, + email_restricted: false, + community_id: None, + community_name: None, + 
expires_at: None, + error: Some("Invalid invitation code".to_string()), + })), + Some(i) => { + let mut valid = i.is_active.unwrap_or(false); + let mut error = None; + + // Check expiration + if let Some(exp) = i.expires_at { + if exp < Utc::now() { + valid = false; + error = Some("Invitation has expired".to_string()); + } + } + + // Check max uses + if let Some(max) = i.max_uses { + if i.uses_count.unwrap_or(0) >= max { + valid = false; + error = Some("Invitation has reached maximum uses".to_string()); + } + } + + Ok(Json(InvitationValidation { + valid, + email_restricted: i.email.is_some(), + community_id: i.community_id, + community_name: Some(i.community_name), + expires_at: i.expires_at, + error, + })) + } + } +} + +/// Revoke an invitation +async fn revoke_invitation( + auth: AuthUser, + State(pool): State, + Path(invitation_id): Path, +) -> Result, (StatusCode, String)> { + // Check ownership or admin + let invite = sqlx::query!("SELECT created_by FROM invitations WHERE id = $1", invitation_id) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "Invitation not found".to_string()))?; + + let is_admin = user_has_permission(&pool, auth.user_id, perms::PLATFORM_ADMIN, None).await?; + + if invite.created_by != auth.user_id && !is_admin { + return Err((StatusCode::FORBIDDEN, "Not authorized to revoke this invitation".to_string())); + } + + sqlx::query!("UPDATE invitations SET is_active = FALSE WHERE id = $1", invitation_id) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(serde_json::json!({"success": true}))) +} + +// ============================================================================ +// Router +// ============================================================================ + +pub fn router(pool: PgPool) -> Router { + Router::new() + .route("/api/invitations", get(list_invitations).post(create_invitation)) + .route("/api/invitations/validate/{code}", get(validate_invitation)) + .route("/api/invitations/{id}", delete(revoke_invitation)) + .with_state(pool) +} diff --git a/backend/src/api/lifecycle.rs b/backend/src/api/lifecycle.rs new file mode 100644 index 0000000..08d543a --- /dev/null +++ b/backend/src/api/lifecycle.rs @@ -0,0 +1,247 @@ +//! Proposal Lifecycle API endpoints. 
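+//!
+//! An illustrative transition request (field names follow `TransitionStatusRequest`
+//! below; the status and trigger strings are assumed for the example and may differ
+//! from the values `LifecycleService` actually accepts):
+//!
+//! ```ignore
+//! // POST /api/proposals/{proposal_id}/lifecycle/transition
+//! let body = serde_json::json!({
+//!     "new_status": "voting",       // assumed status name
+//!     "trigger_type": "manual",     // assumed trigger type
+//!     "reason": "Deliberation period has ended"
+//! });
+//! ```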
+ +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::{get, post}, + Json, Router, +}; +use serde::Deserialize; +use serde_json::Value; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::auth::AuthUser; +use crate::plugins::builtin::proposal_lifecycle::{ + LifecycleService, ProposalAmendment, ProposalVersion, +}; + +// ============================================================================ +// Request Types +// ============================================================================ + +#[derive(Debug, Deserialize)] +pub struct TransitionStatusRequest { + pub new_status: String, + pub trigger_type: String, + pub reason: Option, +} + +#[derive(Debug, Deserialize)] +pub struct ForkProposalRequest { + pub reason: String, + pub community_id: Uuid, +} + +#[derive(Debug, Deserialize)] +pub struct CompareVersionsRequest { + pub from_version: i32, + pub to_version: i32, +} + +#[derive(Debug, Deserialize)] +pub struct ProposeAmendmentRequest { + pub title: String, + pub description: String, + pub suggested_changes: Value, +} + +#[derive(Debug, Deserialize)] +pub struct VoteAmendmentRequest { + pub vote: String, + pub comment: Option, +} + +#[derive(Debug, Deserialize)] +pub struct AcceptAmendmentRequest { + pub response: Option, +} + +// ============================================================================ +// Handlers +// ============================================================================ + +/// Get version history for a proposal +async fn get_versions( + Path(proposal_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let versions = LifecycleService::get_versions(&pool, proposal_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(versions)) +} + +/// Get a specific version +async fn get_version( + Path((proposal_id, version_number)): Path<(Uuid, i32)>, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let version = LifecycleService::get_version(&pool, proposal_id, version_number) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(version)) +} + +/// Compare two versions +async fn compare_versions( + Path(proposal_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let diff = LifecycleService::compare_versions(&pool, proposal_id, req.from_version, req.to_version) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(diff)) +} + +/// Transition proposal status +async fn transition_status( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let success = LifecycleService::transition_status( + &pool, + proposal_id, + &req.new_status, + auth.user_id, + &req.trigger_type, + req.reason.as_deref(), + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(serde_json::json!({"success": success}))) +} + +/// Fork a proposal +async fn fork_proposal( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let new_id = LifecycleService::fork_proposal( + &pool, + proposal_id, + auth.user_id, + &req.reason, + req.community_id, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(serde_json::json!({"id": new_id}))) +} + +/// Get forks of a proposal +async fn get_forks( + Path(proposal_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let 
forks = LifecycleService::get_forks(&pool, proposal_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(forks)) +} + +/// Get lifecycle summary +async fn get_lifecycle_summary( + Path(proposal_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let summary = LifecycleService::get_lifecycle_summary(&pool, proposal_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(summary)) +} + +/// Get amendments for a proposal +async fn get_amendments( + Path(proposal_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let amendments = LifecycleService::get_amendments(&pool, proposal_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(amendments)) +} + +/// Propose an amendment +async fn propose_amendment( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let amendment_id = LifecycleService::propose_amendment( + &pool, + proposal_id, + &req.title, + &req.description, + req.suggested_changes, + auth.user_id, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(serde_json::json!({"id": amendment_id}))) +} + +/// Vote on an amendment +async fn vote_amendment( + auth: AuthUser, + Path(amendment_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + LifecycleService::vote_amendment(&pool, amendment_id, auth.user_id, &req.vote, req.comment.as_deref()) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(serde_json::json!({"success": true}))) +} + +/// Accept an amendment +async fn accept_amendment( + auth: AuthUser, + Path(amendment_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + LifecycleService::accept_amendment(&pool, amendment_id, auth.user_id, req.response.as_deref()) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(serde_json::json!({"success": true}))) +} + +// ============================================================================ +// Router +// ============================================================================ + +pub fn router(pool: PgPool) -> Router { + Router::new() + // Versions + .route("/api/proposals/{proposal_id}/versions", get(get_versions)) + .route("/api/proposals/{proposal_id}/versions/{version_number}", get(get_version)) + .route("/api/proposals/{proposal_id}/versions/compare", post(compare_versions)) + // Lifecycle + .route("/api/proposals/{proposal_id}/lifecycle", get(get_lifecycle_summary)) + .route("/api/proposals/{proposal_id}/lifecycle/transition", post(transition_status)) + .route("/api/proposals/{proposal_id}/lifecycle/fork", post(fork_proposal)) + .route("/api/proposals/{proposal_id}/forks", get(get_forks)) + // Amendments + .route("/api/proposals/{proposal_id}/amendments", get(get_amendments).post(propose_amendment)) + .route("/api/amendments/{amendment_id}/vote", post(vote_amendment)) + .route("/api/amendments/{amendment_id}/accept", post(accept_amendment)) + .with_state(pool) +} diff --git a/backend/src/api/mod.rs b/backend/src/api/mod.rs new file mode 100644 index 0000000..013e92d --- /dev/null +++ b/backend/src/api/mod.rs @@ -0,0 +1,63 @@ +pub mod analytics; +pub mod approvals; +pub mod auth; +pub mod comments; +pub mod communities; +pub mod conflicts; +pub mod deliberation; +pub mod delegation; +pub mod demo; +pub mod 
exports; +pub mod federation; +pub mod gitlab; +pub mod health; +pub mod invitations; +pub mod lifecycle; +pub mod moderation; +pub mod moderation_ledger; +pub mod notifications; +pub mod permissions; +pub mod plugins; +pub mod proposals; +pub mod roles; +pub mod self_moderation; +pub mod settings; +pub mod users; +pub mod voting_config; +pub mod workflows; + +use axum::Router; +use sqlx::PgPool; +use std::sync::Arc; + +use crate::config::Config; + +pub fn create_router(pool: PgPool, config: Arc) -> Router { + Router::new() + .merge(health::router()) + .merge(auth::router(pool.clone())) + .merge(users::router(pool.clone())) + .merge(communities::router(pool.clone())) + .merge(plugins::router(pool.clone())) + .merge(proposals::router(pool.clone())) + .merge(moderation::router(pool.clone())) + .merge(comments::router(pool.clone())) + .merge(notifications::router(pool.clone())) + .merge(settings::router(pool.clone())) + .merge(deliberation::router(pool.clone())) + .merge(delegation::router(pool.clone())) + .merge(gitlab::router(pool.clone())) + .merge(roles::router(pool.clone())) + .merge(voting_config::router(pool.clone())) + .merge(invitations::router(pool.clone())) + .merge(approvals::router(pool.clone())) + .merge(moderation_ledger::router(pool.clone())) + .merge(analytics::router(pool.clone())) + .merge(lifecycle::router(pool.clone())) + .merge(exports::router(pool.clone())) + .merge(self_moderation::router(pool.clone())) + .merge(federation::router(pool.clone())) + .merge(conflicts::router(pool.clone())) + .merge(workflows::router(pool.clone())) + .merge(demo::router(pool, config)) +} diff --git a/backend/src/api/moderation.rs b/backend/src/api/moderation.rs new file mode 100644 index 0000000..d1e8b7c --- /dev/null +++ b/backend/src/api/moderation.rs @@ -0,0 +1,148 @@ +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::get, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use sqlx::PgPool; +use uuid::Uuid; +use chrono::{DateTime, Utc}; + +use crate::auth::AuthUser; +use crate::api::permissions::{require_permission, perms}; + +#[derive(Debug, Serialize)] +pub struct ModerationEntry { + pub id: Uuid, + pub community_id: Uuid, + pub moderator_username: Option, + pub target_username: Option, + pub action_type: String, + pub reason: String, + pub details: Option, + pub created_at: DateTime, +} + +#[derive(Debug, Deserialize)] +pub struct CreateModerationEntry { + pub target_user_id: Option, + pub action_type: String, + pub reason: String, + pub details: Option, +} + +pub fn router(pool: PgPool) -> Router { + Router::new() + .route("/api/communities/{community_id}/moderation", get(list_moderation).post(create_moderation)) + .with_state(pool) +} + +async fn list_moderation( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + // Require permission to view moderation reports + require_permission(&pool, auth.user_id, perms::MOD_VIEW_REPORTS, Some(community_id)).await?; + + let entries = sqlx::query!( + r#" + SELECT + m.id, m.community_id, m.action_type, m.reason, m.details, m.created_at, + mod_user.username as moderator_username, + target_user.username as target_username + FROM moderation_log m + LEFT JOIN users mod_user ON m.moderator_id = mod_user.id + LEFT JOIN users target_user ON m.target_user_id = target_user.id + WHERE m.community_id = $1 + ORDER BY m.created_at DESC + LIMIT 50 + "#, + community_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let 
result: Vec = entries + .into_iter() + .map(|row| ModerationEntry { + id: row.id, + community_id: row.community_id, + moderator_username: Some(row.moderator_username), + target_username: Some(row.target_username), + action_type: row.action_type, + reason: row.reason, + details: row.details, + created_at: row.created_at, + }) + .collect(); + + Ok(Json(result)) +} + +async fn create_moderation( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + use crate::api::permissions::user_has_permission; + + // Check specific permission based on action type + let has_permission = match req.action_type.as_str() { + "ban" | "unban" | "suspend" | "unsuspend" => { + user_has_permission(&pool, auth.user_id, perms::MOD_BAN_USERS, Some(community_id)).await? + } + "remove_content" | "hide_content" | "restore_content" => { + user_has_permission(&pool, auth.user_id, perms::MOD_REMOVE_CONTENT, Some(community_id)).await? + } + "admin_action" | "settings_change" => { + user_has_permission(&pool, auth.user_id, perms::COMMUNITY_ADMIN, Some(community_id)).await? + } + _ => { + // Default to requiring community.moderate permission + user_has_permission(&pool, auth.user_id, perms::COMMUNITY_MODERATE, Some(community_id)).await? + } + }; + + if !has_permission { + return Err((StatusCode::FORBIDDEN, format!("Permission required for action '{}'", req.action_type))); + } + + let entry = sqlx::query!( + r#" + INSERT INTO moderation_log (community_id, moderator_id, target_user_id, action_type, reason, details) + VALUES ($1, $2, $3, $4, $5, $6) + RETURNING id, created_at + "#, + community_id, + auth.user_id, + req.target_user_id, + req.action_type, + req.reason, + req.details + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + tracing::info!( + "Moderation action '{}' logged by {} in community {}", + req.action_type, + auth.username, + community_id + ); + + Ok(Json(ModerationEntry { + id: entry.id, + community_id, + moderator_username: Some(auth.username), + target_username: None, + action_type: req.action_type, + reason: req.reason, + details: req.details, + created_at: entry.created_at, + })) +} diff --git a/backend/src/api/moderation_ledger.rs b/backend/src/api/moderation_ledger.rs new file mode 100644 index 0000000..a953675 --- /dev/null +++ b/backend/src/api/moderation_ledger.rs @@ -0,0 +1,612 @@ +use axum::{ + extract::{Extension, Path, Query}, + http::StatusCode, + response::IntoResponse, + routing::{get, post}, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::auth::AuthUser; +use crate::plugins::builtin::moderation_ledger::{LedgerService, ModerationActionType}; + +pub fn router(pool: PgPool) -> Router { + Router::new() + .route("/api/ledger", get(list_entries)) + .route("/api/ledger/entry/{id}", get(get_entry)) + .route("/api/ledger/target/{target_type}/{target_id}", get(get_target_history)) + .route("/api/ledger/verify", get(verify_chain)) + .route("/api/ledger/stats", get(get_stats)) + .route("/api/ledger/export", get(export_ledger)) + .route("/api/ledger/create", post(create_entry)) + .layer(Extension(pool)) +} + +#[derive(Debug, Deserialize)] +pub struct ListQuery { + community_id: Option, + limit: Option, + offset: Option, +} + +#[derive(Debug, Deserialize)] +pub struct CreateEntryRequest { + community_id: Option, + action_type: String, + target_type: String, + target_id: Uuid, + reason: String, + 
rule_reference: Option, + evidence: Option, + target_snapshot: Option, + duration_hours: Option, + decision_type: Option, + vote_proposal_id: Option, + vote_result: Option, +} + +#[derive(Debug, Serialize)] +pub struct LedgerEntryResponse { + pub id: Uuid, + pub sequence_number: i64, + pub community_id: Option, + pub actor_user_id: Uuid, + pub actor_username: Option, + pub actor_role: String, + pub action_type: String, + pub target_type: String, + pub target_id: Uuid, + pub reason: String, + pub rule_reference: Option, + pub evidence: Option, + pub duration_hours: Option, + pub decision_type: String, + pub entry_hash: String, + pub created_at: chrono::DateTime, +} + +async fn list_entries( + Extension(pool): Extension, + auth: AuthUser, + Query(query): Query, +) -> Result)> { + let can_view = check_ledger_access(&pool, auth.user_id, query.community_id).await?; + if !can_view { + return Err(( + StatusCode::FORBIDDEN, + Json(json!({"error": "You don't have permission to view the moderation ledger"})), + )); + } + + let limit = query.limit.unwrap_or(50).min(100); + let offset = query.offset.unwrap_or(0); + + let entries = sqlx::query!( + r#"SELECT + ml.id, + ml.sequence_number, + ml.community_id, + ml.actor_user_id, + u.username AS "actor_username?", + ml.actor_role, + ml.action_type::text AS "action_type!", + ml.target_type, + ml.target_id, + ml.reason, + ml.rule_reference, + ml.evidence, + ml.duration_hours, + ml.decision_type, + ml.entry_hash, + ml.created_at + FROM moderation_ledger ml + LEFT JOIN users u ON u.id = ml.actor_user_id + WHERE ml.community_id IS NOT DISTINCT FROM $1 + ORDER BY ml.sequence_number DESC + LIMIT $2 OFFSET $3"#, + query.community_id, + limit, + offset, + ) + .fetch_all(&pool) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + let response: Vec = entries + .into_iter() + .map(|e| LedgerEntryResponse { + id: e.id, + sequence_number: e.sequence_number, + community_id: e.community_id, + actor_user_id: e.actor_user_id, + actor_username: e.actor_username, + actor_role: e.actor_role, + action_type: e.action_type, + target_type: e.target_type, + target_id: e.target_id, + reason: e.reason, + rule_reference: e.rule_reference, + evidence: e.evidence, + duration_hours: e.duration_hours, + decision_type: e.decision_type, + entry_hash: e.entry_hash, + created_at: e.created_at, + }) + .collect(); + + Ok(Json(response)) +} + +async fn get_entry( + Extension(pool): Extension, + auth: AuthUser, + Path(id): Path, +) -> Result)> { + // Use LedgerService for consistency + let entry = LedgerService::get_entry(&pool, id) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + match entry { + Some(e) => { + let can_view = check_ledger_access(&pool, auth.user_id, e.community_id).await?; + if !can_view { + return Err(( + StatusCode::FORBIDDEN, + Json(json!({"error": "Access denied"})), + )); + } + + Ok(Json(json!({ + "id": e.id, + "sequence_number": e.sequence_number, + "community_id": e.community_id, + "actor_user_id": e.actor_user_id, + "actor_role": e.actor_role, + "action_type": e.action_type, + "target_type": e.target_type, + "target_id": e.target_id, + "reason": e.reason, + "rule_reference": e.rule_reference, + "evidence": e.evidence, + "duration_hours": e.duration_hours, + "decision_type": e.decision_type, + "entry_hash": e.entry_hash, + "created_at": e.created_at, + }))) + } + None => Err(( + StatusCode::NOT_FOUND, + Json(json!({"error": "Entry 
not found"})), + )), + } +} + +async fn get_target_history( + Extension(pool): Extension, + auth: AuthUser, + Path((target_type, target_id)): Path<(String, Uuid)>, +) -> Result)> { + let entries = LedgerService::get_entries_for_target(&pool, &target_type, target_id) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + for entry in &entries { + let can_view = check_ledger_access(&pool, auth.user_id, entry.community_id).await?; + if !can_view { + return Err(( + StatusCode::FORBIDDEN, + Json(json!({"error": "Access denied to some entries"})), + )); + } + } + + Ok(Json(entries)) +} + +#[derive(Debug, Deserialize)] +pub struct VerifyQuery { + community_id: Option, +} + +async fn verify_chain( + Extension(pool): Extension, + auth: AuthUser, + Query(query): Query, +) -> Result)> { + let can_view = check_ledger_access(&pool, auth.user_id, query.community_id).await?; + if !can_view { + return Err(( + StatusCode::FORBIDDEN, + Json(json!({"error": "Access denied"})), + )); + } + + let result = LedgerService::verify_chain(&pool, query.community_id) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + Ok(Json(result)) +} + +async fn get_stats( + Extension(pool): Extension, + auth: AuthUser, + Query(query): Query, +) -> Result)> { + let can_view = check_ledger_access(&pool, auth.user_id, query.community_id).await?; + if !can_view { + return Err(( + StatusCode::FORBIDDEN, + Json(json!({"error": "Access denied"})), + )); + } + + let stats = LedgerService::get_stats(&pool, query.community_id) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + Ok(Json(stats)) +} + +#[derive(Debug, Deserialize)] +pub struct ExportQuery { + community_id: Option, + format: Option, +} + +async fn export_ledger( + Extension(pool): Extension, + auth: AuthUser, + Query(query): Query, +) -> Result)> { + let can_view = check_ledger_access(&pool, auth.user_id, query.community_id).await?; + if !can_view { + return Err(( + StatusCode::FORBIDDEN, + Json(json!({"error": "Access denied"})), + )); + } + + let format = query.format.unwrap_or_else(|| "json".to_string()); + + if format == "csv" { + let entries = LedgerService::get_entries(&pool, query.community_id, 100000, 0) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + let mut csv = String::from("id,sequence,community_id,actor_user_id,actor_role,action_type,target_type,target_id,reason,decision_type,entry_hash,created_at\n"); + for e in entries { + csv.push_str(&format!( + "{},{},{},{},{},{},{},{},\"{}\",{},{},{}\n", + e.id, + e.sequence_number, + e.community_id.map(|u| u.to_string()).unwrap_or_default(), + e.actor_user_id, + e.actor_role, + e.action_type, + e.target_type, + e.target_id, + e.reason.replace('"', "\"\""), + e.decision_type, + e.entry_hash, + e.created_at, + )); + } + + Ok(Json(json!({ + "format": "csv", + "data": csv, + }))) + } else { + let export = LedgerService::export_json(&pool, query.community_id) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + Ok(Json(export)) + } +} + +async fn create_entry( + Extension(pool): Extension, + auth: AuthUser, + Json(req): Json, +) -> Result)> { + let is_mod = check_moderator_access(&pool, auth.user_id, req.community_id).await?; + if !is_mod { + return Err(( + StatusCode::FORBIDDEN, 
+ Json(json!({"error": "Only moderators can create ledger entries"})), + )); + } + + if req.reason.len() < 20 { + return Err(( + StatusCode::BAD_REQUEST, + Json(json!({"error": "Reason must be at least 20 characters"})), + )); + } + + let actor_role = get_actor_role(&pool, auth.user_id, req.community_id).await?; + + let action_type = parse_action_type(&req.action_type).map_err(|e| { + (StatusCode::BAD_REQUEST, Json(json!({"error": e}))) + })?; + + let entry_id = LedgerService::create_entry( + &pool, + req.community_id, + auth.user_id, + &actor_role, + action_type, + &req.target_type, + req.target_id, + &req.reason, + req.rule_reference.as_deref(), + req.evidence, + req.target_snapshot, + req.duration_hours, + req.decision_type.as_deref().unwrap_or("unilateral"), + req.vote_proposal_id, + req.vote_result, + ) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + Ok(Json(json!({ + "id": entry_id, + "message": "Ledger entry created successfully" + }))) +} + +async fn check_ledger_access( + pool: &PgPool, + user_id: Uuid, + community_id: Option, +) -> Result)> { + let is_admin = sqlx::query_scalar!( + r#"SELECT EXISTS( + SELECT 1 FROM user_roles ur + JOIN roles r ON r.id = ur.role_id + WHERE ur.user_id = $1 AND r.name = 'platform_admin' + ) AS "exists!""#, + user_id + ) + .fetch_one(pool) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + if is_admin { + return Ok(true); + } + + if let Some(cid) = community_id { + let is_public = sqlx::query_scalar!( + r#"SELECT COALESCE( + (SELECT (cp.settings->>'public_ledger')::boolean + FROM community_plugins cp + JOIN plugins p ON p.id = cp.plugin_id + WHERE cp.community_id = $1 AND p.name = 'moderation_ledger'), + true + ) AS "is_public!""#, + cid + ) + .fetch_one(pool) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + if is_public { + let is_member = sqlx::query_scalar!( + r#"SELECT EXISTS( + SELECT 1 FROM community_members WHERE community_id = $1 AND user_id = $2 + ) AS "exists!""#, + cid, + user_id + ) + .fetch_one(pool) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + return Ok(is_member); + } + + return check_moderator_access(pool, user_id, Some(cid)).await; + } + + Ok(false) +} + +async fn check_moderator_access( + pool: &PgPool, + user_id: Uuid, + community_id: Option, +) -> Result)> { + let is_platform_mod = sqlx::query_scalar!( + r#"SELECT EXISTS( + SELECT 1 FROM user_roles ur + JOIN roles r ON r.id = ur.role_id + WHERE ur.user_id = $1 AND r.name IN ('platform_admin', 'platform_moderator') + ) AS "exists!""#, + user_id + ) + .fetch_one(pool) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + if is_platform_mod { + return Ok(true); + } + + if let Some(cid) = community_id { + let is_community_mod = sqlx::query_scalar!( + r#"SELECT EXISTS( + SELECT 1 FROM community_members + WHERE community_id = $1 AND user_id = $2 AND role IN ('admin', 'moderator') + ) AS "exists!""#, + cid, + user_id + ) + .fetch_one(pool) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + return Ok(is_community_mod); + } + + Ok(false) +} + +async fn get_actor_role( + pool: &PgPool, + user_id: Uuid, + community_id: Option, +) -> Result)> { + let 
platform_role = sqlx::query_scalar!( + r#"SELECT r.name + FROM user_roles ur + JOIN roles r ON r.id = ur.role_id + WHERE ur.user_id = $1 + ORDER BY CASE r.name + WHEN 'platform_admin' THEN 1 + WHEN 'platform_moderator' THEN 2 + ELSE 3 + END + LIMIT 1"#, + user_id + ) + .fetch_optional(pool) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + if let Some(role) = platform_role { + return Ok(role); + } + + if let Some(cid) = community_id { + let community_role = sqlx::query_scalar!( + "SELECT role FROM community_members WHERE community_id = $1 AND user_id = $2", + cid, + user_id + ) + .fetch_optional(pool) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": e.to_string()})), + ) + })?; + + if let Some(role) = community_role { + return Ok(role); + } + } + + Ok("user".to_string()) +} + +fn parse_action_type(action: &str) -> Result { + match action { + "content_remove" => Ok(ModerationActionType::ContentRemove), + "content_hide" => Ok(ModerationActionType::ContentHide), + "content_restore" => Ok(ModerationActionType::ContentRestore), + "content_edit" => Ok(ModerationActionType::ContentEdit), + "content_flag" => Ok(ModerationActionType::ContentFlag), + "content_unflag" => Ok(ModerationActionType::ContentUnflag), + "user_warn" => Ok(ModerationActionType::UserWarn), + "user_mute" => Ok(ModerationActionType::UserMute), + "user_unmute" => Ok(ModerationActionType::UserUnmute), + "user_suspend" => Ok(ModerationActionType::UserSuspend), + "user_unsuspend" => Ok(ModerationActionType::UserUnsuspend), + "user_ban" => Ok(ModerationActionType::UserBan), + "user_unban" => Ok(ModerationActionType::UserUnban), + "user_role_change" => Ok(ModerationActionType::UserRoleChange), + "community_setting_change" => Ok(ModerationActionType::CommunitySettingChange), + "community_rule_add" => Ok(ModerationActionType::CommunityRuleAdd), + "community_rule_edit" => Ok(ModerationActionType::CommunityRuleEdit), + "community_rule_remove" => Ok(ModerationActionType::CommunityRuleRemove), + "proposal_close" => Ok(ModerationActionType::ProposalClose), + "proposal_reopen" => Ok(ModerationActionType::ProposalReopen), + "proposal_archive" => Ok(ModerationActionType::ProposalArchive), + "vote_invalidate" => Ok(ModerationActionType::VoteInvalidate), + "vote_restore" => Ok(ModerationActionType::VoteRestore), + "escalate_to_admin" => Ok(ModerationActionType::EscalateToAdmin), + "escalate_to_community" => Ok(ModerationActionType::EscalateToCommunity), + "appeal_received" => Ok(ModerationActionType::AppealReceived), + "appeal_resolved" => Ok(ModerationActionType::AppealResolved), + _ => Err(format!("Unknown action type: {}", action)), + } +} diff --git a/backend/src/api/notifications.rs b/backend/src/api/notifications.rs new file mode 100644 index 0000000..635811b --- /dev/null +++ b/backend/src/api/notifications.rs @@ -0,0 +1,132 @@ +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::{get, post}, + Json, Router, +}; +use chrono::{DateTime, Utc}; +use serde::Serialize; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::auth::AuthUser; + +#[derive(Debug, Serialize)] +pub struct Notification { + pub id: Uuid, + pub notification_type: String, + pub title: String, + pub message: Option, + pub link: Option, + pub is_read: bool, + pub created_at: DateTime, +} + +pub fn router(pool: PgPool) -> Router { + Router::new() + .route("/api/notifications", get(list_notifications)) + .route("/api/notifications/unread-count", 
get(unread_count)) + .route("/api/notifications/{id}/read", post(mark_read)) + .route("/api/notifications/read-all", post(mark_all_read)) + .with_state(pool) +} + +async fn list_notifications( + auth: AuthUser, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let notifications = sqlx::query!( + r#" + SELECT id, type as notification_type, title, message, link, is_read, created_at + FROM notifications + WHERE user_id = $1 + ORDER BY created_at DESC + LIMIT 50 + "#, + auth.user_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let result = notifications.into_iter().map(|n| Notification { + id: n.id, + notification_type: n.notification_type, + title: n.title, + message: n.message, + link: n.link, + is_read: n.is_read, + created_at: n.created_at, + }).collect(); + + Ok(Json(result)) +} + +async fn unread_count( + auth: AuthUser, + State(pool): State, +) -> Result, (StatusCode, String)> { + let count = sqlx::query_scalar!( + "SELECT COUNT(*) FROM notifications WHERE user_id = $1 AND is_read = false", + auth.user_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(serde_json::json!({ "count": count.unwrap_or(0) }))) +} + +async fn mark_read( + auth: AuthUser, + Path(notification_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + sqlx::query!( + "UPDATE notifications SET is_read = true WHERE id = $1 AND user_id = $2", + notification_id, + auth.user_id + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(serde_json::json!({ "status": "read" }))) +} + +async fn mark_all_read( + auth: AuthUser, + State(pool): State, +) -> Result, (StatusCode, String)> { + sqlx::query!( + "UPDATE notifications SET is_read = true WHERE user_id = $1 AND is_read = false", + auth.user_id + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(serde_json::json!({ "status": "all_read" }))) +} + +#[allow(dead_code)] +pub async fn create_notification( + pool: &PgPool, + user_id: Uuid, + notification_type: &str, + title: &str, + message: Option<&str>, + link: Option<&str>, +) -> Result<(), sqlx::Error> { + sqlx::query!( + "INSERT INTO notifications (user_id, type, title, message, link) VALUES ($1, $2, $3, $4, $5)", + user_id, + notification_type, + title, + message, + link + ) + .execute(pool) + .await?; + Ok(()) +} diff --git a/backend/src/api/permissions.rs b/backend/src/api/permissions.rs new file mode 100644 index 0000000..fd8c66b --- /dev/null +++ b/backend/src/api/permissions.rs @@ -0,0 +1,131 @@ +//! Permission checking utilities for API endpoints. +//! +//! Provides reusable functions to check user permissions against the role system. + +use axum::http::StatusCode; +use sqlx::PgPool; +use uuid::Uuid; + +/// Check if a user has a specific permission, optionally within a community scope. +pub async fn user_has_permission( + pool: &PgPool, + user_id: Uuid, + permission: &str, + community_id: Option, +) -> Result { + let has_perm = sqlx::query_scalar!( + "SELECT user_has_permission($1, $2, $3)", + user_id, + permission, + community_id + ) + .fetch_one(pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .unwrap_or(false); + + Ok(has_perm) +} + +/// Require a permission, returning Forbidden error if not granted. 
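+///
+/// A typical handler call (an illustrative sketch mirroring how the moderation and
+/// invitation endpoints in this crate use it):
+///
+/// ```ignore
+/// // Inside an Axum handler with `auth: AuthUser` and `State(pool): State<PgPool>`:
+/// require_permission(&pool, auth.user_id, perms::MOD_VIEW_REPORTS, Some(community_id)).await?;
+/// // Continues on success; otherwise returns 403 FORBIDDEN with a descriptive message.
+/// ```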
+pub async fn require_permission(
+    pool: &PgPool,
+    user_id: Uuid,
+    permission: &str,
+    community_id: Option<Uuid>,
+) -> Result<(), (StatusCode, String)> {
+    if !user_has_permission(pool, user_id, permission, community_id).await? {
+        return Err((
+            StatusCode::FORBIDDEN,
+            format!("Permission '{}' required", permission),
+        ));
+    }
+    Ok(())
+}
+
+/// Require any of the given permissions.
+pub async fn require_any_permission(
+    pool: &PgPool,
+    user_id: Uuid,
+    permissions: &[&str],
+    community_id: Option<Uuid>,
+) -> Result<(), (StatusCode, String)> {
+    for perm in permissions {
+        if user_has_permission(pool, user_id, perm, community_id).await? {
+            return Ok(());
+        }
+    }
+    Err((
+        StatusCode::FORBIDDEN,
+        format!("One of these permissions required: {}", permissions.join(", ")),
+    ))
+}
+
+/// Check if user is a platform admin (has platform.admin permission).
+#[allow(dead_code)]
+pub async fn is_platform_admin(
+    pool: &PgPool,
+    user_id: Uuid,
+) -> Result<bool, (StatusCode, String)> {
+    user_has_permission(pool, user_id, "platform.admin", None).await
+}
+
+/// Require platform admin access.
+#[allow(dead_code)]
+pub async fn require_platform_admin(
+    pool: &PgPool,
+    user_id: Uuid,
+) -> Result<(), (StatusCode, String)> {
+    require_permission(pool, user_id, "platform.admin", None).await
+}
+
+/// Check if user is a community admin or moderator.
+#[allow(dead_code)]
+pub async fn is_community_staff(
+    pool: &PgPool,
+    user_id: Uuid,
+    community_id: Uuid,
+) -> Result<bool, (StatusCode, String)> {
+    let is_admin = user_has_permission(pool, user_id, "community.admin", Some(community_id)).await?;
+    if is_admin {
+        return Ok(true);
+    }
+    user_has_permission(pool, user_id, "community.moderate", Some(community_id)).await
+}
+
+/// Permission constants for common operations.
+/// Used throughout API handlers for authorization.
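+///
+/// # Example
+///
+/// Illustrative only (hypothetical moderation handler): accept either community admins
+/// or community moderators.
+/// ```ignore
+/// require_any_permission(
+///     &pool,
+///     auth.user_id,
+///     &[perms::COMMUNITY_ADMIN, perms::COMMUNITY_MODERATE],
+///     Some(community_id),
+/// )
+/// .await?;
+/// ```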
+pub mod perms { + // Platform-level + pub const PLATFORM_ADMIN: &str = "platform.admin"; + pub const PLATFORM_SETTINGS: &str = "platform.settings"; + pub const PLATFORM_PLUGINS: &str = "platform.plugins"; + + // Community-level + pub const COMMUNITY_CREATE: &str = "community.create"; + pub const COMMUNITY_ADMIN: &str = "community.admin"; + pub const COMMUNITY_SETTINGS: &str = "community.settings"; + pub const COMMUNITY_MODERATE: &str = "community.moderate"; + + // Proposals + pub const PROPOSAL_CREATE: &str = "proposal.create"; + pub const PROPOSAL_EDIT_OWN: &str = "proposal.edit_own"; + pub const PROPOSAL_EDIT_ANY: &str = "proposal.edit_any"; + pub const PROPOSAL_DELETE_OWN: &str = "proposal.delete_own"; + pub const PROPOSAL_DELETE_ANY: &str = "proposal.delete_any"; + pub const PROPOSAL_MANAGE_STATUS: &str = "proposal.manage_status"; + + // Voting + pub const VOTE_CAST: &str = "vote.cast"; + pub const VOTE_VIEW_RESULTS: &str = "vote.view_results"; + pub const VOTING_CONFIG: &str = "voting.configure"; + + // Moderation + pub const MOD_BAN_USERS: &str = "moderation.ban_users"; + pub const MOD_REMOVE_CONTENT: &str = "moderation.remove_content"; + pub const MOD_VIEW_REPORTS: &str = "moderation.view_reports"; + + // Users + pub const USER_MANAGE: &str = "users.manage"; + pub const USER_INVITE: &str = "users.invite"; +} diff --git a/backend/src/api/plugins.rs b/backend/src/api/plugins.rs new file mode 100644 index 0000000..eda0527 --- /dev/null +++ b/backend/src/api/plugins.rs @@ -0,0 +1,1490 @@ +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::{get, post, put}, + Extension, + Json, Router, +}; +use base64::{engine::general_purpose, Engine as _}; +use chrono::{DateTime, Utc}; +use ed25519_dalek::{Signature, VerifyingKey}; +use jsonschema::{Draft, JSONSchema}; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; +use sqlx::PgPool; +use sqlx::Row; +use std::net::IpAddr; +use std::sync::Arc; +use uuid::Uuid; +use sha2::{Digest, Sha256}; +use reqwest::Url; + +use crate::auth::AuthUser; +use crate::plugins::PluginManager; +use crate::plugins::wasm::host_api::PluginManifest; + +#[derive(Debug, Serialize)] +pub struct CommunityPluginInfo { + pub name: String, + pub version: String, + pub description: Option, + pub is_core: bool, + pub global_is_active: bool, + pub community_is_active: bool, + pub settings: Value, + pub settings_schema: Option, +} + +async fn get_plugin_policy( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let membership = sqlx::query!( + "SELECT role FROM community_members WHERE user_id = $1 AND community_id = $2", + auth.user_id, + community_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + match membership { + Some(m) if m.role == "admin" || m.role == "moderator" => {} + _ => return Err((StatusCode::FORBIDDEN, "Must be admin or moderator".to_string())), + } + + let row = sqlx::query!( + r#"SELECT settings as "settings!: serde_json::Value" FROM communities WHERE id = $1 AND is_active = true"#, + community_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
+ .ok_or((StatusCode::NOT_FOUND, "Community not found".to_string()))?; + + Ok(Json(PluginPolicyResponse { + trust_policy: parse_trust_policy(&row.settings), + install_sources: parse_install_sources(&row.settings), + allow_outbound_http: parse_bool(&row.settings, "plugin_allow_outbound_http", false), + http_egress_allowlist: parse_string_list(&row.settings, "plugin_http_egress_allowlist"), + registry_allowlist: parse_string_list(&row.settings, "plugin_registry_allowlist"), + allow_background_jobs: parse_bool(&row.settings, "plugin_allow_background_jobs", false), + trusted_publishers: parse_string_list(&row.settings, "plugin_trusted_publishers"), + })) +} + +async fn update_plugin_policy( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let membership = sqlx::query!( + "SELECT role FROM community_members WHERE user_id = $1 AND community_id = $2", + auth.user_id, + community_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + match membership { + Some(m) if m.role == "admin" || m.role == "moderator" => {} + _ => return Err((StatusCode::FORBIDDEN, "Must be admin or moderator".to_string())), + } + + let current = sqlx::query!( + r#"SELECT settings as "settings!: serde_json::Value" FROM communities WHERE id = $1 AND is_active = true"#, + community_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "Community not found".to_string()))?; + + let mut patch = serde_json::Map::new(); + let mut event_patch = serde_json::Map::new(); + + if let Some(tp) = req.trust_policy { + let v = trust_policy_str(tp).to_string(); + patch.insert("plugin_trust_policy".to_string(), Value::String(v.clone())); + event_patch.insert("trust_policy".to_string(), Value::String(v)); + } + + if let Some(sources) = &req.install_sources { + let v = install_sources_json(sources); + patch.insert("plugin_install_sources".to_string(), v.clone()); + event_patch.insert("install_sources".to_string(), v); + } + + if let Some(v) = req.allow_outbound_http { + patch.insert("plugin_allow_outbound_http".to_string(), Value::Bool(v)); + event_patch.insert("allow_outbound_http".to_string(), Value::Bool(v)); + } + + if let Some(list) = &req.http_egress_allowlist { + let mut out = list.clone(); + out.sort(); + out.dedup(); + let v = Value::Array(out.into_iter().map(Value::String).collect()); + patch.insert("plugin_http_egress_allowlist".to_string(), v.clone()); + event_patch.insert("http_egress_allowlist".to_string(), v); + } + + if let Some(list) = &req.registry_allowlist { + let mut out = list.clone(); + out.sort(); + out.dedup(); + let v = Value::Array(out.into_iter().map(Value::String).collect()); + patch.insert("plugin_registry_allowlist".to_string(), v.clone()); + event_patch.insert("registry_allowlist".to_string(), v); + } + + if let Some(v) = req.allow_background_jobs { + patch.insert("plugin_allow_background_jobs".to_string(), Value::Bool(v)); + event_patch.insert("allow_background_jobs".to_string(), Value::Bool(v)); + } + + if let Some(list) = &req.trusted_publishers { + let mut out = list.clone(); + out.sort(); + out.dedup(); + let v = Value::Array(out.into_iter().map(Value::String).collect()); + patch.insert("plugin_trusted_publishers".to_string(), v.clone()); + event_patch.insert("trusted_publishers".to_string(), v); + } + + if patch.is_empty() { + return Ok(Json(PluginPolicyResponse { + trust_policy: 
parse_trust_policy(&current.settings),
+            install_sources: parse_install_sources(&current.settings),
+            allow_outbound_http: parse_bool(&current.settings, "plugin_allow_outbound_http", false),
+            http_egress_allowlist: parse_string_list(&current.settings, "plugin_http_egress_allowlist"),
+            registry_allowlist: parse_string_list(&current.settings, "plugin_registry_allowlist"),
+            allow_background_jobs: parse_bool(&current.settings, "plugin_allow_background_jobs", false),
+            trusted_publishers: parse_string_list(&current.settings, "plugin_trusted_publishers"),
+        }));
+    }
+
+    let patch_value = Value::Object(patch);
+
+    let row = sqlx::query!(
+        r#"
+        UPDATE communities
+        SET settings = settings || $2::jsonb,
+            updated_at = NOW()
+        WHERE id = $1 AND is_active = true
+        RETURNING settings as "settings!: serde_json::Value"
+        "#,
+        community_id,
+        patch_value
+    )
+    .fetch_optional(&pool)
+    .await
+    .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?
+    .ok_or((StatusCode::NOT_FOUND, "Community not found".to_string()))?;
+
+    let _ = sqlx::query!(
+        r#"INSERT INTO public_events (community_id, actor_user_id, plugin_name, event_type, payload)
+           VALUES ($1, $2, NULL, 'plugin.policy_updated', $3)"#,
+        community_id,
+        auth.user_id,
+        Value::Object(event_patch)
+    )
+    .execute(&pool)
+    .await;
+
+    Ok(Json(PluginPolicyResponse {
+        trust_policy: parse_trust_policy(&row.settings),
+        install_sources: parse_install_sources(&row.settings),
+        allow_outbound_http: parse_bool(&row.settings, "plugin_allow_outbound_http", false),
+        http_egress_allowlist: parse_string_list(&row.settings, "plugin_http_egress_allowlist"),
+        registry_allowlist: parse_string_list(&row.settings, "plugin_registry_allowlist"),
+        allow_background_jobs: parse_bool(&row.settings, "plugin_allow_background_jobs", false),
+        trusted_publishers: parse_string_list(&row.settings, "plugin_trusted_publishers"),
+    }))
+}
+
+#[derive(Debug, Deserialize)]
+pub struct UpdateCommunityPluginRequest {
+    pub is_active: Option<bool>,
+    pub settings: Option<Value>,
+}
+
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum PluginTrustPolicy {
+    SignedOnly,
+    UnsignedAllowed,
+}
+
+#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "snake_case")]
+pub enum PluginInstallSource {
+    Upload,
+    Registry,
+}
+
+#[derive(Debug, Serialize)]
+pub struct PluginPolicyResponse {
+    pub trust_policy: PluginTrustPolicy,
+    pub install_sources: Vec<PluginInstallSource>,
+    pub allow_outbound_http: bool,
+    pub http_egress_allowlist: Vec<String>,
+    pub registry_allowlist: Vec<String>,
+    pub allow_background_jobs: bool,
+    pub trusted_publishers: Vec<String>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct UpdatePluginPolicyRequest {
+    pub trust_policy: Option<PluginTrustPolicy>,
+    pub install_sources: Option<Vec<PluginInstallSource>>,
+    pub allow_outbound_http: Option<bool>,
+    pub http_egress_allowlist: Option<Vec<String>>,
+    pub registry_allowlist: Option<Vec<String>>,
+    pub allow_background_jobs: Option<bool>,
+    pub trusted_publishers: Option<Vec<String>>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct UploadPluginPackageRequest {
+    pub name: String,
+    pub version: String,
+    pub description: Option<String>,
+    pub publisher: Option<String>,
+    pub manifest: Value,
+    pub wasm_base64: String,
+    pub signature_base64: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct InstallRegistryPluginPackageRequest {
+    pub url: String,
+}
+
+#[derive(Debug, Serialize)]
+pub struct CommunityPluginPackageInfo {
+    pub package_id: Uuid,
+    pub name: String,
+    pub version: String,
+    pub description: Option<String>,
+    pub publisher: String,
+    pub source: String,
+    pub registry_url: Option<String>,
+    pub wasm_sha256: String,
+
pub manifest: Value, + pub settings: Value, + pub signature_present: bool, + pub installed_at: DateTime, + pub is_active: bool, +} + +#[derive(Debug, Deserialize)] +pub struct UpdateCommunityPluginPackageRequest { + pub is_active: Option, + pub settings: Option, +} + +fn redacted_settings_keys(settings: &Value) -> Vec { + let Some(obj) = settings.as_object() else { + return Vec::new(); + }; + + let mut keys: Vec = obj + .keys() + .map(|k| { + let lower = k.to_ascii_lowercase(); + if lower.contains("secret") + || lower.contains("token") + || lower.contains("password") + || lower.ends_with("_key") + || lower.ends_with("_token") + { + "".to_string() + } else { + k.to_string() + } + }) + .collect(); + + keys.sort(); + keys.dedup(); + keys +} + +pub fn router(pool: PgPool) -> Router { + Router::new() + .route( + "/api/communities/{community_id}/plugins", + get(list_community_plugins), + ) + .route( + "/api/communities/{community_id}/plugins/{plugin_name}", + put(update_community_plugin), + ) + .route( + "/api/communities/{community_id}/plugin-policy", + get(get_plugin_policy).put(update_plugin_policy), + ) + .route( + "/api/communities/{community_id}/plugin-packages", + get(list_community_plugin_packages).post(upload_plugin_package), + ) + .route( + "/api/communities/{community_id}/plugin-packages/{package_id}", + put(update_community_plugin_package), + ) + .route( + "/api/communities/{community_id}/plugin-packages/install-registry", + post(install_registry_plugin_package), + ) + .with_state(pool) +} + +fn redact_settings_values(settings: &Value) -> Value { + let Some(obj) = settings.as_object() else { + return settings.clone(); + }; + + let mut out = serde_json::Map::new(); + for (k, v) in obj { + let lower = k.to_ascii_lowercase(); + if lower.contains("secret") + || lower.contains("token") + || lower.contains("password") + || lower.ends_with("_key") + || lower.ends_with("_token") + { + out.insert(k.clone(), Value::String("".to_string())); + } else { + out.insert(k.clone(), v.clone()); + } + } + + Value::Object(out) +} + +fn decode_base64(s: &str) -> Result, (StatusCode, String)> { + general_purpose::STANDARD + .decode(s) + .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid base64".to_string())) +} + +fn sha256_hex(bytes: &[u8]) -> String { + let mut hasher = Sha256::new(); + hasher.update(bytes); + let out = hasher.finalize(); + format!("{:x}", out) +} + +fn verify_signature_if_required( + trust_policy: PluginTrustPolicy, + trusted_publishers: &[String], + publisher: &str, + signature_base64: Option<&str>, + wasm_sha256: &str, +) -> Result>, (StatusCode, String)> { + let Some(sig_b64) = signature_base64 else { + if matches!(trust_policy, PluginTrustPolicy::SignedOnly) { + return Err((StatusCode::FORBIDDEN, "Plugin must be signed".to_string())); + } + return Ok(None); + }; + + let sig_bytes = decode_base64(sig_b64)?; + let sig_arr: [u8; 64] = sig_bytes + .as_slice() + .try_into() + .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid signature".to_string()))?; + let sig = Signature::from_bytes(&sig_arr); + + if matches!(trust_policy, PluginTrustPolicy::SignedOnly) { + if trusted_publishers.is_empty() { + return Err((StatusCode::FORBIDDEN, "No trusted publishers configured".to_string())); + } + if !trusted_publishers.iter().any(|p| p == publisher) { + return Err((StatusCode::FORBIDDEN, "Publisher not trusted".to_string())); + } + } + + let key_bytes = decode_base64(publisher) + .map_err(|_| (StatusCode::FORBIDDEN, "Invalid publisher key".to_string()))?; + let key_arr: [u8; 32] = key_bytes + .as_slice() 
+ .try_into() + .map_err(|_| (StatusCode::FORBIDDEN, "Invalid publisher key".to_string()))?; + let key = VerifyingKey::from_bytes(&key_arr) + .map_err(|_| (StatusCode::FORBIDDEN, "Invalid publisher key".to_string()))?; + + key.verify_strict(wasm_sha256.as_bytes(), &sig) + .map_err(|_| (StatusCode::FORBIDDEN, "Invalid signature".to_string()))?; + + Ok(Some(sig_arr.to_vec())) +} + +fn enforce_registry_allowlist(url: &Url, allowlist: &[String]) -> Result<(), (StatusCode, String)> { + let host = url + .host_str() + .ok_or((StatusCode::BAD_REQUEST, "Registry URL must include host".to_string()))?; + + if let Ok(ip) = host.parse::() { + let is_disallowed = match ip { + IpAddr::V4(v4) => v4.is_loopback() || v4.is_private() || v4.is_link_local() || v4.is_unspecified(), + IpAddr::V6(v6) => { + v6.is_loopback() || v6.is_unique_local() || v6.is_unicast_link_local() || v6.is_unspecified() + } + }; + + if is_disallowed { + return Err((StatusCode::FORBIDDEN, "Registry host is not allowed".to_string())); + } + } + + if host.eq_ignore_ascii_case("localhost") { + return Err((StatusCode::FORBIDDEN, "Registry host is not allowed".to_string())); + } + + if !allowlist.is_empty() && !allowlist.iter().any(|h| h == host) { + return Err((StatusCode::FORBIDDEN, "Registry host not in allowlist".to_string())); + } + + Ok(()) +} + +async fn ensure_admin_or_moderator( + pool: &PgPool, + user_id: Uuid, + community_id: Uuid, +) -> Result<(), (StatusCode, String)> { + let membership = sqlx::query!( + "SELECT role FROM community_members WHERE user_id = $1 AND community_id = $2", + user_id, + community_id + ) + .fetch_optional(pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + match membership { + Some(m) if m.role == "admin" || m.role == "moderator" => Ok(()), + _ => Err((StatusCode::FORBIDDEN, "Must be admin or moderator".to_string())), + } +} + +async fn load_community_settings( + pool: &PgPool, + community_id: Uuid, +) -> Result { + let row = sqlx::query!( + r#"SELECT settings as "settings!: serde_json::Value" FROM communities WHERE id = $1 AND is_active = true"#, + community_id + ) + .fetch_optional(pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
+ .ok_or((StatusCode::NOT_FOUND, "Community not found".to_string()))?; + + Ok(row.settings) +} + +async fn list_community_plugin_packages( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + ensure_admin_or_moderator(&pool, auth.user_id, community_id).await?; + + let rows = sqlx::query( + r#" + SELECT + pp.id as package_id, + pp.name, + pp.version, + pp.description, + COALESCE(pp.publisher, '') as publisher, + pp.source, + pp.registry_url, + pp.wasm_sha256, + pp.manifest, + (pp.signature IS NOT NULL) as signature_present, + COALESCE(cpp.settings, '{}'::jsonb) as settings, + cpp.installed_at, + cpp.is_active + FROM community_plugin_packages cpp + JOIN plugin_packages pp ON pp.id = cpp.package_id + WHERE cpp.community_id = $1 + ORDER BY cpp.installed_at DESC + "#, + ) + .bind(community_id) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let mut out: Vec = Vec::new(); + for r in rows { + out.push(CommunityPluginPackageInfo { + package_id: r + .try_get::("package_id") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?, + name: r + .try_get::("name") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?, + version: r + .try_get::("version") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?, + description: r + .try_get::, _>("description") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?, + publisher: r + .try_get::("publisher") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?, + source: r + .try_get::("source") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?, + registry_url: r + .try_get::, _>("registry_url") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?, + wasm_sha256: r + .try_get::("wasm_sha256") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?, + manifest: r + .try_get::("manifest") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?, + settings: redact_settings_values( + &r.try_get::("settings") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?, + ), + signature_present: r + .try_get::("signature_present") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?, + installed_at: r + .try_get::, _>("installed_at") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?, + is_active: r + .try_get::("is_active") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?, + }); + } + + Ok(Json(out)) +} + +async fn update_community_plugin_package( + auth: AuthUser, + Path((community_id, package_id)): Path<(Uuid, Uuid)>, + State(pool): State, + Extension(plugins): Extension>, + Json(req): Json, +) -> Result, (StatusCode, String)> { + if req.is_active.is_none() && req.settings.is_none() { + return Err(( + StatusCode::BAD_REQUEST, + "Must provide is_active and/or settings".to_string(), + )); + } + + ensure_admin_or_moderator(&pool, auth.user_id, community_id).await?; + + let pkg = sqlx::query!( + r#"SELECT id, name, version, description, COALESCE(publisher,'') as publisher, source, registry_url, wasm_sha256, + manifest as "manifest!: serde_json::Value", (signature IS NOT NULL) as "signature_present!: bool" + FROM plugin_packages + WHERE id = $1"#, + package_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
+ .ok_or((StatusCode::NOT_FOUND, "Package not found".to_string()))?; + + let link_row = sqlx::query( + r#"SELECT is_active, COALESCE(settings,'{}'::jsonb) as settings, installed_at + FROM community_plugin_packages + WHERE community_id = $1 AND package_id = $2"#, + ) + .bind(community_id) + .bind(package_id) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "Package not installed for community".to_string()))?; + + let link_is_active: bool = link_row + .try_get("is_active") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + let link_settings: Value = link_row + .try_get("settings") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + let _link_installed_at: DateTime = link_row + .try_get("installed_at") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let manifest: PluginManifest = serde_json::from_value(pkg.manifest.clone()) + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Stored manifest invalid: {e}")))?; + + if let Some(settings) = &req.settings { + if let Some(schema) = &manifest.settings_schema { + if !schema.is_null() { + let compiled = JSONSchema::options() + .with_draft(Draft::Draft7) + .compile(schema) + .map_err(|e| { + (StatusCode::INTERNAL_SERVER_ERROR, format!("Invalid settings schema: {e}")) + })?; + + if !compiled.is_valid(settings) { + let errors = compiled.validate(settings).err().unwrap(); + let msgs: Vec = errors.take(5).map(|e| e.to_string()).collect(); + return Err(( + StatusCode::BAD_REQUEST, + format!("Invalid settings: {}", msgs.join("; ")), + )); + } + } + } + } + + let new_is_active = req.is_active.unwrap_or(link_is_active); + let new_settings = req.settings.clone().unwrap_or(link_settings.clone()); + + let row = sqlx::query( + r#"UPDATE community_plugin_packages + SET is_active = $3, + settings = $4 + WHERE community_id = $1 AND package_id = $2 + RETURNING installed_at"#, + ) + .bind(community_id) + .bind(package_id) + .bind(new_is_active) + .bind(new_settings.clone()) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let updated_installed_at: DateTime = row + .try_get("installed_at") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // If activating, deactivate any other active versions of this plugin name in THIS community, + // and invoke their lifecycle hooks. 
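+    // Illustrative example of the invariant enforced below: if "example-plugin" v1.1.0 is
+    // currently active and v1.2.0 has just been activated, v1.1.0 is switched off and its
+    // change hook fires with active -> inactive, so at most one version of a given plugin
+    // name stays active per community. (Plugin name and versions here are hypothetical.)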
+ if new_is_active { + let rows = sqlx::query( + r#" + SELECT cpp.package_id, COALESCE(cpp.settings,'{}'::jsonb) as settings + FROM community_plugin_packages cpp + JOIN plugin_packages pp ON pp.id = cpp.package_id + WHERE cpp.community_id = $1 + AND cpp.is_active = true + AND pp.name = $2 + AND cpp.package_id <> $3 + "#, + ) + .bind(community_id) + .bind(&pkg.name) + .bind(package_id) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + for r in rows { + let other_package_id: Uuid = r + .try_get("package_id") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + let other_settings: Value = r + .try_get("settings") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let _ = sqlx::query( + "UPDATE community_plugin_packages SET is_active = false WHERE community_id = $1 AND package_id = $2", + ) + .bind(community_id) + .bind(other_package_id) + .execute(&pool) + .await; + + plugins + .handle_community_plugin_package_change( + community_id, + Some(auth.user_id), + other_package_id, + true, + other_settings.clone(), + false, + other_settings, + ) + .await; + } + } + + plugins + .handle_community_plugin_package_change( + community_id, + Some(auth.user_id), + package_id, + link_is_active, + link_settings.clone(), + new_is_active, + new_settings.clone(), + ) + .await; + + if req.settings.is_some() { + let keys = redacted_settings_keys(req.settings.as_ref().unwrap()); + let _ = sqlx::query!( + r#"INSERT INTO public_events (community_id, actor_user_id, plugin_name, event_type, payload) + VALUES ($1, $2, NULL, 'plugin.package_settings_updated', $3)"#, + community_id, + auth.user_id, + json!({"package_id": package_id, "name": pkg.name, "version": pkg.version, "keys": keys}) + ) + .execute(&pool) + .await; + } + + if req.is_active.is_some() { + let _ = sqlx::query!( + r#"INSERT INTO public_events (community_id, actor_user_id, plugin_name, event_type, payload) + VALUES ($1, $2, NULL, $3, $4)"#, + community_id, + auth.user_id, + if new_is_active { "plugin.package_activated" } else { "plugin.package_deactivated" }, + json!({"package_id": package_id, "name": pkg.name, "version": pkg.version}) + ) + .execute(&pool) + .await; + } + + Ok(Json(CommunityPluginPackageInfo { + package_id, + name: pkg.name, + version: pkg.version, + description: pkg.description, + publisher: pkg.publisher.unwrap_or_default(), + source: pkg.source, + registry_url: pkg.registry_url, + wasm_sha256: pkg.wasm_sha256, + manifest: pkg.manifest, + settings: redact_settings_values(&new_settings), + signature_present: pkg.signature_present, + installed_at: updated_installed_at, + is_active: new_is_active, + })) +} + +async fn upload_plugin_package( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Extension(plugins): Extension>, + Json(req): Json, +) -> Result, (StatusCode, String)> { + ensure_admin_or_moderator(&pool, auth.user_id, community_id).await?; + + let settings = load_community_settings(&pool, community_id).await?; + let trust_policy = parse_trust_policy(&settings); + let sources = parse_install_sources(&settings); + let trusted_publishers = parse_string_list(&settings, "plugin_trusted_publishers"); + + if !sources.contains(&PluginInstallSource::Upload) { + return Err((StatusCode::FORBIDDEN, "Upload installs are disabled by policy".to_string())); + } + + let publisher = req.publisher.unwrap_or_default(); + + let wasm_bytes = decode_base64(&req.wasm_base64)?; + let wasm_sha256 = sha256_hex(&wasm_bytes); + + let parsed_manifest: 
PluginManifest = serde_json::from_value(req.manifest.clone()) + .map_err(|e| (StatusCode::BAD_REQUEST, format!("Invalid manifest: {e}")))?; + + if parsed_manifest.name != req.name { + return Err(( + StatusCode::BAD_REQUEST, + "Manifest name must match request name".to_string(), + )); + } + + if parsed_manifest.version != req.version { + return Err(( + StatusCode::BAD_REQUEST, + "Manifest version must match request version".to_string(), + )); + } + + let signature_bytes = verify_signature_if_required( + trust_policy, + &trusted_publishers, + &publisher, + req.signature_base64.as_deref(), + &wasm_sha256, + )?; + + let package_id = sqlx::query( + r#" + INSERT INTO plugin_packages + (name, version, description, publisher, source, registry_url, wasm_sha256, wasm_bytes, manifest, signature) + VALUES + ($1, $2, $3, $4, 'upload', NULL, $5, $6, $7, $8) + ON CONFLICT (name, version, publisher, wasm_sha256) + DO UPDATE SET + description = EXCLUDED.description, + manifest = EXCLUDED.manifest, + signature = EXCLUDED.signature + RETURNING id + "#, + ) + .bind(&req.name) + .bind(&req.version) + .bind(&req.description) + .bind(&publisher) + .bind(&wasm_sha256) + .bind(&wasm_bytes) + .bind(&req.manifest) + .bind(&signature_bytes) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .try_get::("id") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let row = sqlx::query( + r#" + INSERT INTO community_plugin_packages (community_id, package_id, installed_by, is_active) + VALUES ($1, $2, $3, true) + ON CONFLICT (community_id, package_id) + DO UPDATE SET is_active = EXCLUDED.is_active + RETURNING installed_at, is_active + "#, + ) + .bind(community_id) + .bind(package_id) + .bind(auth.user_id) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Deactivate other active versions of this plugin in THIS community. + let rows = sqlx::query( + r#" + SELECT cpp.package_id, COALESCE(cpp.settings,'{}'::jsonb) as settings + FROM community_plugin_packages cpp + JOIN plugin_packages pp ON pp.id = cpp.package_id + WHERE cpp.community_id = $1 + AND cpp.is_active = true + AND pp.name = $2 + AND cpp.package_id <> $3 + "#, + ) + .bind(community_id) + .bind(&req.name) + .bind(package_id) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + for r in rows { + let other_package_id: Uuid = r + .try_get("package_id") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + let other_settings: Value = r + .try_get("settings") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let _ = sqlx::query( + "UPDATE community_plugin_packages SET is_active = false WHERE community_id = $1 AND package_id = $2", + ) + .bind(community_id) + .bind(other_package_id) + .execute(&pool) + .await; + + plugins + .handle_community_plugin_package_change( + community_id, + Some(auth.user_id), + other_package_id, + true, + other_settings.clone(), + false, + other_settings, + ) + .await; + } + + // Activate lifecycle hook for the newly uploaded package. 
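+    // Note (added for clarity): the call below reports the transition from "not active,
+    // empty settings `{}`" to "active, empty settings `{}`". Uploaded packages start without
+    // settings; admins configure them later via the package update endpoint, where the
+    // manifest's settings schema is validated.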
+ plugins + .handle_community_plugin_package_change( + community_id, + Some(auth.user_id), + package_id, + false, + json!({}), + true, + json!({}), + ) + .await; + + let installed_at = row + .try_get::, _>("installed_at") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + let is_active = row + .try_get::("is_active") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let _ = sqlx::query!( + r#"INSERT INTO public_events (community_id, actor_user_id, plugin_name, event_type, payload) + VALUES ($1, $2, NULL, 'plugin.package_uploaded', $3)"#, + community_id, + auth.user_id, + json!({"name": req.name, "version": req.version, "publisher": publisher, "sha256": wasm_sha256}) + ) + .execute(&pool) + .await; + + Ok(Json(CommunityPluginPackageInfo { + package_id, + name: req.name, + version: req.version, + description: req.description, + publisher, + source: "upload".to_string(), + registry_url: None, + wasm_sha256, + manifest: req.manifest, + settings: json!({}), + signature_present: signature_bytes.is_some(), + installed_at, + is_active, + })) +} + +async fn install_registry_plugin_package( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Extension(plugins): Extension>, + Json(req): Json, +) -> Result, (StatusCode, String)> { + ensure_admin_or_moderator(&pool, auth.user_id, community_id).await?; + + let settings = load_community_settings(&pool, community_id).await?; + let trust_policy = parse_trust_policy(&settings); + let sources = parse_install_sources(&settings); + let trusted_publishers = parse_string_list(&settings, "plugin_trusted_publishers"); + let registry_allowlist = parse_string_list(&settings, "plugin_registry_allowlist"); + + if !sources.contains(&PluginInstallSource::Registry) { + return Err((StatusCode::FORBIDDEN, "Registry installs are disabled by policy".to_string())); + } + + let url = Url::parse(&req.url) + .map_err(|_| (StatusCode::BAD_REQUEST, "Invalid registry URL".to_string()))?; + + match url.scheme() { + "https" | "http" => {} + _ => return Err((StatusCode::BAD_REQUEST, "Invalid registry URL scheme".to_string())), + } + + enforce_registry_allowlist(&url, ®istry_allowlist)?; + + let res = reqwest::get(url.clone()) + .await + .map_err(|e| (StatusCode::BAD_GATEWAY, e.to_string()))?; + + if !res.status().is_success() { + return Err((StatusCode::BAD_GATEWAY, "Registry fetch failed".to_string())); + } + + let bundle: UploadPluginPackageRequest = res + .json() + .await + .map_err(|_| (StatusCode::BAD_GATEWAY, "Invalid registry response".to_string()))?; + + let parsed_manifest: PluginManifest = serde_json::from_value(bundle.manifest.clone()) + .map_err(|e| (StatusCode::BAD_GATEWAY, format!("Registry returned invalid manifest: {e}")))?; + + if parsed_manifest.name != bundle.name { + return Err(( + StatusCode::BAD_GATEWAY, + "Registry manifest name mismatch".to_string(), + )); + } + + if parsed_manifest.version != bundle.version { + return Err(( + StatusCode::BAD_GATEWAY, + "Registry manifest version mismatch".to_string(), + )); + } + + let publisher = bundle.publisher.unwrap_or_default(); + let wasm_bytes = decode_base64(&bundle.wasm_base64)?; + let wasm_sha256 = sha256_hex(&wasm_bytes); + + let signature_bytes = verify_signature_if_required( + trust_policy, + &trusted_publishers, + &publisher, + bundle.signature_base64.as_deref(), + &wasm_sha256, + )?; + + let package_id = sqlx::query( + r#" + INSERT INTO plugin_packages + (name, version, description, publisher, source, registry_url, wasm_sha256, wasm_bytes, manifest, 
signature) + VALUES + ($1, $2, $3, $4, 'registry', $5, $6, $7, $8, $9) + ON CONFLICT (name, version, publisher, wasm_sha256) + DO UPDATE SET + description = EXCLUDED.description, + manifest = EXCLUDED.manifest, + signature = EXCLUDED.signature, + registry_url = EXCLUDED.registry_url + RETURNING id + "#, + ) + .bind(&bundle.name) + .bind(&bundle.version) + .bind(&bundle.description) + .bind(&publisher) + .bind(url.as_str()) + .bind(&wasm_sha256) + .bind(&wasm_bytes) + .bind(&bundle.manifest) + .bind(&signature_bytes) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .try_get::("id") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let row = sqlx::query( + r#" + INSERT INTO community_plugin_packages (community_id, package_id, installed_by, is_active) + VALUES ($1, $2, $3, true) + ON CONFLICT (community_id, package_id) + DO UPDATE SET is_active = EXCLUDED.is_active + RETURNING installed_at, is_active + "#, + ) + .bind(community_id) + .bind(package_id) + .bind(auth.user_id) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Deactivate other active versions of this plugin in THIS community. + let rows = sqlx::query( + r#" + SELECT cpp.package_id, COALESCE(cpp.settings,'{}'::jsonb) as settings + FROM community_plugin_packages cpp + JOIN plugin_packages pp ON pp.id = cpp.package_id + WHERE cpp.community_id = $1 + AND cpp.is_active = true + AND pp.name = $2 + AND cpp.package_id <> $3 + "#, + ) + .bind(community_id) + .bind(&bundle.name) + .bind(package_id) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + for r in rows { + let other_package_id: Uuid = r + .try_get("package_id") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + let other_settings: Value = r + .try_get("settings") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let _ = sqlx::query( + "UPDATE community_plugin_packages SET is_active = false WHERE community_id = $1 AND package_id = $2", + ) + .bind(community_id) + .bind(other_package_id) + .execute(&pool) + .await; + + plugins + .handle_community_plugin_package_change( + community_id, + Some(auth.user_id), + other_package_id, + true, + other_settings.clone(), + false, + other_settings, + ) + .await; + } + + // Activate lifecycle hook for the newly installed package. 
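+    // Note (added for clarity): mirrors the upload path above; the registry package is
+    // reported as newly activated with empty settings `{}`, and any other active versions
+    // of the same plugin name were deactivated just before this point.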
+ plugins + .handle_community_plugin_package_change( + community_id, + Some(auth.user_id), + package_id, + false, + json!({}), + true, + json!({}), + ) + .await; + + let installed_at = row + .try_get::, _>("installed_at") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + let is_active = row + .try_get::("is_active") + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let _ = sqlx::query!( + r#"INSERT INTO public_events (community_id, actor_user_id, plugin_name, event_type, payload) + VALUES ($1, $2, NULL, 'plugin.package_installed', $3)"#, + community_id, + auth.user_id, + json!({"name": bundle.name, "version": bundle.version, "publisher": publisher, "sha256": wasm_sha256, "registry_url": url.as_str()}) + ) + .execute(&pool) + .await; + + Ok(Json(CommunityPluginPackageInfo { + package_id, + name: bundle.name, + version: bundle.version, + description: bundle.description, + publisher, + source: "registry".to_string(), + registry_url: Some(url.as_str().to_string()), + wasm_sha256, + manifest: bundle.manifest, + settings: json!({}), + signature_present: signature_bytes.is_some(), + installed_at, + is_active, + })) +} + +fn parse_trust_policy(settings: &Value) -> PluginTrustPolicy { + match settings + .get("plugin_trust_policy") + .and_then(|v| v.as_str()) + { + Some("unsigned_allowed") => PluginTrustPolicy::UnsignedAllowed, + _ => PluginTrustPolicy::SignedOnly, + } +} + +fn trust_policy_str(policy: PluginTrustPolicy) -> &'static str { + match policy { + PluginTrustPolicy::SignedOnly => "signed_only", + PluginTrustPolicy::UnsignedAllowed => "unsigned_allowed", + } +} + +fn parse_install_sources(settings: &Value) -> Vec { + let Some(arr) = settings.get("plugin_install_sources").and_then(|v| v.as_array()) else { + return vec![PluginInstallSource::Upload, PluginInstallSource::Registry]; + }; + + let mut sources: Vec = Vec::new(); + for v in arr { + match v.as_str() { + Some("upload") => sources.push(PluginInstallSource::Upload), + Some("registry") => sources.push(PluginInstallSource::Registry), + _ => {} + } + } + + sources.sort_by_key(|s| match s { + PluginInstallSource::Upload => 0, + PluginInstallSource::Registry => 1, + }); + sources.dedup(); + + if sources.is_empty() { + vec![PluginInstallSource::Upload, PluginInstallSource::Registry] + } else { + sources + } +} + +fn install_sources_json(sources: &[PluginInstallSource]) -> Value { + let mut v: Vec = sources + .iter() + .map(|s| match s { + PluginInstallSource::Upload => Value::String("upload".to_string()), + PluginInstallSource::Registry => Value::String("registry".to_string()), + }) + .collect(); + + v.sort_by(|a, b| a.as_str().unwrap_or("").cmp(b.as_str().unwrap_or(""))); + v.dedup(); + Value::Array(v) +} + +fn parse_bool(settings: &Value, key: &str, default: bool) -> bool { + settings.get(key).and_then(|v| v.as_bool()).unwrap_or(default) +} + +fn parse_string_list(settings: &Value, key: &str) -> Vec { + let Some(arr) = settings.get(key).and_then(|v| v.as_array()) else { + return Vec::new(); + }; + + let mut out: Vec = arr + .iter() + .filter_map(|v| v.as_str().map(|s| s.to_string())) + .collect(); + out.sort(); + out.dedup(); + out +} + +async fn list_community_plugins( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let membership = sqlx::query!( + "SELECT role FROM community_members WHERE user_id = $1 AND community_id = $2", + auth.user_id, + community_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| 
(StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + match membership { + Some(m) if m.role == "admin" || m.role == "moderator" => {} + _ => return Err((StatusCode::FORBIDDEN, "Must be admin or moderator".to_string())), + } + + let rows = sqlx::query!( + r#" + SELECT + p.name, + p.version, + p.description, + p.is_core, + p.is_active as global_is_active, + COALESCE(cp.is_active, false) as "community_is_active!", + COALESCE(cp.settings, '{}'::jsonb) as "settings!: serde_json::Value", + p.settings_schema as "settings_schema: serde_json::Value" + FROM plugins p + LEFT JOIN community_plugins cp + ON cp.plugin_id = p.id AND cp.community_id = $1 + ORDER BY p.is_core DESC, p.name ASC + "#, + community_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json( + rows.into_iter() + .map(|r| CommunityPluginInfo { + name: r.name, + version: r.version, + description: r.description, + is_core: r.is_core, + global_is_active: r.global_is_active, + community_is_active: r.community_is_active, + settings: r.settings, + settings_schema: r.settings_schema, + }) + .collect(), + )) +} + +async fn update_community_plugin( + auth: AuthUser, + Path((community_id, plugin_name)): Path<(Uuid, String)>, + State(pool): State, + Extension(plugins): Extension>, + Json(req): Json, +) -> Result, (StatusCode, String)> { + if req.is_active.is_none() && req.settings.is_none() { + return Err(( + StatusCode::BAD_REQUEST, + "Must provide is_active and/or settings".to_string(), + )); + } + + let membership = sqlx::query!( + "SELECT role FROM community_members WHERE user_id = $1 AND community_id = $2", + auth.user_id, + community_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + match membership { + Some(m) if m.role == "admin" || m.role == "moderator" => {} + _ => return Err((StatusCode::FORBIDDEN, "Must be admin or moderator".to_string())), + } + + let plugin = sqlx::query!( + "SELECT id, name, version, description, is_core, is_active, settings_schema FROM plugins WHERE name = $1", + plugin_name + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
+ .ok_or((StatusCode::NOT_FOUND, "Plugin not found".to_string()))?; + + if !plugin.is_active { + return Err(( + StatusCode::CONFLICT, + "Plugin is disabled globally".to_string(), + )); + } + + if plugin.is_core { + if let Some(false) = req.is_active { + return Err(( + StatusCode::FORBIDDEN, + "Core plugins cannot be deactivated".to_string(), + )); + } + } + + if let Some(settings) = &req.settings { + if let Some(schema) = &plugin.settings_schema { + let compiled = JSONSchema::options() + .with_draft(Draft::Draft7) + .compile(schema) + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Invalid settings schema for plugin {}: {}", plugin.name, e), + ) + })?; + + if !compiled.is_valid(settings) { + let errors = compiled.validate(settings).err().unwrap(); + let msgs: Vec = errors.take(5).map(|e| e.to_string()).collect(); + return Err(( + StatusCode::BAD_REQUEST, + format!("Invalid settings: {}", msgs.join("; ")), + )); + } + } + } + + let old = sqlx::query!( + r#" + SELECT + is_active, + settings as "settings!: serde_json::Value" + FROM community_plugins + WHERE community_id = $1 AND plugin_id = $2 + "#, + community_id, + plugin.id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let old_is_active = old.as_ref().map(|r| r.is_active).unwrap_or(false); + let old_settings = old + .as_ref() + .map(|r| r.settings.clone()) + .unwrap_or_else(|| json!({})); + + let settings_to_apply = req.settings.clone(); + let is_active_to_apply = req.is_active; + + let row = sqlx::query!( + r#" + INSERT INTO community_plugins (community_id, plugin_id, settings, is_active) + VALUES ($1, $2, COALESCE($3, '{}'::jsonb), COALESCE($4, true)) + ON CONFLICT (community_id, plugin_id) + DO UPDATE SET + settings = COALESCE($3, community_plugins.settings), + is_active = COALESCE($4, community_plugins.is_active), + activated_at = CASE + WHEN COALESCE($4, community_plugins.is_active) = true AND community_plugins.is_active = false THEN NOW() + ELSE community_plugins.activated_at + END + RETURNING is_active as community_is_active, + settings as "settings!: serde_json::Value" + "#, + community_id, + plugin.id, + settings_to_apply, + is_active_to_apply + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let event_type = match is_active_to_apply { + Some(true) => Some("plugin.activated"), + Some(false) => Some("plugin.deactivated"), + None => None, + }; + + if let Some(event_type) = event_type { + let _ = sqlx::query!( + r#"INSERT INTO public_events (community_id, actor_user_id, plugin_name, event_type, payload) + VALUES ($1, $2, $3, $4, $5)"#, + community_id, + auth.user_id, + plugin.name, + event_type, + serde_json::json!({}) + ) + .execute(&pool) + .await; + } + + if req.settings.is_some() { + let keys = redacted_settings_keys(req.settings.as_ref().unwrap()); + let _ = sqlx::query!( + r#"INSERT INTO public_events (community_id, actor_user_id, plugin_name, event_type, payload) + VALUES ($1, $2, $3, 'plugin.settings_updated', $4)"#, + community_id, + auth.user_id, + plugin.name, + serde_json::json!({"keys": keys}) + ) + .execute(&pool) + .await; + } + + plugins + .handle_community_plugin_change( + community_id, + Some(auth.user_id), + &plugin.name, + old_is_active, + old_settings, + row.community_is_active, + row.settings.clone(), + ) + .await; + + Ok(Json(CommunityPluginInfo { + name: plugin.name, + version: plugin.version, + description: plugin.description, + is_core: plugin.is_core, + 
global_is_active: plugin.is_active, + community_is_active: row.community_is_active, + settings: row.settings, + settings_schema: plugin.settings_schema, + })) +} diff --git a/backend/src/api/proposals.rs b/backend/src/api/proposals.rs new file mode 100644 index 0000000..272d3dc --- /dev/null +++ b/backend/src/api/proposals.rs @@ -0,0 +1,1181 @@ +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::{get, post}, + Extension, + Json, Router, +}; +use serde::Deserialize; +use sqlx::PgPool; +use std::sync::Arc; +use uuid::Uuid; + +use crate::auth::AuthUser; +use crate::models::proposal::{CreateProposal, Proposal, ProposalOptionWithVotes, ProposalWithOptions}; +use crate::plugins::{HookContext, PluginError, PluginManager}; + +pub fn router(pool: PgPool) -> Router { + Router::new() + .route("/api/proposals", get(list_all_proposals)) + .route("/api/proposals/my", get(my_proposals)) + .route("/api/communities/{community_id}/proposals", get(list_proposals).post(create_proposal)) + .route("/api/proposals/{id}", get(get_proposal).delete(delete_proposal).put(update_proposal)) + .route("/api/proposals/{id}/vote", post(cast_vote)) + .route("/api/proposals/{id}/vote/ranked", post(cast_ranked_vote)) + .route("/api/proposals/{id}/vote/quadratic", post(cast_quadratic_vote)) + .route("/api/proposals/{id}/vote/star", post(cast_star_vote)) + .route("/api/proposals/{id}/start-discussion", post(start_discussion)) + .route("/api/proposals/{id}/start-voting", post(start_voting)) + .route("/api/proposals/{id}/close-voting", post(close_voting)) + .route("/api/proposals/{id}/results", get(get_voting_results)) + .with_state(pool) +} + +use serde::Serialize; + +#[derive(Debug, Serialize)] +pub struct ProposalWithCommunity { + pub id: Uuid, + pub title: String, + pub description: String, + pub status: String, + pub community_name: String, + pub community_slug: String, + pub vote_count: i64, + pub comment_count: i64, + pub created_at: chrono::DateTime, +} + +async fn list_all_proposals( + State(pool): State, +) -> Result>, (StatusCode, String)> { + let proposals = sqlx::query!( + r#" + SELECT p.id, p.title, p.description, p.status as "status: String", p.created_at, + c.name as community_name, c.slug as community_slug, + COALESCE((SELECT COUNT(*) FROM votes v JOIN proposal_options po ON v.option_id = po.id WHERE po.proposal_id = p.id), 0) as vote_count, + COALESCE((SELECT COUNT(*) FROM comments WHERE proposal_id = p.id), 0) as comment_count + FROM proposals p + JOIN communities c ON p.community_id = c.id + WHERE c.is_active = true + ORDER BY p.created_at DESC + LIMIT 50 + "# + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let result = proposals.into_iter().map(|p| ProposalWithCommunity { + id: p.id, + title: p.title, + description: p.description, + status: p.status, + community_name: p.community_name, + community_slug: p.community_slug, + vote_count: p.vote_count.unwrap_or(0), + comment_count: p.comment_count.unwrap_or(0), + created_at: p.created_at, + }).collect(); + + Ok(Json(result)) +} + +async fn my_proposals( + auth: AuthUser, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let proposals = sqlx::query!( + r#" + SELECT p.id, p.title, p.description, p.status as "status: String", p.created_at, + c.name as community_name, c.slug as community_slug, + COALESCE((SELECT COUNT(*) FROM votes v JOIN proposal_options po ON v.option_id = po.id WHERE po.proposal_id = p.id), 0) as vote_count, + COALESCE((SELECT COUNT(*) FROM comments WHERE 
proposal_id = p.id), 0) as comment_count + FROM proposals p + JOIN communities c ON p.community_id = c.id + WHERE p.author_id = $1 AND c.is_active = true + ORDER BY p.created_at DESC + "#, + auth.user_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let result = proposals.into_iter().map(|p| ProposalWithCommunity { + id: p.id, + title: p.title, + description: p.description, + status: p.status, + community_name: p.community_name, + community_slug: p.community_slug, + vote_count: p.vote_count.unwrap_or(0), + comment_count: p.comment_count.unwrap_or(0), + created_at: p.created_at, + }).collect(); + + Ok(Json(result)) +} + +async fn list_proposals( + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let proposals = sqlx::query_as!( + Proposal, + r#"SELECT id, community_id, author_id, title, description, + status as "status: _", voting_method, voting_starts_at, voting_ends_at, + created_at, updated_at, deliberation_phase as "deliberation_phase: _", + inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at, + min_read_time_seconds, facilitator_id + FROM proposals + WHERE community_id = $1 + ORDER BY created_at DESC"#, + community_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(proposals)) +} + +async fn create_proposal( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Extension(plugins): Extension>, + Json(req): Json, +) -> Result, (StatusCode, String)> { + use crate::api::permissions::{require_permission, perms}; + + // Require proposal.create permission in community + require_permission(&pool, auth.user_id, perms::PROPOSAL_CREATE, Some(community_id)).await?; + + let filtered = plugins + .apply_filters( + "proposal.create", + HookContext { + pool: pool.clone(), + community_id: Some(community_id), + actor_user_id: Some(auth.user_id), + }, + serde_json::json!({ + "title": req.title, + "description": req.description, + "options": req.options, + }), + ) + .await + .map_err(|e| match e { + PluginError::Message(m) => (StatusCode::BAD_REQUEST, m), + PluginError::Sqlx(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()), + })?; + + let title = filtered + .get("title") + .and_then(|v| v.as_str()) + .ok_or(( + StatusCode::BAD_REQUEST, + "Invalid proposal.create filter output".to_string(), + ))? + .to_string(); + + let description = filtered + .get("description") + .and_then(|v| v.as_str()) + .ok_or(( + StatusCode::BAD_REQUEST, + "Invalid proposal.create filter output".to_string(), + ))? + .to_string(); + + let options = filtered + .get("options") + .and_then(|v| v.as_array()) + .ok_or(( + StatusCode::BAD_REQUEST, + "Invalid proposal.create filter output".to_string(), + ))? 
+ .iter() + .map(|v| { + v.as_str() + .ok_or(( + StatusCode::BAD_REQUEST, + "Invalid proposal.create filter output".to_string(), + )) + .map(|s| s.to_string()) + }) + .collect::, (StatusCode, String)>>()?; + + // Get community's default voting method from voting plugin system + let default_voting_method: String = sqlx::query_scalar!( + r#"SELECT COALESCE( + (SELECT vm.name FROM community_voting_methods cvm + JOIN voting_method_plugins vm ON vm.id = cvm.voting_method_id + WHERE cvm.community_id = $1 AND cvm.is_default = true + LIMIT 1), + (SELECT name FROM voting_method_plugins WHERE is_default = true LIMIT 1), + 'approval' + ) as "method!""#, + community_id + ) + .fetch_one(&pool) + .await + .map_err(|e: sqlx::Error| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Start transaction + let mut tx = pool.begin().await.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Create proposal with community's default voting method + let proposal = sqlx::query_as!( + Proposal, + r#"INSERT INTO proposals (community_id, author_id, title, description, voting_method) + VALUES ($1, $2, $3, $4, $5) + RETURNING id, community_id, author_id, title, description, + status as "status: _", voting_method, voting_starts_at, voting_ends_at, + created_at, updated_at, deliberation_phase as "deliberation_phase: _", + inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at, + min_read_time_seconds, facilitator_id"#, + community_id, + auth.user_id, + title, + description, + default_voting_method + ) + .fetch_one(&mut *tx) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Create options + for (i, label) in options.iter().enumerate() { + sqlx::query!( + "INSERT INTO proposal_options (proposal_id, label, sort_order) VALUES ($1, $2, $3)", + proposal.id, + label, + i as i32 + ) + .execute(&mut *tx) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + } + + tx.commit().await.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + tracing::info!("Proposal '{}' created by {}", proposal.title, auth.username); + Ok(Json(proposal)) +} + +async fn get_proposal( + Path(proposal_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let proposal = sqlx::query_as!( + Proposal, + r#"SELECT id, community_id, author_id, title, description, + status as "status: _", voting_method, voting_starts_at, voting_ends_at, + created_at, updated_at, deliberation_phase as "deliberation_phase: _", + inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at, + min_read_time_seconds, facilitator_id + FROM proposals WHERE id = $1"#, + proposal_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "Proposal not found".to_string()))?; + + let author = sqlx::query_scalar!("SELECT username FROM users WHERE id = $1", proposal.author_id) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let options = sqlx::query!( + r#"SELECT o.id, o.label, o.description, COUNT(v.id) as vote_count + FROM proposal_options o + LEFT JOIN votes v ON v.option_id = o.id + WHERE o.proposal_id = $1 + GROUP BY o.id + ORDER BY o.sort_order"#, + proposal_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
+ .into_iter() + .map(|row| ProposalOptionWithVotes { + id: row.id, + label: row.label, + description: row.description, + vote_count: row.vote_count.unwrap_or(0), + }) + .collect(); + + Ok(Json(ProposalWithOptions { + proposal, + options, + author_name: author, + })) +} + +#[derive(Deserialize)] +pub struct VoteRequest { + pub option_ids: Vec, +} + +#[derive(Deserialize)] +pub struct RankedVoteRequest { + pub rankings: Vec, +} + +#[derive(Deserialize)] +pub struct RankedOption { + pub option_id: Uuid, + pub rank: i32, +} + +#[derive(Deserialize)] +pub struct QuadraticVoteRequest { + pub allocations: Vec, +} + +#[derive(Deserialize)] +pub struct QuadraticAllocation { + pub option_id: Uuid, + pub credits: i32, +} + +#[derive(Deserialize)] +pub struct StarVoteRequest { + pub ratings: Vec, +} + +#[derive(Deserialize)] +pub struct StarRating { + pub option_id: Uuid, + pub stars: i32, +} + +async fn cast_vote( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, + Extension(plugins): Extension>, + Json(req): Json, +) -> Result, (StatusCode, String)> { + use crate::api::permissions::{require_permission, perms}; + + let proposal = sqlx::query_as!( + Proposal, + r#"SELECT id, community_id, author_id, title, description, + status as "status: _", voting_method, voting_starts_at, voting_ends_at, + created_at, updated_at, deliberation_phase as "deliberation_phase: _", + inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at, + min_read_time_seconds, facilitator_id + FROM proposals WHERE id = $1"#, + proposal_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "Proposal not found".to_string()))?; + + // Require vote.cast permission in community + require_permission(&pool, auth.user_id, perms::VOTE_CAST, Some(proposal.community_id)).await?; + + if !matches!(proposal.status, crate::models::ProposalStatus::Voting) { + return Err((StatusCode::BAD_REQUEST, "Proposal is not in voting phase".to_string())); + } + + let vote_payload = serde_json::json!({ + "proposal_id": proposal_id.to_string(), + "community_id": proposal.community_id.to_string(), + "voter_id": auth.user_id.to_string(), + "voter_name": auth.username, + "option_ids": req.option_ids.iter().map(|id| id.to_string()).collect::>(), + "voting_method": proposal.voting_method, + }); + + let ctx = HookContext { + pool: pool.clone(), + community_id: Some(proposal.community_id), + actor_user_id: Some(auth.user_id), + }; + let filtered = plugins + .apply_filters("vote.cast.validate", ctx, vote_payload) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + if let Some(err) = filtered.get("_error").and_then(|v| v.as_str()) { + return Err((StatusCode::BAD_REQUEST, err.to_string())); + } + + let option_ids: Vec = filtered + .get("option_ids") + .and_then(|v| v.as_array()) + .map(|arr| { + arr.iter() + .filter_map(|v| v.as_str().and_then(|s| Uuid::parse_str(s).ok())) + .collect() + }) + .unwrap_or(req.option_ids); + + let voting_identity = sqlx::query_scalar!( + r#"INSERT INTO voting_identities (user_id, community_id, pseudonym) + VALUES ($1, $2, $3) + ON CONFLICT (user_id, community_id) DO UPDATE SET user_id = $1 + RETURNING id"#, + auth.user_id, + proposal.community_id, + format!("voter-{}", uuid::Uuid::new_v4().to_string()[..8].to_string()) + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + sqlx::query!( + "DELETE FROM votes WHERE proposal_id = $1 AND voter_id = 
$2", + proposal_id, + voting_identity + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + for option_id in option_ids { + sqlx::query!( + "INSERT INTO votes (proposal_id, option_id, voter_id) VALUES ($1, $2, $3)", + proposal_id, + option_id, + voting_identity + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + } + + let ctx = HookContext { + pool: pool.clone(), + community_id: Some(proposal.community_id), + actor_user_id: Some(auth.user_id), + }; + let _ = plugins.do_action("vote.cast", ctx, serde_json::json!({ + "proposal_id": proposal_id.to_string(), + "voter_id": auth.user_id.to_string(), + })).await; + + Ok(Json(serde_json::json!({"status": "voted"}))) +} + +async fn cast_ranked_vote( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let proposal = sqlx::query!( + "SELECT community_id, status as \"status: crate::models::ProposalStatus\", voting_method FROM proposals WHERE id = $1", + proposal_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "Proposal not found".to_string()))?; + + if !matches!(proposal.status, crate::models::ProposalStatus::Voting) { + return Err((StatusCode::BAD_REQUEST, "Proposal is not in voting phase".to_string())); + } + + if proposal.voting_method != "ranked_choice" { + return Err((StatusCode::BAD_REQUEST, "This proposal uses a different voting method".to_string())); + } + + let voting_identity = sqlx::query_scalar!( + r#"INSERT INTO voting_identities (user_id, community_id, pseudonym) + VALUES ($1, $2, $3) + ON CONFLICT (user_id, community_id) DO UPDATE SET user_id = $1 + RETURNING id"#, + auth.user_id, + proposal.community_id, + format!("voter-{}", uuid::Uuid::new_v4().to_string()[..8].to_string()) + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Clear existing ranked votes + sqlx::query!("DELETE FROM ranked_votes WHERE proposal_id = $1 AND voter_id = $2", proposal_id, voting_identity) + .execute(&pool).await.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Insert ranked votes + for ranking in req.rankings { + sqlx::query!( + "INSERT INTO ranked_votes (proposal_id, voter_id, option_id, rank) VALUES ($1, $2, $3, $4)", + proposal_id, voting_identity, ranking.option_id, ranking.rank + ).execute(&pool).await.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + } + + Ok(Json(serde_json::json!({"status": "voted", "method": "ranked_choice"}))) +} + +async fn cast_quadratic_vote( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + use crate::voting::quadratic::{vote_cost, max_votes_for_credits}; + + let proposal = sqlx::query!( + "SELECT community_id, status as \"status: crate::models::ProposalStatus\", voting_method FROM proposals WHERE id = $1", + proposal_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
+ .ok_or((StatusCode::NOT_FOUND, "Proposal not found".to_string()))?; + + if !matches!(proposal.status, crate::models::ProposalStatus::Voting) { + return Err((StatusCode::BAD_REQUEST, "Proposal is not in voting phase".to_string())); + } + + if proposal.voting_method != "quadratic" { + return Err((StatusCode::BAD_REQUEST, "This proposal uses a different voting method".to_string())); + } + + // Validate using quadratic voting module + let total_credits = 100; + let total_cost: i32 = req.allocations.iter().map(|a| vote_cost(a.credits)).sum(); + if total_cost > total_credits { + let max_single = max_votes_for_credits(total_credits); + return Err((StatusCode::BAD_REQUEST, format!( + "Total cost {} exceeds {} credits. Max votes on single option: {}", + total_cost, total_credits, max_single + ))); + } + + let voting_identity = sqlx::query_scalar!( + r#"INSERT INTO voting_identities (user_id, community_id, pseudonym) + VALUES ($1, $2, $3) + ON CONFLICT (user_id, community_id) DO UPDATE SET user_id = $1 + RETURNING id"#, + auth.user_id, + proposal.community_id, + format!("voter-{}", uuid::Uuid::new_v4().to_string()[..8].to_string()) + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Clear existing quadratic votes + sqlx::query!("DELETE FROM quadratic_votes WHERE proposal_id = $1 AND voter_id = $2", proposal_id, voting_identity) + .execute(&pool).await.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Insert quadratic votes + for alloc in req.allocations { + if alloc.credits > 0 { + sqlx::query!( + "INSERT INTO quadratic_votes (proposal_id, voter_id, option_id, credits) VALUES ($1, $2, $3, $4)", + proposal_id, voting_identity, alloc.option_id, alloc.credits + ).execute(&pool).await.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + } + } + + Ok(Json(serde_json::json!({"status": "voted", "method": "quadratic", "credits_used": total_cost}))) +} + +async fn cast_star_vote( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let proposal = sqlx::query!( + "SELECT community_id, status as \"status: crate::models::ProposalStatus\", voting_method FROM proposals WHERE id = $1", + proposal_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
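+    // STAR ballots rate each option from 0 to 5 stars (validated below). This handler only
+    // validates and stores the ratings; tallying, including the score round and automatic
+    // runoff that give STAR its name, happens later in calculate_star_results via
+    // crate::voting::star.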
+ .ok_or((StatusCode::NOT_FOUND, "Proposal not found".to_string()))?; + + if !matches!(proposal.status, crate::models::ProposalStatus::Voting) { + return Err((StatusCode::BAD_REQUEST, "Proposal is not in voting phase".to_string())); + } + + if proposal.voting_method != "star" { + return Err((StatusCode::BAD_REQUEST, "This proposal uses a different voting method".to_string())); + } + + // Validate star ratings (0-5) + for rating in &req.ratings { + if rating.stars < 0 || rating.stars > 5 { + return Err((StatusCode::BAD_REQUEST, "Star ratings must be between 0 and 5".to_string())); + } + } + + let voting_identity = sqlx::query_scalar!( + r#"INSERT INTO voting_identities (user_id, community_id, pseudonym) + VALUES ($1, $2, $3) + ON CONFLICT (user_id, community_id) DO UPDATE SET user_id = $1 + RETURNING id"#, + auth.user_id, + proposal.community_id, + format!("voter-{}", uuid::Uuid::new_v4().to_string()[..8].to_string()) + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Clear existing star votes + sqlx::query!("DELETE FROM star_votes WHERE proposal_id = $1 AND voter_id = $2", proposal_id, voting_identity) + .execute(&pool).await.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Insert star votes + for rating in req.ratings { + sqlx::query!( + "INSERT INTO star_votes (proposal_id, voter_id, option_id, stars) VALUES ($1, $2, $3, $4)", + proposal_id, voting_identity, rating.option_id, rating.stars + ).execute(&pool).await.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + } + + Ok(Json(serde_json::json!({"status": "voted", "method": "star"}))) +} + +async fn start_discussion( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let proposal = sqlx::query_as!( + Proposal, + r#"SELECT id, community_id, author_id, title, description, + status as "status: _", voting_method, voting_starts_at, voting_ends_at, + created_at, updated_at, deliberation_phase as "deliberation_phase: _", + inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at, + min_read_time_seconds, facilitator_id + FROM proposals WHERE id = $1"#, + proposal_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
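+    // Lifecycle handlers below: start_discussion and start_voting are author-only and move
+    // the proposal to 'discussion' and 'voting' (the latter stamps voting_starts_at), while
+    // close_voting also accepts holders of the PROPOSAL_MANAGE_STATUS permission and stamps
+    // voting_ends_at.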
+ .ok_or((StatusCode::NOT_FOUND, "Proposal not found".to_string()))?; + + if proposal.author_id != auth.user_id { + return Err((StatusCode::FORBIDDEN, "Only the author can start discussion".to_string())); + } + + let updated = sqlx::query_as!( + Proposal, + r#"UPDATE proposals + SET status = 'discussion' + WHERE id = $1 + RETURNING id, community_id, author_id, title, description, + status as "status: _", voting_method, voting_starts_at, voting_ends_at, + created_at, updated_at, deliberation_phase as "deliberation_phase: _", + inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at, + min_read_time_seconds, facilitator_id"#, + proposal_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(updated)) +} + +async fn start_voting( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let proposal = sqlx::query_as!( + Proposal, + r#"SELECT id, community_id, author_id, title, description, + status as "status: _", voting_method, voting_starts_at, voting_ends_at, + created_at, updated_at, deliberation_phase as "deliberation_phase: _", + inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at, + min_read_time_seconds, facilitator_id + FROM proposals WHERE id = $1"#, + proposal_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "Proposal not found".to_string()))?; + + if proposal.author_id != auth.user_id { + return Err((StatusCode::FORBIDDEN, "Only the author can start voting".to_string())); + } + + let updated = sqlx::query_as!( + Proposal, + r#"UPDATE proposals + SET status = 'voting', voting_starts_at = NOW() + WHERE id = $1 + RETURNING id, community_id, author_id, title, description, + status as "status: _", voting_method, voting_starts_at, voting_ends_at, + created_at, updated_at, deliberation_phase as "deliberation_phase: _", + inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at, + min_read_time_seconds, facilitator_id"#, + proposal_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(updated)) +} + +async fn close_voting( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + use crate::api::permissions::{user_has_permission, perms}; + + let proposal = sqlx::query_as!( + Proposal, + r#"SELECT id, community_id, author_id, title, description, + status as "status: _", voting_method, voting_starts_at, voting_ends_at, + created_at, updated_at, deliberation_phase as "deliberation_phase: _", + inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at, + min_read_time_seconds, facilitator_id + FROM proposals WHERE id = $1"#, + proposal_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
+ .ok_or((StatusCode::NOT_FOUND, "Proposal not found".to_string()))?; + + // Check if user can manage status: author or users with manage_status permission + let is_author = proposal.author_id == auth.user_id; + let can_manage = user_has_permission(&pool, auth.user_id, perms::PROPOSAL_MANAGE_STATUS, Some(proposal.community_id)).await?; + + if !is_author && !can_manage { + return Err((StatusCode::FORBIDDEN, "Only the author or admins can close voting".to_string())); + } + + if !matches!(proposal.status, crate::models::ProposalStatus::Voting) { + return Err((StatusCode::BAD_REQUEST, "Proposal is not in voting phase".to_string())); + } + + let updated = sqlx::query_as!( + Proposal, + r#"UPDATE proposals + SET status = 'closed', voting_ends_at = NOW() + WHERE id = $1 + RETURNING id, community_id, author_id, title, description, + status as "status: _", voting_method, voting_starts_at, voting_ends_at, + created_at, updated_at, deliberation_phase as "deliberation_phase: _", + inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at, + min_read_time_seconds, facilitator_id"#, + proposal_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + tracing::info!("Voting closed for proposal '{}'", proposal.title); + Ok(Json(updated)) +} + +async fn delete_proposal( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + use crate::api::permissions::{user_has_permission, perms}; + + let proposal = sqlx::query!( + "SELECT author_id, community_id, status as \"status: crate::models::ProposalStatus\", title FROM proposals WHERE id = $1", + proposal_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
+    .ok_or((StatusCode::NOT_FOUND, "Proposal not found".to_string()))?;
+
+    // Check if user can delete: author needs delete_own, others need delete_any
+    let is_author = proposal.author_id == auth.user_id;
+    let can_delete_own = user_has_permission(&pool, auth.user_id, perms::PROPOSAL_DELETE_OWN, Some(proposal.community_id)).await?;
+    let can_delete_any = user_has_permission(&pool, auth.user_id, perms::PROPOSAL_DELETE_ANY, Some(proposal.community_id)).await?;
+
+    if is_author && !can_delete_own {
+        return Err((StatusCode::FORBIDDEN, "You don't have permission to delete proposals".to_string()));
+    }
+    if !is_author && !can_delete_any {
+        return Err((StatusCode::FORBIDDEN, "Only the author or admins can delete this proposal".to_string()));
+    }
+
+    if !matches!(proposal.status, crate::models::ProposalStatus::Draft) {
+        return Err((StatusCode::BAD_REQUEST, "Only draft proposals can be deleted".to_string()));
+    }
+
+    // Delete related data first
+    sqlx::query!("DELETE FROM proposal_options WHERE proposal_id = $1", proposal_id)
+        .execute(&pool)
+        .await
+        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
+
+    sqlx::query!("DELETE FROM comments WHERE proposal_id = $1", proposal_id)
+        .execute(&pool)
+        .await
+        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
+
+    sqlx::query!("DELETE FROM proposals WHERE id = $1", proposal_id)
+        .execute(&pool)
+        .await
+        .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
+
+    tracing::info!("Proposal '{}' deleted", proposal.title);
+    Ok(Json(serde_json::json!({ "status": "deleted" })))
+}
+
+#[derive(Debug, Deserialize)]
+pub struct UpdateProposal {
+    pub title: Option<String>,
+    pub description: Option<String>,
+}
+
+async fn update_proposal(
+    auth: AuthUser,
+    Path(proposal_id): Path<Uuid>,
+    State(pool): State<PgPool>,
+    Json(payload): Json<UpdateProposal>,
+) -> Result<Json<Proposal>, (StatusCode, String)> {
+    use crate::api::permissions::{user_has_permission, perms};
+
+    let proposal = sqlx::query!(
+        "SELECT author_id, community_id, status as \"status: crate::models::ProposalStatus\" FROM proposals WHERE id = $1",
+        proposal_id
+    )
+    .fetch_optional(&pool)
+    .await
+    .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?
+ .ok_or((StatusCode::NOT_FOUND, "Proposal not found".to_string()))?; + + // Check edit permissions: author needs edit_own, others need edit_any + let is_author = proposal.author_id == auth.user_id; + let can_edit_own = user_has_permission(&pool, auth.user_id, perms::PROPOSAL_EDIT_OWN, Some(proposal.community_id)).await?; + let can_edit_any = user_has_permission(&pool, auth.user_id, perms::PROPOSAL_EDIT_ANY, Some(proposal.community_id)).await?; + + if is_author && !can_edit_own { + return Err((StatusCode::FORBIDDEN, "You don't have permission to edit proposals".to_string())); + } + if !is_author && !can_edit_any { + return Err((StatusCode::FORBIDDEN, "Only the author or admins can edit this proposal".to_string())); + } + + if !matches!(proposal.status, crate::models::ProposalStatus::Draft | crate::models::ProposalStatus::Discussion) { + return Err((StatusCode::BAD_REQUEST, "Can only edit proposals in draft or discussion phase".to_string())); + } + + let updated = sqlx::query_as!( + Proposal, + r#"UPDATE proposals + SET title = COALESCE($1, title), + description = COALESCE($2, description), + updated_at = NOW() + WHERE id = $3 + RETURNING id, community_id, author_id, title, description, + status as "status: _", voting_method, voting_starts_at, voting_ends_at, + created_at, updated_at, deliberation_phase as "deliberation_phase: _", + inform_starts_at, inform_ends_at, discuss_starts_at, discuss_ends_at, + min_read_time_seconds, facilitator_id"#, + payload.title, + payload.description, + proposal_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + tracing::info!("Proposal '{}' updated", updated.title); + Ok(Json(updated)) +} + +#[derive(Debug, Serialize)] +pub struct VotingResultsResponse { + pub proposal_id: Uuid, + pub voting_method: String, + pub total_votes: i64, + pub winner: Option, + pub results: Vec, + pub details: serde_json::Value, +} + +#[derive(Debug, Clone, Serialize)] +pub struct OptionResult { + pub option_id: Uuid, + pub label: String, + pub votes: i64, + pub percentage: f64, + pub rank: i32, +} + +async fn get_voting_results( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + use crate::api::permissions::{require_permission, perms}; + + let proposal = sqlx::query!( + "SELECT id, community_id, voting_method, status as \"status: crate::models::ProposalStatus\" FROM proposals WHERE id = $1", + proposal_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "Proposal not found".to_string()))?; + + // Require permission to view voting results + require_permission(&pool, auth.user_id, perms::VOTE_VIEW_RESULTS, Some(proposal.community_id)).await?; + + let options: Vec<(Uuid, String)> = sqlx::query!( + "SELECT id, label FROM proposal_options WHERE proposal_id = $1 ORDER BY sort_order", + proposal_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
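+    // Reduce the option rows to (id, label) pairs, keeping the sort_order from the query,
+    // so the per-method tallies below can map option ids back to their labels.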
+ .into_iter() + .map(|r| (r.id, r.label)) + .collect(); + + let voting_method = proposal.voting_method.as_str(); + + let (results, total_votes, details) = match voting_method { + "approval" => calculate_approval_results(&pool, proposal_id, &options).await?, + "ranked_choice" | "schulze" => calculate_ranked_results(&pool, proposal_id, &options, voting_method).await?, + "star" => calculate_star_results(&pool, proposal_id, &options).await?, + "quadratic" => calculate_quadratic_results(&pool, proposal_id, &options).await?, + _ => calculate_approval_results(&pool, proposal_id, &options).await?, + }; + + let winner = results.first().cloned(); + + Ok(Json(VotingResultsResponse { + proposal_id, + voting_method: proposal.voting_method, + total_votes, + winner, + results, + details, + })) +} + +async fn calculate_approval_results( + pool: &PgPool, + proposal_id: Uuid, + options: &[(Uuid, String)], +) -> Result<(Vec, i64, serde_json::Value), (StatusCode, String)> { + let vote_counts = sqlx::query!( + r#"SELECT option_id, COUNT(*) as count + FROM votes WHERE proposal_id = $1 + GROUP BY option_id"#, + proposal_id + ) + .fetch_all(pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let total_voters = sqlx::query_scalar!( + "SELECT COUNT(DISTINCT voter_id) FROM votes WHERE proposal_id = $1", + proposal_id + ) + .fetch_one(pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .unwrap_or(0); + + let mut results: Vec = options.iter().map(|(opt_id, label)| { + let votes = vote_counts.iter() + .find(|v| v.option_id == *opt_id) + .map(|v| v.count.unwrap_or(0)) + .unwrap_or(0); + let percentage = if total_voters > 0 { + (votes as f64 / total_voters as f64) * 100.0 + } else { 0.0 }; + + OptionResult { + option_id: *opt_id, + label: label.clone(), + votes, + percentage, + rank: 0, + } + }).collect(); + + results.sort_by(|a, b| b.votes.cmp(&a.votes)); + for (i, r) in results.iter_mut().enumerate() { + r.rank = (i + 1) as i32; + } + + Ok((results, total_voters, serde_json::json!({"method": "approval"}))) +} + +async fn calculate_ranked_results( + pool: &PgPool, + proposal_id: Uuid, + options: &[(Uuid, String)], + method: &str, +) -> Result<(Vec, i64, serde_json::Value), (StatusCode, String)> { + use crate::voting::{schulze, ranked_choice}; + + let ballots_raw = sqlx::query!( + r#"SELECT voter_id, option_id, rank FROM ranked_votes + WHERE proposal_id = $1 ORDER BY voter_id, rank"#, + proposal_id + ) + .fetch_all(pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let option_ids: Vec = options.iter().map(|(id, _)| *id).collect(); + + // Group ballots by voter + let mut voter_ballots: std::collections::HashMap> = std::collections::HashMap::new(); + for b in &ballots_raw { + voter_ballots.entry(b.voter_id).or_default().push((b.option_id, b.rank)); + } + + let total_voters = voter_ballots.len() as i64; + + let result = if method == "schulze" { + let ballots: Vec = voter_ballots.values().map(|rankings| { + schulze::RankedBallot { + rankings: rankings.iter().map(|(id, rank)| (*id, *rank as usize)).collect() + } + }).collect(); + schulze::calculate(&option_ids, &ballots) + } else { + let ballots: Vec = voter_ballots.values().map(|rankings| { + let mut sorted = rankings.clone(); + sorted.sort_by_key(|(_, rank)| *rank); + ranked_choice::RankedBallot { + rankings: sorted.iter().map(|(id, _)| *id).collect() + } + }).collect(); + ranked_choice::calculate(&option_ids, &ballots) + }; + + let results: Vec = 
result.ranking.iter().map(|r| { + let label = options.iter() + .find(|(id, _)| *id == r.option_id) + .map(|(_, l)| l.clone()) + .unwrap_or_default(); + OptionResult { + option_id: r.option_id, + label, + votes: r.score as i64, + percentage: if total_voters > 0 { (r.score / total_voters as f64) * 100.0 } else { 0.0 }, + rank: r.rank as i32, + } + }).collect(); + + Ok((results, total_voters, serde_json::to_value(&result.details).unwrap_or_default())) +} + +async fn calculate_star_results( + pool: &PgPool, + proposal_id: Uuid, + options: &[(Uuid, String)], +) -> Result<(Vec, i64, serde_json::Value), (StatusCode, String)> { + use crate::voting::star; + + let votes_raw = sqlx::query!( + r#"SELECT voter_id, option_id, stars FROM star_votes WHERE proposal_id = $1"#, + proposal_id + ) + .fetch_all(pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let option_ids: Vec = options.iter().map(|(id, _)| *id).collect(); + + // Group by voter + let mut voter_scores: std::collections::HashMap> = std::collections::HashMap::new(); + for v in &votes_raw { + voter_scores.entry(v.voter_id).or_default().push((v.option_id, v.stars)); + } + + let ballots: Vec = voter_scores.values().map(|scores| { + star::ScoreBallot { + scores: scores.clone() + } + }).collect(); + + let total_voters = ballots.len() as i64; + let result = star::calculate(&option_ids, &ballots); + + let results: Vec = result.ranking.iter().map(|r| { + let label = options.iter() + .find(|(id, _)| *id == r.option_id) + .map(|(_, l)| l.clone()) + .unwrap_or_default(); + OptionResult { + option_id: r.option_id, + label, + votes: r.score as i64, + percentage: 0.0, + rank: r.rank as i32, + } + }).collect(); + + Ok((results, total_voters, serde_json::to_value(&result.details).unwrap_or_default())) +} + +async fn calculate_quadratic_results( + pool: &PgPool, + proposal_id: Uuid, + options: &[(Uuid, String)], +) -> Result<(Vec, i64, serde_json::Value), (StatusCode, String)> { + use crate::voting::quadratic; + + let votes_raw = sqlx::query!( + r#"SELECT voter_id, option_id, credits FROM quadratic_votes WHERE proposal_id = $1"#, + proposal_id + ) + .fetch_all(pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let option_ids: Vec = options.iter().map(|(id, _)| *id).collect(); + + // Group by voter to build ballots + let mut voter_allocations: std::collections::HashMap> = std::collections::HashMap::new(); + for v in &votes_raw { + voter_allocations.entry(v.voter_id).or_default().push((v.option_id, v.credits)); + } + + // Convert to QuadraticBallot format (100 credits per voter) + let ballots: Vec = voter_allocations.values().map(|allocs| { + quadratic::QuadraticBallot { + total_credits: 100, // Standard credit allocation + allocations: allocs.clone(), + } + }).collect(); + + let total_voters = ballots.len() as i64; + let result = quadratic::calculate(&option_ids, &ballots); + + let results: Vec = result.ranking.iter().map(|r| { + let label = options.iter() + .find(|(id, _)| *id == r.option_id) + .map(|(_, l)| l.clone()) + .unwrap_or_default(); + OptionResult { + option_id: r.option_id, + label, + votes: r.score as i64, + percentage: if total_voters > 0 { (r.score / total_voters as f64) * 100.0 } else { 0.0 }, + rank: r.rank as i32, + } + }).collect(); + + Ok((results, total_voters, serde_json::to_value(&result.details).unwrap_or_default())) +} diff --git a/backend/src/api/roles.rs b/backend/src/api/roles.rs new file mode 100644 index 0000000..441a36e --- /dev/null +++ 
b/backend/src/api/roles.rs @@ -0,0 +1,426 @@ +//! Roles and Permissions API +//! +//! Provides granular access control with default roles and custom role support. + +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::{get, post, delete}, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use sqlx::PgPool; +use uuid::Uuid; +use chrono::{DateTime, Utc}; + +use crate::auth::AuthUser; + +// ============================================================================ +// Types +// ============================================================================ + +#[derive(Debug, Clone, Serialize, Deserialize, sqlx::Type)] +#[sqlx(type_name = "permission_category", rename_all = "lowercase")] +pub enum PermissionCategory { + Platform, + Community, + Proposals, + Voting, + Moderation, + Plugins, + Users, + Integrations, +} + +#[derive(Debug, Serialize)] +pub struct Permission { + pub id: Uuid, + pub name: String, + pub category: PermissionCategory, + pub description: Option, + pub is_system: bool, +} + +#[derive(Debug, Serialize)] +pub struct Role { + pub id: Uuid, + pub name: String, + pub display_name: String, + pub description: Option, + pub color: Option, + pub community_id: Option, + pub is_system: bool, + pub is_default: bool, + pub priority: i32, + pub permissions: Vec, +} + +#[derive(Debug, Deserialize)] +pub struct CreateRoleRequest { + pub name: String, + pub display_name: String, + pub description: Option, + pub color: Option, + pub is_default: Option, + pub priority: Option, + pub permissions: Vec, +} + +/// Request to update a role. Designed for PUT endpoint. +#[allow(dead_code)] +#[derive(Debug, Deserialize)] +pub struct UpdateRoleRequest { + pub display_name: Option, + pub description: Option, + pub color: Option, + pub is_default: Option, + pub priority: Option, + pub permissions: Option>, +} + +#[derive(Debug, Deserialize)] +pub struct AssignRoleRequest { + pub user_id: Uuid, + pub expires_at: Option>, +} + +/// User with their assigned roles. Designed for user role listing. 
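+/// Not yet returned by any route in this module, hence the dead_code allowance below.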
+#[allow(dead_code)] +#[derive(Debug, Serialize)] +pub struct UserWithRoles { + pub user_id: Uuid, + pub username: String, + pub roles: Vec, +} + +#[derive(Debug, Serialize)] +pub struct RoleSummary { + pub id: Uuid, + pub name: String, + pub display_name: String, + pub color: Option, +} + +// ============================================================================ +// Permission Handlers +// ============================================================================ + +/// List all permissions +async fn list_permissions( + State(pool): State, +) -> Result>, (StatusCode, String)> { + let perms = sqlx::query!( + r#"SELECT id, name, category as "category: PermissionCategory", description, is_system + FROM permissions ORDER BY category, name"# + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(perms.into_iter().map(|p| Permission { + id: p.id, + name: p.name, + category: p.category, + description: p.description, + is_system: p.is_system, + }).collect())) +} + +// ============================================================================ +// Platform Role Handlers +// ============================================================================ + +/// List platform roles +async fn list_platform_roles( + State(pool): State, +) -> Result>, (StatusCode, String)> { + let roles = sqlx::query!( + r#"SELECT r.id, r.name, r.display_name, r.description, r.color, + r.is_system, r.is_default, r.priority, + ARRAY_AGG(p.name) FILTER (WHERE p.name IS NOT NULL) as permissions + FROM roles r + LEFT JOIN role_permissions rp ON r.id = rp.role_id AND rp.granted = TRUE + LEFT JOIN permissions p ON rp.permission_id = p.id + WHERE r.community_id IS NULL + GROUP BY r.id + ORDER BY r.priority DESC"# + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(roles.into_iter().map(|r| Role { + id: r.id, + name: r.name, + display_name: r.display_name, + description: r.description, + color: r.color, + community_id: None, + is_system: r.is_system, + is_default: r.is_default, + priority: r.priority, + permissions: r.permissions.unwrap_or_default(), + }).collect())) +} + +// ============================================================================ +// Community Role Handlers +// ============================================================================ + +/// List roles for a community +async fn list_community_roles( + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let roles = sqlx::query!( + r#"SELECT r.id, r.name, r.display_name, r.description, r.color, + r.is_system, r.is_default, r.priority, + ARRAY_AGG(p.name) FILTER (WHERE p.name IS NOT NULL) as permissions + FROM roles r + LEFT JOIN role_permissions rp ON r.id = rp.role_id AND rp.granted = TRUE + LEFT JOIN permissions p ON rp.permission_id = p.id + WHERE r.community_id = $1 + GROUP BY r.id + ORDER BY r.priority DESC"#, + community_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(roles.into_iter().map(|r| Role { + id: r.id, + name: r.name, + display_name: r.display_name, + description: r.description, + color: r.color, + community_id: Some(community_id), + is_system: r.is_system, + is_default: r.is_default, + priority: r.priority, + permissions: r.permissions.unwrap_or_default(), + }).collect())) +} + +/// Create a community role +async fn create_community_role( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + 
Json(req): Json, +) -> Result, (StatusCode, String)> { + // Check permission + let has_perm = sqlx::query_scalar!( + "SELECT user_has_permission($1, 'community.roles.manage', $2)", + auth.user_id, + community_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .unwrap_or(false); + + if !has_perm { + return Err((StatusCode::FORBIDDEN, "No permission to manage roles".to_string())); + } + + // Create role + let role = sqlx::query!( + r#"INSERT INTO roles (name, display_name, description, color, community_id, is_default, priority) + VALUES ($1, $2, $3, $4, $5, $6, $7) + RETURNING id, name, display_name, description, color, is_system, is_default, priority"#, + req.name, + req.display_name, + req.description, + req.color, + community_id, + req.is_default.unwrap_or(false), + req.priority.unwrap_or(0) + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Assign permissions + for perm_name in &req.permissions { + sqlx::query!( + r#"INSERT INTO role_permissions (role_id, permission_id, granted) + SELECT $1, p.id, TRUE FROM permissions p WHERE p.name = $2 + ON CONFLICT (role_id, permission_id) DO UPDATE SET granted = TRUE"#, + role.id, + perm_name + ) + .execute(&pool) + .await + .ok(); + } + + Ok(Json(Role { + id: role.id, + name: role.name, + display_name: role.display_name, + description: role.description, + color: role.color, + community_id: Some(community_id), + is_system: role.is_system, + is_default: role.is_default, + priority: role.priority, + permissions: req.permissions, + })) +} + +/// Assign role to user +async fn assign_role( + auth: AuthUser, + Path((community_id, role_id)): Path<(Uuid, Uuid)>, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Check permission + let has_perm = sqlx::query_scalar!( + "SELECT user_has_permission($1, 'community.roles.manage', $2)", + auth.user_id, + community_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .unwrap_or(false); + + if !has_perm { + return Err((StatusCode::FORBIDDEN, "No permission to manage roles".to_string())); + } + + // Verify role belongs to community + let _role = sqlx::query!( + "SELECT id FROM roles WHERE id = $1 AND (community_id = $2 OR community_id IS NULL)", + role_id, + community_id + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "Role not found".to_string()))?; + + // Assign role + sqlx::query!( + r#"INSERT INTO user_roles (user_id, role_id, community_id, granted_by, expires_at) + VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (user_id, role_id, community_id) DO UPDATE SET + granted_by = $4, expires_at = $5, granted_at = NOW()"#, + req.user_id, + role_id, + community_id, + auth.user_id, + req.expires_at + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(serde_json::json!({"success": true}))) +} + +/// Remove role from user +async fn remove_role( + auth: AuthUser, + Path((community_id, role_id, user_id)): Path<(Uuid, Uuid, Uuid)>, + State(pool): State, +) -> Result, (StatusCode, String)> { + // Check permission + let has_perm = sqlx::query_scalar!( + "SELECT user_has_permission($1, 'community.roles.manage', $2)", + auth.user_id, + community_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
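+    // user_has_permission is a SQL function returning a nullable boolean; a NULL result is
+    // treated as "no permission", the same convention used by the other role handlers.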
+ .unwrap_or(false); + + if !has_perm { + return Err((StatusCode::FORBIDDEN, "No permission to manage roles".to_string())); + } + + sqlx::query!( + "DELETE FROM user_roles WHERE user_id = $1 AND role_id = $2 AND community_id = $3", + user_id, + role_id, + community_id + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(serde_json::json!({"success": true}))) +} + +/// Get user's roles in a community +async fn get_user_roles( + Path((community_id, user_id)): Path<(Uuid, Uuid)>, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let roles = sqlx::query!( + r#"SELECT r.id, r.name, r.display_name, r.color + FROM roles r + JOIN user_roles ur ON r.id = ur.role_id + WHERE ur.user_id = $1 + AND (ur.community_id = $2 OR ur.community_id IS NULL) + AND (ur.expires_at IS NULL OR ur.expires_at > NOW()) + ORDER BY r.priority DESC"#, + user_id, + community_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(roles.into_iter().map(|r| RoleSummary { + id: r.id, + name: r.name, + display_name: r.display_name, + color: r.color, + }).collect())) +} + +/// Check if current user has a specific permission +async fn check_permission( + auth: AuthUser, + Path((community_id, permission_name)): Path<(Uuid, String)>, + State(pool): State, +) -> Result, (StatusCode, String)> { + let has_perm = sqlx::query_scalar!( + "SELECT user_has_permission($1, $2, $3)", + auth.user_id, + permission_name, + community_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .unwrap_or(false); + + Ok(Json(serde_json::json!({ + "has_permission": has_perm, + "permission": permission_name + }))) +} + +// ============================================================================ +// Router +// ============================================================================ + +pub fn router(pool: PgPool) -> Router { + Router::new() + // Permissions + .route("/api/permissions", get(list_permissions)) + // Platform roles + .route("/api/roles", get(list_platform_roles)) + // Community roles + .route("/api/communities/{community_id}/roles", get(list_community_roles).post(create_community_role)) + .route("/api/communities/{community_id}/roles/{role_id}/assign", post(assign_role)) + .route("/api/communities/{community_id}/roles/{role_id}/users/{user_id}", delete(remove_role)) + .route("/api/communities/{community_id}/users/{user_id}/roles", get(get_user_roles)) + .route("/api/communities/{community_id}/permissions/{permission_name}/check", get(check_permission)) + .with_state(pool) +} diff --git a/backend/src/api/self_moderation.rs b/backend/src/api/self_moderation.rs new file mode 100644 index 0000000..50d26b0 --- /dev/null +++ b/backend/src/api/self_moderation.rs @@ -0,0 +1,186 @@ +//! Self-Moderation API endpoints (community rules and violations). 
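+//!
+//! Thin HTTP layer over ModerationRulesService: community rules (list/create), violation
+//! reports and reviews, per-user moderation summaries, and lifting sanctions. Route paths
+//! are defined in `router` at the bottom of this file.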
+ +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::{get, post}, + Json, Router, +}; +use serde::Deserialize; +use serde_json::{json, Value}; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::auth::AuthUser; +use crate::plugins::builtin::self_moderation::{ + CommunityRule, ModerationRulesService, +}; + +// ============================================================================ +// Request Types +// ============================================================================ + +#[derive(Debug, Deserialize)] +pub struct CreateRuleRequest { + pub code: String, + pub title: String, + pub description: String, + pub severity: String, +} + +#[derive(Debug, Deserialize)] +pub struct LiftSanctionRequest { + pub reason: String, +} + +#[derive(Debug, Deserialize)] +pub struct ReportViolationRequest { + pub rule_id: Uuid, + pub target_user_id: Uuid, + pub reason: String, + pub evidence: Option, +} + +#[derive(Debug, Deserialize)] +pub struct ReviewViolationRequest { + pub confirmed: bool, + pub notes: Option, +} + +// ============================================================================ +// Handlers +// ============================================================================ + +/// Get community rules +async fn get_community_rules( + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let rules = ModerationRulesService::get_community_rules(&pool, community_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(rules)) +} + +/// Create a community rule +async fn create_rule( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let rule_id = ModerationRulesService::create_rule( + &pool, + community_id, + &req.code, + &req.title, + &req.description, + &req.severity, + auth.user_id, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": rule_id}))) +} + +/// Report a rule violation +async fn report_violation( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let violation_id = ModerationRulesService::report_violation( + &pool, + community_id, + req.rule_id, + req.target_user_id, + Some(auth.user_id), + &req.reason, + req.evidence, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": violation_id}))) +} + +/// Get pending violations for review +async fn get_pending_violations( + _auth: AuthUser, + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let violations = ModerationRulesService::get_pending_violations(&pool, community_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(violations)) +} + +/// Review a violation +async fn review_violation( + auth: AuthUser, + Path(violation_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + ModerationRulesService::review_violation( + &pool, + violation_id, + auth.user_id, + req.confirmed, + req.notes.as_deref(), + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"success": true}))) +} + +/// Get user moderation summary +async fn get_user_summary( + _auth: AuthUser, + Path((community_id, user_id)): Path<(Uuid, Uuid)>, + State(pool): State, +) -> Result, (StatusCode, String)> { + let summary = 
ModerationRulesService::get_user_summary(&pool, user_id, community_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(summary)) +} + +/// Lift a sanction +async fn lift_sanction( + auth: AuthUser, + Path(sanction_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + ModerationRulesService::lift_sanction(&pool, sanction_id, auth.user_id, &req.reason) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"success": true}))) +} + +// ============================================================================ +// Router +// ============================================================================ + +pub fn router(pool: PgPool) -> Router { + Router::new() + // Rules + .route("/api/communities/{community_id}/rules", get(get_community_rules).post(create_rule)) + // Violations + .route("/api/communities/{community_id}/violations", get(get_pending_violations).post(report_violation)) + .route("/api/violations/{violation_id}/review", post(review_violation)) + // User summary + .route("/api/communities/{community_id}/users/{user_id}/moderation", get(get_user_summary)) + // Sanctions + .route("/api/sanctions/{sanction_id}/lift", post(lift_sanction)) + .with_state(pool) +} diff --git a/backend/src/api/settings.rs b/backend/src/api/settings.rs new file mode 100644 index 0000000..e931ad3 --- /dev/null +++ b/backend/src/api/settings.rs @@ -0,0 +1,358 @@ +//! Instance and community settings API endpoints. + +use axum::{ + extract::{Path, State}, + routing::{get, patch, post}, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::auth::AuthUser; +use super::permissions::{require_permission, require_any_permission, perms}; +use axum::http::StatusCode; + +// ============================================================================ +// Types +// ============================================================================ + +#[derive(Debug, Serialize)] +pub struct SetupStatus { + pub setup_required: bool, + pub instance_name: Option, +} + +#[derive(Debug, Serialize)] +pub struct InstanceSettings { + pub id: Uuid, + pub setup_completed: bool, + pub instance_name: String, + pub platform_mode: String, + pub registration_enabled: bool, + pub registration_mode: String, + pub default_community_visibility: String, + pub allow_private_communities: bool, + pub default_plugin_policy: String, + pub default_moderation_mode: String, +} + +#[derive(Debug, Deserialize)] +pub struct SetupRequest { + pub instance_name: String, + pub platform_mode: String, + #[serde(default)] + pub single_community_name: Option, +} + +#[derive(Debug, Deserialize)] +pub struct UpdateInstanceRequest { + #[serde(default)] + pub instance_name: Option, + #[serde(default)] + pub platform_mode: Option, + #[serde(default)] + pub registration_enabled: Option, + #[serde(default)] + pub registration_mode: Option, +} + +#[derive(Debug, Serialize)] +pub struct CommunitySettings { + pub community_id: Uuid, + pub membership_mode: String, + pub moderation_mode: String, + pub governance_model: String, + pub plugin_policy: String, + pub features_enabled: Value, +} + +#[derive(Debug, Deserialize)] +pub struct UpdateCommunitySettingsRequest { + #[serde(default)] + pub membership_mode: Option, + #[serde(default)] + pub moderation_mode: Option, + #[serde(default)] + pub governance_model: Option, + #[serde(default)] + pub plugin_policy: Option, +} + +// 
============================================================================ +// Handlers +// ============================================================================ + +/// Check if setup is required (public endpoint) +async fn get_setup_status(State(pool): State) -> Result, String> { + let row = sqlx::query!( + "SELECT setup_completed, instance_name FROM instance_settings LIMIT 1" + ) + .fetch_optional(&pool) + .await + .map_err(|e| e.to_string())?; + + match row { + Some(r) => Ok(Json(SetupStatus { + setup_required: !r.setup_completed, + instance_name: Some(r.instance_name), + })), + None => Ok(Json(SetupStatus { + setup_required: true, + instance_name: None, + })), + } +} + +/// Complete initial setup +async fn complete_setup( + State(pool): State, + auth: AuthUser, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Check platform admin permission + require_permission(&pool, auth.user_id, perms::PLATFORM_ADMIN, None).await?; + + // Check if already set up + let existing = sqlx::query!("SELECT setup_completed FROM instance_settings LIMIT 1") + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + if existing.map(|e| e.setup_completed).unwrap_or(false) { + return Err((StatusCode::BAD_REQUEST, "Setup already completed".to_string())); + } + + // Handle single_community mode + let single_community_id: Option = if req.platform_mode == "single_community" { + let name = req.single_community_name.as_deref().unwrap_or("Main Community"); + let community = sqlx::query!( + r#"INSERT INTO communities (name, slug, description, is_active, created_by) + VALUES ($1, $2, $3, true, $4) + RETURNING id"#, + name, + slug::slugify(name), + format!("The {} community", name), + auth.user_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + Some(community.id) + } else { + None + }; + + // Update settings + let settings = sqlx::query!( + r#"UPDATE instance_settings SET + setup_completed = true, + setup_completed_at = NOW(), + setup_completed_by = $1, + instance_name = $2, + platform_mode = $3, + single_community_id = $4 + RETURNING id, setup_completed, instance_name, platform_mode, + registration_enabled, registration_mode, + default_community_visibility, allow_private_communities, + default_plugin_policy, default_moderation_mode"#, + auth.user_id, + req.instance_name, + req.platform_mode, + single_community_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(InstanceSettings { + id: settings.id, + setup_completed: settings.setup_completed, + instance_name: settings.instance_name, + platform_mode: settings.platform_mode, + registration_enabled: settings.registration_enabled, + registration_mode: settings.registration_mode, + default_community_visibility: settings.default_community_visibility, + allow_private_communities: settings.allow_private_communities, + default_plugin_policy: settings.default_plugin_policy, + default_moderation_mode: settings.default_moderation_mode, + })) +} + +/// Get instance settings (admin only) +async fn get_instance_settings( + State(pool): State, + auth: AuthUser, +) -> Result, (StatusCode, String)> { + // Check platform settings permission + require_permission(&pool, auth.user_id, perms::PLATFORM_SETTINGS, None).await?; + + let s = sqlx::query!( + r#"SELECT id, setup_completed, instance_name, platform_mode, + registration_enabled, registration_mode, + default_community_visibility, 
allow_private_communities, + default_plugin_policy, default_moderation_mode + FROM instance_settings LIMIT 1"# + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(InstanceSettings { + id: s.id, + setup_completed: s.setup_completed, + instance_name: s.instance_name, + platform_mode: s.platform_mode, + registration_enabled: s.registration_enabled, + registration_mode: s.registration_mode, + default_community_visibility: s.default_community_visibility, + allow_private_communities: s.allow_private_communities, + default_plugin_policy: s.default_plugin_policy, + default_moderation_mode: s.default_moderation_mode, + })) +} + +/// Update instance settings (admin only) +async fn update_instance_settings( + State(pool): State, + auth: AuthUser, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Check platform settings permission + require_permission(&pool, auth.user_id, perms::PLATFORM_SETTINGS, None).await?; + + let s = sqlx::query!( + r#"UPDATE instance_settings SET + instance_name = COALESCE($1, instance_name), + platform_mode = COALESCE($2, platform_mode), + registration_enabled = COALESCE($3, registration_enabled), + registration_mode = COALESCE($4, registration_mode) + RETURNING id, setup_completed, instance_name, platform_mode, + registration_enabled, registration_mode, + default_community_visibility, allow_private_communities, + default_plugin_policy, default_moderation_mode"#, + req.instance_name, + req.platform_mode, + req.registration_enabled, + req.registration_mode + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(InstanceSettings { + id: s.id, + setup_completed: s.setup_completed, + instance_name: s.instance_name, + platform_mode: s.platform_mode, + registration_enabled: s.registration_enabled, + registration_mode: s.registration_mode, + default_community_visibility: s.default_community_visibility, + allow_private_communities: s.allow_private_communities, + default_plugin_policy: s.default_plugin_policy, + default_moderation_mode: s.default_moderation_mode, + })) +} + +/// Get community settings +async fn get_community_settings( + State(pool): State, + Path(community_id): Path, +) -> Result, (StatusCode, String)> { + // Ensure settings exist + sqlx::query!( + "INSERT INTO community_settings (community_id) VALUES ($1) ON CONFLICT DO NOTHING", + community_id + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let s = sqlx::query!( + r#"SELECT community_id, membership_mode, moderation_mode, + governance_model, plugin_policy, features_enabled + FROM community_settings WHERE community_id = $1"#, + community_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(CommunitySettings { + community_id: s.community_id, + membership_mode: s.membership_mode, + moderation_mode: s.moderation_mode, + governance_model: s.governance_model, + plugin_policy: s.plugin_policy, + features_enabled: s.features_enabled, + })) +} + +/// Update community settings +async fn update_community_settings( + State(pool): State, + auth: AuthUser, + Path(community_id): Path, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Check community settings permission (community admin or platform admin) + require_any_permission( + &pool, + auth.user_id, + &[perms::COMMUNITY_SETTINGS, perms::PLATFORM_ADMIN], + Some(community_id), + ).await?; + + // Ensure settings exist + 
sqlx::query!( + "INSERT INTO community_settings (community_id) VALUES ($1) ON CONFLICT DO NOTHING", + community_id + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let s = sqlx::query!( + r#"UPDATE community_settings SET + membership_mode = COALESCE($2, membership_mode), + moderation_mode = COALESCE($3, moderation_mode), + governance_model = COALESCE($4, governance_model), + plugin_policy = COALESCE($5, plugin_policy) + WHERE community_id = $1 + RETURNING community_id, membership_mode, moderation_mode, + governance_model, plugin_policy, features_enabled"#, + community_id, + req.membership_mode, + req.moderation_mode, + req.governance_model, + req.plugin_policy + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(CommunitySettings { + community_id: s.community_id, + membership_mode: s.membership_mode, + moderation_mode: s.moderation_mode, + governance_model: s.governance_model, + plugin_policy: s.plugin_policy, + features_enabled: s.features_enabled, + })) +} + +// ============================================================================ +// Router +// ============================================================================ + +pub fn router(pool: PgPool) -> Router { + Router::new() + .route("/api/settings/setup/status", get(get_setup_status)) + .route("/api/settings/setup", post(complete_setup)) + .route("/api/settings/instance", get(get_instance_settings)) + .route("/api/settings/instance", patch(update_instance_settings)) + .route("/api/settings/communities/{community_id}", get(get_community_settings)) + .route("/api/settings/communities/{community_id}", patch(update_community_settings)) + .with_state(pool) +} diff --git a/backend/src/api/users.rs b/backend/src/api/users.rs new file mode 100644 index 0000000..1b58ca0 --- /dev/null +++ b/backend/src/api/users.rs @@ -0,0 +1,188 @@ +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::{get, put}, + Json, Router, +}; +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::auth::AuthUser; +use crate::models::user::UserResponse; + +pub fn router(pool: PgPool) -> Router { + Router::new() + .route("/api/users", get(list_users)) + .route("/api/users/{username}", get(get_user_profile)) + .route("/api/users/{username}/votes", get(get_user_votes)) + .route("/api/users/me/profile", put(update_profile)) + .with_state(pool) +} + +async fn list_users( + State(pool): State, +) -> Result>, String> { + let users = sqlx::query_as!( + crate::models::User, + "SELECT * FROM users WHERE is_active = true ORDER BY created_at DESC LIMIT 100" + ) + .fetch_all(&pool) + .await + .map_err(|e| e.to_string())?; + + Ok(Json(users.into_iter().map(UserResponse::from).collect())) +} + +#[derive(Debug, Serialize)] +pub struct UserProfile { + pub id: Uuid, + pub username: String, + pub display_name: Option, + pub created_at: DateTime, + pub communities: Vec, + pub proposal_count: i64, + pub comment_count: i64, +} + +#[derive(Debug, Serialize)] +pub struct CommunityMembership { + pub id: Uuid, + pub name: String, + pub slug: String, + pub role: String, +} + +async fn get_user_profile( + Path(username): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let user = sqlx::query!( + "SELECT id, username, display_name, created_at FROM users WHERE username = $1 AND is_active = true", + username + ) + .fetch_optional(&pool) + .await + .map_err(|e| 
(StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "User not found".to_string()))?; + + let communities = sqlx::query!( + r#" + SELECT c.id, c.name, c.slug, cm.role + FROM communities c + JOIN community_members cm ON c.id = cm.community_id + WHERE cm.user_id = $1 AND c.is_active = true + ORDER BY cm.joined_at DESC + "#, + user.id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let proposal_count = sqlx::query_scalar!( + "SELECT COUNT(*) FROM proposals WHERE author_id = $1", + user.id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .unwrap_or(0); + + let comment_count = sqlx::query_scalar!( + "SELECT COUNT(*) FROM comments WHERE author_id = $1", + user.id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .unwrap_or(0); + + Ok(Json(UserProfile { + id: user.id, + username: user.username, + display_name: user.display_name, + created_at: user.created_at, + communities: communities.into_iter().map(|c| CommunityMembership { + id: c.id, + name: c.name, + slug: c.slug, + role: c.role, + }).collect(), + proposal_count, + comment_count, + })) +} + +#[derive(Debug, Deserialize)] +pub struct UpdateProfile { + pub display_name: Option, +} + +async fn update_profile( + auth: AuthUser, + State(pool): State, + Json(payload): Json, +) -> Result, (StatusCode, String)> { + sqlx::query!( + "UPDATE users SET display_name = $1 WHERE id = $2", + payload.display_name, + auth.user_id + ) + .execute(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(serde_json::json!({ "status": "updated" }))) +} + +#[derive(Debug, Serialize)] +pub struct UserVote { + pub proposal_id: Uuid, + pub proposal_title: String, + pub community_name: String, + pub option_label: String, + pub voted_at: DateTime, +} + +async fn get_user_votes( + Path(username): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let user = sqlx::query!("SELECT id FROM users WHERE username = $1", username) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .ok_or((StatusCode::NOT_FOUND, "User not found".to_string()))?; + + let votes = sqlx::query!( + r#" + SELECT v.created_at as voted_at, po.label as option_label, + p.id as proposal_id, p.title as proposal_title, + c.name as community_name + FROM votes v + JOIN voting_identities vi ON v.voter_id = vi.id + JOIN proposal_options po ON v.option_id = po.id + JOIN proposals p ON po.proposal_id = p.id + JOIN communities c ON p.community_id = c.id + WHERE vi.user_id = $1 + ORDER BY v.created_at DESC + LIMIT 20 + "#, + user.id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let result = votes.into_iter().map(|v| UserVote { + proposal_id: v.proposal_id, + proposal_title: v.proposal_title, + community_name: v.community_name, + option_label: v.option_label, + voted_at: v.voted_at, + }).collect(); + + Ok(Json(result)) +} diff --git a/backend/src/api/voting_config.rs b/backend/src/api/voting_config.rs new file mode 100644 index 0000000..4f77f08 --- /dev/null +++ b/backend/src/api/voting_config.rs @@ -0,0 +1,452 @@ +//! Voting Configuration API +//! +//! Manages voting method plugins at platform and community level. 
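+//!
+//! Covers the platform-wide voting_method_plugins catalogue (updates require the
+//! voting-config permission), per-community overrides in community_voting_methods that
+//! fall back to the platform defaults, and the instance-level plugin settings types
+//! declared below.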
+ +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::{get, post, put}, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use sqlx::PgPool; +use uuid::Uuid; +#[allow(unused_imports)] +use chrono::{DateTime, Utc}; + +use crate::auth::AuthUser; +use super::permissions::{require_permission, perms}; + +// ============================================================================ +// Types +// ============================================================================ + +#[derive(Debug, Serialize)] +pub struct VotingMethodPlugin { + pub id: Uuid, + pub name: String, + pub display_name: String, + pub description: Option, + pub icon: Option, + pub is_active: bool, + pub is_default: bool, + pub config_schema: Option, + pub default_config: serde_json::Value, + pub complexity_level: String, + pub supports_delegation: bool, +} + +#[derive(Debug, Serialize)] +pub struct CommunityVotingMethod { + pub id: Uuid, + pub voting_method: VotingMethodPlugin, + pub is_enabled: bool, + pub is_default: bool, + pub config: serde_json::Value, +} + +#[derive(Debug, Deserialize)] +pub struct UpdateVotingMethodRequest { + pub is_active: Option, + pub is_default: Option, +} + +#[derive(Debug, Deserialize)] +pub struct ConfigureCommunityVotingRequest { + pub is_enabled: Option, + pub is_default: Option, + pub config: Option, +} + +#[derive(Debug, Serialize)] +pub struct DefaultPlugin { + pub plugin_name: String, + pub plugin_type: String, + pub display_name: String, + pub description: Option, + pub is_core: bool, + pub is_recommended: bool, + pub default_enabled: bool, + pub category: Option, +} + +#[derive(Debug, Serialize)] +pub struct InstancePlugin { + pub plugin_name: String, + pub is_enabled: bool, + pub config: serde_json::Value, +} + +#[derive(Debug, Deserialize)] +pub struct UpdateInstancePluginRequest { + pub is_enabled: Option, + pub config: Option, +} + +// ============================================================================ +// Platform Voting Methods +// ============================================================================ + +/// List all voting method plugins +async fn list_voting_methods( + State(pool): State, +) -> Result>, (StatusCode, String)> { + let methods = sqlx::query!( + r#"SELECT id, name, display_name, description, icon, is_active, is_default, + config_schema, default_config, complexity_level, supports_delegation + FROM voting_method_plugins ORDER BY name"# + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(methods.into_iter().map(|m| VotingMethodPlugin { + id: m.id, + name: m.name, + display_name: m.display_name, + description: m.description, + icon: m.icon, + is_active: m.is_active, + is_default: m.is_default, + config_schema: m.config_schema, + default_config: m.default_config.unwrap_or_default(), + complexity_level: m.complexity_level.unwrap_or_default(), + supports_delegation: m.supports_delegation, + }).collect())) +} + +/// Update platform voting method (admin only) +async fn update_voting_method( + auth: AuthUser, + Path(method_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Check voting configuration permission + require_permission(&pool, auth.user_id, perms::VOTING_CONFIG, None).await?; + + // If setting as default, unset other defaults first + if req.is_default == Some(true) { + sqlx::query!("UPDATE voting_method_plugins SET is_default = FALSE") + .execute(&pool) + .await + .ok(); + } + + let method = sqlx::query!( + r#"UPDATE 
voting_method_plugins SET + is_active = COALESCE($2, is_active), + is_default = COALESCE($3, is_default), + updated_at = NOW() + WHERE id = $1 + RETURNING id, name, display_name, description, icon, is_active, is_default, + config_schema, default_config, complexity_level, supports_delegation"#, + method_id, + req.is_active, + req.is_default + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(VotingMethodPlugin { + id: method.id, + name: method.name, + display_name: method.display_name, + description: method.description, + icon: method.icon, + is_active: method.is_active, + is_default: method.is_default, + config_schema: method.config_schema, + default_config: method.default_config.unwrap_or_default(), + complexity_level: method.complexity_level.unwrap_or_default(), + supports_delegation: method.supports_delegation, + })) +} + +// ============================================================================ +// Community Voting Methods +// ============================================================================ + +/// List voting methods for a community (includes enabled status) +async fn list_community_voting_methods( + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let methods = sqlx::query!( + r#"SELECT vmp.id, vmp.name, vmp.display_name, vmp.description, vmp.icon, + vmp.is_active as platform_active, vmp.config_schema, vmp.default_config, + vmp.complexity_level, vmp.supports_delegation, + COALESCE(cvm.is_enabled, vmp.is_active) as is_enabled, + COALESCE(cvm.is_default, vmp.is_default) as is_default, + COALESCE(cvm.config, vmp.default_config) as config + FROM voting_method_plugins vmp + LEFT JOIN community_voting_methods cvm + ON vmp.id = cvm.voting_method_id AND cvm.community_id = $1 + WHERE vmp.is_active = TRUE + ORDER BY vmp.name"#, + community_id + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(methods.into_iter().map(|m| CommunityVotingMethod { + id: m.id, + voting_method: VotingMethodPlugin { + id: m.id, + name: m.name.clone(), + display_name: m.display_name.clone(), + description: m.description.clone(), + icon: m.icon.clone(), + is_active: m.platform_active, + is_default: m.is_default.unwrap_or(false), + config_schema: m.config_schema.clone(), + default_config: m.default_config.clone().unwrap_or_default(), + complexity_level: m.complexity_level.clone().unwrap_or_default(), + supports_delegation: m.supports_delegation, + }, + is_enabled: m.is_enabled.unwrap_or(false), + is_default: m.is_default.unwrap_or(false), + config: m.config.unwrap_or_default(), + }).collect())) +} + +/// Configure voting method for a community +async fn configure_community_voting_method( + auth: AuthUser, + Path((community_id, method_id)): Path<(Uuid, Uuid)>, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Check permission + let has_perm = sqlx::query_scalar!( + "SELECT user_has_permission($1, 'voting.methods.manage', $2)", + auth.user_id, + community_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? 
+ .unwrap_or(false); + + if !has_perm { + return Err((StatusCode::FORBIDDEN, "No permission to manage voting methods".to_string())); + } + + // If setting as default, unset other defaults first + if req.is_default == Some(true) { + sqlx::query!( + "UPDATE community_voting_methods SET is_default = FALSE WHERE community_id = $1", + community_id + ) + .execute(&pool) + .await + .ok(); + } + + let result = sqlx::query!( + r#"INSERT INTO community_voting_methods (community_id, voting_method_id, is_enabled, is_default, config) + VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (community_id, voting_method_id) DO UPDATE SET + is_enabled = COALESCE($3, community_voting_methods.is_enabled), + is_default = COALESCE($4, community_voting_methods.is_default), + config = COALESCE($5, community_voting_methods.config), + updated_at = NOW() + RETURNING id, is_enabled, is_default, config"#, + community_id, + method_id, + req.is_enabled, + req.is_default, + req.config + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Fetch full method info + let method = sqlx::query!( + r#"SELECT id, name, display_name, description, icon, is_active, is_default, + config_schema, default_config, complexity_level, supports_delegation + FROM voting_method_plugins WHERE id = $1"#, + method_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(CommunityVotingMethod { + id: result.id, + voting_method: VotingMethodPlugin { + id: method.id, + name: method.name, + display_name: method.display_name, + description: method.description, + icon: method.icon, + is_active: method.is_active, + is_default: method.is_default, + config_schema: method.config_schema, + default_config: method.default_config.unwrap_or_default(), + complexity_level: method.complexity_level.unwrap_or_default(), + supports_delegation: method.supports_delegation, + }, + is_enabled: result.is_enabled, + is_default: result.is_default, + config: result.config.unwrap_or_default(), + })) +} + +// ============================================================================ +// Default Plugins (for setup) +// ============================================================================ + +/// List default plugins for setup +async fn list_default_plugins( + State(pool): State, +) -> Result>, (StatusCode, String)> { + let plugins = sqlx::query!( + r#"SELECT plugin_name, plugin_type, display_name, description, + is_core, is_recommended, default_enabled, category + FROM default_plugins ORDER BY sort_order, plugin_name"# + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(plugins.into_iter().map(|p| DefaultPlugin { + plugin_name: p.plugin_name, + plugin_type: p.plugin_type, + display_name: p.display_name, + description: p.description, + is_core: p.is_core, + is_recommended: p.is_recommended, + default_enabled: p.default_enabled, + category: p.category, + }).collect())) +} + +/// List instance plugins +async fn list_instance_plugins( + State(pool): State, +) -> Result>, (StatusCode, String)> { + let plugins = sqlx::query!( + r#"SELECT plugin_name, is_enabled, config FROM instance_plugins ORDER BY plugin_name"# + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(plugins.into_iter().map(|p| InstancePlugin { + plugin_name: p.plugin_name, + is_enabled: p.is_enabled, + config: p.config.unwrap_or_default(), + }).collect())) +} + +/// Update instance 
plugin +async fn update_instance_plugin( + auth: AuthUser, + Path(plugin_name): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + // Check plugin management permission + require_permission(&pool, auth.user_id, perms::PLATFORM_PLUGINS, None).await?; + + // Check if it's a core plugin that can't be disabled + let is_core = sqlx::query_scalar!( + "SELECT is_core FROM default_plugins WHERE plugin_name = $1", + plugin_name + ) + .fetch_optional(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))? + .unwrap_or(false); + + if is_core && req.is_enabled == Some(false) { + return Err((StatusCode::BAD_REQUEST, "Cannot disable core plugins".to_string())); + } + + let plugin = sqlx::query!( + r#"INSERT INTO instance_plugins (plugin_name, is_enabled, config, enabled_by, enabled_at) + VALUES ($1, $2, $3, $4, NOW()) + ON CONFLICT (plugin_name) DO UPDATE SET + is_enabled = COALESCE($2, instance_plugins.is_enabled), + config = COALESCE($3, instance_plugins.config), + updated_at = NOW() + RETURNING plugin_name, is_enabled, config"#, + plugin_name, + req.is_enabled, + req.config, + auth.user_id + ) + .fetch_one(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(InstancePlugin { + plugin_name: plugin.plugin_name, + is_enabled: plugin.is_enabled, + config: plugin.config.unwrap_or_default(), + })) +} + +/// Initialize default plugins during setup +async fn initialize_default_plugins( + auth: AuthUser, + State(pool): State, + Json(enabled_plugins): Json>, +) -> Result, (StatusCode, String)> { + // Check platform admin permission for setup + require_permission(&pool, auth.user_id, perms::PLATFORM_ADMIN, None).await?; + + // Get all default plugins + let defaults = sqlx::query!( + "SELECT plugin_name, is_core, default_enabled FROM default_plugins" + ) + .fetch_all(&pool) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + // Initialize each plugin + for plugin in defaults { + let is_enabled = plugin.is_core || enabled_plugins.contains(&plugin.plugin_name); + + sqlx::query!( + r#"INSERT INTO instance_plugins (plugin_name, is_enabled, enabled_by, enabled_at) + VALUES ($1, $2, $3, NOW()) + ON CONFLICT (plugin_name) DO UPDATE SET is_enabled = $2"#, + plugin.plugin_name, + is_enabled, + auth.user_id + ) + .execute(&pool) + .await + .ok(); + } + + Ok(Json(serde_json::json!({ + "initialized": true, + "plugins_enabled": enabled_plugins.len() + }))) +} + +// ============================================================================ +// Router +// ============================================================================ + +pub fn router(pool: PgPool) -> Router { + Router::new() + // Platform voting methods + .route("/api/voting-methods", get(list_voting_methods)) + .route("/api/voting-methods/{method_id}", put(update_voting_method)) + // Community voting methods + .route("/api/communities/{community_id}/voting-methods", get(list_community_voting_methods)) + .route("/api/communities/{community_id}/voting-methods/{method_id}", put(configure_community_voting_method)) + // Default plugins + .route("/api/plugins/defaults", get(list_default_plugins)) + .route("/api/plugins/instance", get(list_instance_plugins)) + .route("/api/plugins/instance/{plugin_name}", put(update_instance_plugin)) + .route("/api/plugins/initialize", post(initialize_default_plugins)) + .with_state(pool) +} diff --git a/backend/src/api/workflows.rs b/backend/src/api/workflows.rs new file mode 100644 index 
0000000..3d2b1a5 --- /dev/null +++ b/backend/src/api/workflows.rs @@ -0,0 +1,159 @@ +//! Decision Workflows API endpoints. + +use axum::{ + extract::{Path, State}, + http::StatusCode, + routing::{get, post}, + Json, Router, +}; +use serde::Deserialize; +use serde_json::{json, Value}; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::auth::AuthUser; +use crate::plugins::builtin::decision_workflows::{ + WorkflowInstance, WorkflowPhase, WorkflowService, WorkflowTemplate, +}; + +// ============================================================================ +// Request Types +// ============================================================================ + +#[derive(Debug, Deserialize)] +pub struct CreateTemplateRequest { + pub name: String, + pub description: Option, +} + +#[derive(Debug, Deserialize)] +pub struct AddPhaseRequest { + pub name: String, + pub phase_type: String, + pub sequence_order: i32, + pub duration_hours: i32, + pub quorum_value: f64, +} + +// ============================================================================ +// Handlers +// ============================================================================ + +/// List workflow templates for a community +async fn list_templates( + Path(community_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let templates = WorkflowService::list_templates(&pool, Some(community_id)) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(templates)) +} + +/// Create a new workflow template +async fn create_template( + auth: AuthUser, + Path(community_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let template_id = WorkflowService::create_template( + &pool, + community_id, + &req.name, + req.description.as_deref(), + auth.user_id, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": template_id}))) +} + +/// Get phases for a template +async fn get_template_phases( + Path(template_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let phases = WorkflowService::get_template_phases(&pool, template_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(phases)) +} + +/// Add a phase to a template +async fn add_phase( + _auth: AuthUser, + Path(template_id): Path, + State(pool): State, + Json(req): Json, +) -> Result, (StatusCode, String)> { + let phase_id = WorkflowService::add_phase( + &pool, + template_id, + &req.name, + &req.phase_type, + req.sequence_order, + req.duration_hours, + req.quorum_value, + ) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"id": phase_id}))) +} + +/// Get workflow for a proposal +async fn get_workflow_for_proposal( + Path(proposal_id): Path, + State(pool): State, +) -> Result>, (StatusCode, String)> { + let workflow = WorkflowService::get_workflow_for_proposal(&pool, proposal_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(workflow)) +} + +/// Get workflow progress +async fn get_workflow_progress( + Path(proposal_id): Path, + State(pool): State, +) -> Result, (StatusCode, String)> { + let progress = WorkflowService::get_workflow_progress(&pool, proposal_id) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(progress)) +} + +/// Manually advance workflow phase +async fn advance_phase( + auth: AuthUser, + Path(proposal_id): Path, + State(pool): State, +) -> Result, 
(StatusCode, String)> { + let success = WorkflowService::advance_phase(&pool, proposal_id, Some(auth.user_id), None) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(json!({"success": success}))) +} + +// ============================================================================ +// Router +// ============================================================================ + +pub fn router(pool: PgPool) -> Router { + Router::new() + // Templates + .route("/api/communities/{community_id}/workflow-templates", get(list_templates).post(create_template)) + .route("/api/workflow-templates/{template_id}/phases", get(get_template_phases).post(add_phase)) + // Proposal workflows + .route("/api/proposals/{proposal_id}/workflow", get(get_workflow_for_proposal)) + .route("/api/proposals/{proposal_id}/workflow/progress", get(get_workflow_progress)) + .route("/api/proposals/{proposal_id}/workflow/advance", post(advance_phase)) + .with_state(pool) +} diff --git a/backend/src/auth/jwt.rs b/backend/src/auth/jwt.rs new file mode 100644 index 0000000..9880875 --- /dev/null +++ b/backend/src/auth/jwt.rs @@ -0,0 +1,39 @@ +use chrono::{Duration, Utc}; +use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation}; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +#[derive(Debug, Serialize, Deserialize)] +pub struct Claims { + pub sub: Uuid, + pub username: String, + pub exp: i64, + pub iat: i64, +} + +pub fn create_token(user_id: Uuid, username: &str, secret: &str) -> Result { + let now = Utc::now(); + let exp = now + Duration::hours(24); + + let claims = Claims { + sub: user_id, + username: username.to_string(), + exp: exp.timestamp(), + iat: now.timestamp(), + }; + + encode( + &Header::default(), + &claims, + &EncodingKey::from_secret(secret.as_bytes()), + ) +} + +pub fn verify_token(token: &str, secret: &str) -> Result { + let token_data = decode::( + token, + &DecodingKey::from_secret(secret.as_bytes()), + &Validation::default(), + )?; + Ok(token_data.claims) +} diff --git a/backend/src/auth/middleware.rs b/backend/src/auth/middleware.rs new file mode 100644 index 0000000..d14046f --- /dev/null +++ b/backend/src/auth/middleware.rs @@ -0,0 +1,41 @@ +use axum::{ + extract::FromRequestParts, + http::{request::Parts, StatusCode}, +}; +use uuid::Uuid; + +use super::jwt::{verify_token, Claims}; + +pub struct AuthUser { + pub user_id: Uuid, + pub username: String, +} + +impl FromRequestParts for AuthUser +where + S: Send + Sync, +{ + type Rejection = (StatusCode, &'static str); + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + let auth_header = parts + .headers + .get("Authorization") + .and_then(|value| value.to_str().ok()) + .ok_or((StatusCode::UNAUTHORIZED, "Missing authorization header"))?; + + let token = auth_header + .strip_prefix("Bearer ") + .ok_or((StatusCode::UNAUTHORIZED, "Invalid authorization header format"))?; + + let secret = std::env::var("JWT_SECRET").unwrap_or_else(|_| "dev-secret-change-in-production".to_string()); + + let claims: Claims = verify_token(token, &secret) + .map_err(|_| (StatusCode::UNAUTHORIZED, "Invalid token"))?; + + Ok(AuthUser { + user_id: claims.sub, + username: claims.username, + }) + } +} diff --git a/backend/src/auth/mod.rs b/backend/src/auth/mod.rs new file mode 100644 index 0000000..461e45c --- /dev/null +++ b/backend/src/auth/mod.rs @@ -0,0 +1,7 @@ +pub mod password; +pub mod jwt; +pub mod middleware; + +pub use password::{hash_password, verify_password}; +pub use 
jwt::create_token; +pub use middleware::AuthUser; diff --git a/backend/src/auth/password.rs b/backend/src/auth/password.rs new file mode 100644 index 0000000..98a693a --- /dev/null +++ b/backend/src/auth/password.rs @@ -0,0 +1,18 @@ +use argon2::{ + password_hash::{rand_core::OsRng, PasswordHash, PasswordHasher, PasswordVerifier, SaltString}, + Argon2, +}; + +pub fn hash_password(password: &str) -> Result { + let salt = SaltString::generate(&mut OsRng); + let argon2 = Argon2::default(); + let hash = argon2.hash_password(password.as_bytes(), &salt)?; + Ok(hash.to_string()) +} + +pub fn verify_password(password: &str, hash: &str) -> Result { + let parsed_hash = PasswordHash::new(hash)?; + Ok(Argon2::default() + .verify_password(password.as_bytes(), &parsed_hash) + .is_ok()) +} diff --git a/backend/src/config/mod.rs b/backend/src/config/mod.rs new file mode 100644 index 0000000..9d0bb85 --- /dev/null +++ b/backend/src/config/mod.rs @@ -0,0 +1,42 @@ +use serde::Deserialize; + +#[derive(Debug, Clone, Deserialize)] +pub struct Config { + pub database_url: String, + pub server_host: String, + pub server_port: u16, + /// Enable demo mode - restricts destructive actions and enables demo accounts + #[serde(default)] + pub demo_mode: bool, + /// Secret key for JWT tokens + #[serde(default = "default_jwt_secret")] + pub jwt_secret: String, +} + +fn default_jwt_secret() -> String { + "change-me-in-production".to_string() +} + +impl Config { + pub fn from_env() -> Result { + dotenvy::dotenv().ok(); + envy::from_env::() + } + + /// Check if demo mode is enabled + pub fn is_demo(&self) -> bool { + self.demo_mode + } +} + +impl Default for Config { + fn default() -> Self { + Self { + database_url: "postgres://likwid:likwid@localhost:5432/likwid".to_string(), + server_host: "127.0.0.1".to_string(), + server_port: 3000, + demo_mode: false, + jwt_secret: default_jwt_secret(), + } + } +} diff --git a/backend/src/db/mod.rs b/backend/src/db/mod.rs new file mode 100644 index 0000000..5d383fd --- /dev/null +++ b/backend/src/db/mod.rs @@ -0,0 +1,9 @@ +use sqlx::postgres::PgPoolOptions; +use sqlx::PgPool; + +pub async fn create_pool(database_url: &str) -> Result { + PgPoolOptions::new() + .max_connections(5) + .connect(database_url) + .await +} diff --git a/backend/src/demo/mod.rs b/backend/src/demo/mod.rs new file mode 100644 index 0000000..b50929b --- /dev/null +++ b/backend/src/demo/mod.rs @@ -0,0 +1,85 @@ +//! Demo mode functionality for Likwid +//! +//! When DEMO_MODE=true, the system: +//! - Restricts certain destructive actions (delete community, change instance settings) +//! - Allows demo accounts to log in with known passwords +//! 
- Provides seeded data for demonstration purposes + +use sqlx::PgPool; + +/// Demo account credentials: (username, password, display_name) +pub const DEMO_ACCOUNTS: &[(&str, &str, &str)] = &[ + ("contributor", "demo123", "Demo Contributor"), + ("moderator", "demo123", "Demo Moderator"), + ("observer", "demo123", "Demo Observer"), +]; + +/// Check if a username is a demo account +pub fn is_demo_account(username: &str) -> bool { + DEMO_ACCOUNTS.iter().any(|(u, _, _)| *u == username) +} + +/// Verify demo account password +pub fn verify_demo_password(username: &str, password: &str) -> bool { + DEMO_ACCOUNTS + .iter() + .any(|(u, p, _)| *u == username && *p == password) +} + +/// Reset demo data to initial state +/// This clears user-created data and reloads the seed data +pub async fn reset_demo_data(pool: &PgPool) -> Result<(), sqlx::Error> { + tracing::info!("Resetting demo data to initial state..."); + + // Delete data that might have been modified (in reverse dependency order) + // Keep the demo users and communities, but reset their state + + // Clear votes + sqlx::query("DELETE FROM votes WHERE proposal_id IN (SELECT id FROM proposals WHERE community_id IN (SELECT id FROM communities WHERE slug IN ('aurora', 'civic-commons', 'makers')))") + .execute(pool) + .await?; + + // Clear delegated votes + sqlx::query("DELETE FROM delegated_votes WHERE proposal_id IN (SELECT id FROM proposals WHERE community_id IN (SELECT id FROM communities WHERE slug IN ('aurora', 'civic-commons', 'makers')))") + .execute(pool) + .await?; + + // Clear comments added after seed (keep seed comments) + sqlx::query("DELETE FROM comments WHERE id::text NOT LIKE 'com00001-%'") + .execute(pool) + .await?; + + // Reset proposal statuses for active proposals + sqlx::query("UPDATE proposals SET status = 'voting', voting_ends_at = NOW() + INTERVAL '5 days' WHERE id = 'p0000001-0000-0000-0000-000000000002'::uuid") + .execute(pool) + .await?; + sqlx::query("UPDATE proposals SET status = 'voting', voting_ends_at = NOW() + INTERVAL '5 days' WHERE id = 'p0000001-0000-0000-0000-000000000005'::uuid") + .execute(pool) + .await?; + sqlx::query("UPDATE proposals SET status = 'voting', voting_ends_at = NOW() + INTERVAL '5 days' WHERE id = 'p0000001-0000-0000-0000-000000000007'::uuid") + .execute(pool) + .await?; + + tracing::info!("Demo data reset complete"); + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_demo_accounts() { + assert!(is_demo_account("contributor")); + assert!(is_demo_account("moderator")); + assert!(is_demo_account("observer")); + assert!(!is_demo_account("admin")); + } + + #[test] + fn test_demo_password() { + assert!(verify_demo_password("contributor", "demo123")); + assert!(!verify_demo_password("contributor", "wrong")); + assert!(!verify_demo_password("unknown", "demo123")); + } +} diff --git a/backend/src/main.rs b/backend/src/main.rs new file mode 100644 index 0000000..d1630d6 --- /dev/null +++ b/backend/src/main.rs @@ -0,0 +1,185 @@ +mod api; +mod auth; +mod config; +mod db; +mod demo; +mod models; +mod plugins; +mod voting; + +use std::net::SocketAddr; +use std::sync::Arc; +use axum::Extension; +use chrono::{Datelike, Timelike, Utc, Weekday}; +use serde_json::json; +use tower_http::cors::{Any, CorsLayer}; +use tower_http::trace::TraceLayer; +use uuid::Uuid; + +use crate::config::Config; +use crate::plugins::HookContext; + +#[tokio::main] +async fn main() { + tracing_subscriber::fmt::init(); + + dotenvy::dotenv().ok(); + + // Load configuration + let config = 
Config::from_env().unwrap_or_default(); + let config = Arc::new(config); + + if config.is_demo() { + tracing::info!("🎭 DEMO MODE ENABLED - Some actions are restricted"); + } + + let database_url = std::env::var("DATABASE_URL") + .unwrap_or_else(|_| config.database_url.clone()); + + let pool = db::create_pool(&database_url) + .await + .expect("Failed to create database pool"); + + tracing::info!("Connected to database"); + + sqlx::migrate!("./migrations") + .run(&pool) + .await + .expect("Failed to run database migrations"); + + let cors = CorsLayer::new() + .allow_origin(Any) + .allow_methods(Any) + .allow_headers(Any); + + let plugins = plugins::PluginManager::new(pool.clone()) + .register_builtin_plugins() + .initialize() + .await + .expect("Failed to initialize plugins"); + + { + let cron_plugins = plugins.clone(); + let cron_pool = pool.clone(); + tokio::spawn(async move { + let mut last_minute_key: i64 = -1; + let mut last_hour_key: i64 = -1; + let mut last_day_key: i64 = -1; + let mut last_week_key: i64 = -1; + let mut last_15min_key: i64 = -1; + + let mut interval = tokio::time::interval(std::time::Duration::from_secs(5)); + loop { + interval.tick().await; + + let now = Utc::now(); + let minute_key = now.timestamp() / 60; + if minute_key == last_minute_key { + continue; + } + last_minute_key = minute_key; + + let ctx = HookContext { + pool: cron_pool.clone(), + community_id: None, + actor_user_id: None, + }; + + let payload = json!({"ts": now.to_rfc3339()}); + cron_plugins.do_action("cron.minute", ctx.clone(), payload.clone()).await; + cron_plugins + .do_action("cron.minutely", ctx.clone(), payload.clone()) + .await; + + let min15_key = now.timestamp() / 900; + if min15_key != last_15min_key { + last_15min_key = min15_key; + cron_plugins + .do_action("cron.every_15_minutes", ctx.clone(), payload.clone()) + .await; + } + + let hour_key = now.timestamp() / 3600; + if hour_key != last_hour_key { + last_hour_key = hour_key; + if now.minute() == 0 { + cron_plugins + .do_action("cron.hourly", ctx.clone(), payload.clone()) + .await; + } + } + + let day_key = now.timestamp() / 86_400; + if day_key != last_day_key { + last_day_key = day_key; + if now.hour() == 0 && now.minute() == 0 { + cron_plugins + .do_action("cron.daily", ctx.clone(), payload.clone()) + .await; + } + } + + let iso_week = now.iso_week(); + let week_key = (iso_week.year() as i64) * 100 + (iso_week.week() as i64); + if week_key != last_week_key { + last_week_key = week_key; + if now.weekday() == Weekday::Mon && now.hour() == 0 && now.minute() == 0 { + cron_plugins + .do_action("cron.weekly", ctx.clone(), payload.clone()) + .await; + } + } + + // WASM plugins need per-community context. 
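// The built-in hooks above run once per minute with a global HookContext
// (community_id: None); the block below re-emits the same cron hooks once per
// active community so that community-scoped WASM plugins receive them.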
+ + let community_ids: Vec<Uuid> = match sqlx::query_scalar( + "SELECT id FROM communities WHERE is_active = true", + ) + .fetch_all(&cron_pool) + .await + { + Ok(ids) => ids, + Err(e) => { + tracing::error!("cron: failed to list communities: {}", e); + continue; + } + }; + + let mut wasm_hooks: Vec<&'static str> = vec!["cron.minute", "cron.minutely"]; + // Fire the 15-minute WASM hooks only on quarter-hour boundaries. + if now.minute() % 15 == 0 { + wasm_hooks.push("cron.every_15_minutes"); + } + if now.minute() == 0 { + wasm_hooks.push("cron.hourly"); + } + if now.hour() == 0 && now.minute() == 0 { + wasm_hooks.push("cron.daily"); + if now.weekday() == Weekday::Mon { + wasm_hooks.push("cron.weekly"); + } + } + + for cid in community_ids { + for hook in &wasm_hooks { + cron_plugins + .do_wasm_action_for_community(hook, cid, payload.clone()) + .await; + } + } + } + }); + } + + let app = api::create_router(pool.clone(), config.clone()) + .layer(Extension(plugins)) + .layer(Extension(config.clone())) + .layer(cors) + .layer(TraceLayer::new_for_http()); + + let host: std::net::IpAddr = config.server_host.parse() + .unwrap_or_else(|_| std::net::IpAddr::V4(std::net::Ipv4Addr::new(127, 0, 0, 1))); + let addr = SocketAddr::from((host, config.server_port)); + tracing::info!("Likwid backend listening on http://{}", addr); + + let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); + axum::serve(listener, app).await.unwrap(); +} diff --git a/backend/src/models/community.rs b/backend/src/models/community.rs new file mode 100644 index 0000000..55224ee --- /dev/null +++ b/backend/src/models/community.rs @@ -0,0 +1,46 @@ +use serde::{Deserialize, Serialize}; +use sqlx::FromRow; +use uuid::Uuid; +use chrono::{DateTime, Utc}; + +#[derive(Debug, Clone, Serialize, Deserialize, FromRow)] +pub struct Community { + pub id: Uuid, + pub name: String, + pub slug: String, + pub description: Option<String>, + pub settings: serde_json::Value, + pub created_at: DateTime<Utc>, + pub updated_at: DateTime<Utc>, + pub is_active: bool, + pub created_by: Option<Uuid>, +} + +#[derive(Debug, Deserialize)] +#[allow(dead_code)] +pub struct CreateCommunity { + pub name: String, + pub slug: String, + pub description: Option<String>, +} + +#[derive(Debug, Serialize)] +pub struct CommunityResponse { + pub id: Uuid, + pub name: String, + pub slug: String, + pub description: Option<String>, + pub created_at: DateTime<Utc>, +} + +impl From<Community> for CommunityResponse { + fn from(c: Community) -> Self { + Self { + id: c.id, + name: c.name, + slug: c.slug, + description: c.description, + created_at: c.created_at, + } + } +} diff --git a/backend/src/models/mod.rs b/backend/src/models/mod.rs new file mode 100644 index 0000000..5f09a8a --- /dev/null +++ b/backend/src/models/mod.rs @@ -0,0 +1,7 @@ +pub mod user; +pub mod community; +pub mod proposal; + +pub use user::User; +pub use community::Community; +pub use proposal::ProposalStatus; diff --git a/backend/src/models/proposal.rs b/backend/src/models/proposal.rs new file mode 100644 index 0000000..a4285fd --- /dev/null +++ b/backend/src/models/proposal.rs @@ -0,0 +1,80 @@ +use serde::{Deserialize, Serialize}; +use sqlx::FromRow; +use uuid::Uuid; +use chrono::{DateTime, Utc}; + +#[derive(Debug, Clone, Serialize, Deserialize, sqlx::Type)] +#[sqlx(type_name = "proposal_status", rename_all = "lowercase")] +pub enum ProposalStatus { + Draft, + Discussion, + Voting, + Closed, + Archived, +} + +#[derive(Debug, Clone, Serialize, Deserialize, sqlx::Type)] +#[sqlx(type_name = "deliberation_phase", rename_all = "lowercase")] +pub enum DeliberationPhase { + Drafting, + Informing, + Discussing, + Voting, +
Concluded, +} + +#[derive(Debug, Clone, Serialize, Deserialize, FromRow)] +pub struct Proposal { + pub id: Uuid, + pub community_id: Uuid, + pub author_id: Uuid, + pub title: String, + pub description: String, + pub status: ProposalStatus, + pub voting_method: String, + pub voting_starts_at: Option>, + pub voting_ends_at: Option>, + pub created_at: DateTime, + pub updated_at: DateTime, + pub deliberation_phase: Option, + pub inform_starts_at: Option>, + pub inform_ends_at: Option>, + pub discuss_starts_at: Option>, + pub discuss_ends_at: Option>, + pub min_read_time_seconds: Option, + pub facilitator_id: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, FromRow)] +#[allow(dead_code)] +pub struct ProposalOption { + pub id: Uuid, + pub proposal_id: Uuid, + pub label: String, + pub description: Option, + pub sort_order: i32, + pub created_at: DateTime, +} + +#[derive(Debug, Serialize)] +pub struct ProposalWithOptions { + #[serde(flatten)] + pub proposal: Proposal, + pub options: Vec, + pub author_name: String, +} + +#[derive(Debug, Serialize)] +pub struct ProposalOptionWithVotes { + pub id: Uuid, + pub label: String, + pub description: Option, + pub vote_count: i64, +} + +#[derive(Debug, Deserialize)] +pub struct CreateProposal { + pub title: String, + pub description: String, + pub options: Vec, +} diff --git a/backend/src/models/user.rs b/backend/src/models/user.rs new file mode 100644 index 0000000..62990ec --- /dev/null +++ b/backend/src/models/user.rs @@ -0,0 +1,49 @@ +use serde::{Deserialize, Serialize}; +use sqlx::FromRow; +use uuid::Uuid; +use chrono::{DateTime, Utc}; + +#[derive(Debug, Clone, Serialize, Deserialize, FromRow)] +pub struct User { + pub id: Uuid, + pub username: String, + pub email: String, + #[serde(skip_serializing)] + pub password_hash: String, + pub display_name: Option, + pub created_at: DateTime, + pub updated_at: DateTime, + pub is_active: bool, + pub is_admin: bool, + pub invited_by: Option, +} + +#[derive(Debug, Deserialize)] +#[allow(dead_code)] +pub struct CreateUser { + pub username: String, + pub email: String, + pub password: String, + pub display_name: Option, +} + +#[derive(Debug, Serialize)] +pub struct UserResponse { + pub id: Uuid, + pub username: String, + pub email: String, + pub display_name: Option, + pub created_at: DateTime, +} + +impl From for UserResponse { + fn from(user: User) -> Self { + Self { + id: user.id, + username: user.username, + email: user.email, + display_name: user.display_name, + created_at: user.created_at, + } + } +} diff --git a/backend/src/plugins/builtin/comment_notifications.rs b/backend/src/plugins/builtin/comment_notifications.rs new file mode 100644 index 0000000..6d1bb89 --- /dev/null +++ b/backend/src/plugins/builtin/comment_notifications.rs @@ -0,0 +1,133 @@ +use std::sync::Arc; + +use async_trait::async_trait; +use serde_json::json; +use uuid::Uuid; + +use crate::plugins::{ + hooks::{HookContext, PluginError}, + Plugin, PluginMetadata, PluginScope, PluginSystem, +}; + +pub struct CommentNotificationsPlugin; + +impl CommentNotificationsPlugin { + pub fn new() -> Self { + Self + } +} + +#[async_trait] +impl Plugin for CommentNotificationsPlugin { + fn metadata(&self) -> PluginMetadata { + PluginMetadata { + name: "comment_notifications", + version: "0.1.0", + description: "Sends proposal authors a notification when someone comments", + is_core: false, + scope: PluginScope::Community, + default_enabled: true, + settings_schema: Some(json!({ + "type": "object", + "properties": { + "notify_on_reply": { + 
"type": "boolean", + "title": "Notify on replies", + "description": "Send notifications when someone replies to your comment", + "default": true + }, + "notify_author_only": { + "type": "boolean", + "title": "Author only", + "description": "Only notify the proposal author (not other commenters)", + "default": false + }, + "min_comment_length": { + "type": "integer", + "title": "Minimum comment length", + "description": "Only notify for comments with at least this many characters", + "default": 0, + "minimum": 0, + "maximum": 500 + }, + "notification_title_template": { + "type": "string", + "title": "Notification title template", + "description": "Template for notification title. Use {proposal_title} as placeholder.", + "default": "New comment on \"{proposal_title}\"" + } + } + })), + } + } + + fn register(&self, system: &mut PluginSystem) { + let plugin_name = self.metadata().name.to_string(); + + system.add_action( + "comment.created", + plugin_name.clone(), + 10, + Arc::new(move |ctx: HookContext, payload| { + let plugin_name = plugin_name.clone(); + Box::pin(async move { + let proposal_author_id = payload + .get("proposal_author_id") + .and_then(|v| v.as_str()) + .and_then(|s| Uuid::parse_str(s).ok()) + .ok_or_else(|| PluginError::Message("missing proposal_author_id".to_string()))?; + + let commenter_id = payload + .get("commenter_id") + .and_then(|v| v.as_str()) + .and_then(|s| Uuid::parse_str(s).ok()) + .ok_or_else(|| PluginError::Message("missing commenter_id".to_string()))?; + + if proposal_author_id == commenter_id { + return Ok(()); + } + + let proposal_title = payload + .get("proposal_title") + .and_then(|v| v.as_str()) + .unwrap_or("your proposal"); + + let commenter_name = payload + .get("commenter_name") + .and_then(|v| v.as_str()) + .unwrap_or("Someone"); + + let proposal_id = payload + .get("proposal_id") + .and_then(|v| v.as_str()) + .unwrap_or(""); + + sqlx::query!( + "INSERT INTO notifications (user_id, type, title, message, link) VALUES ($1, $2, $3, $4, $5)", + proposal_author_id, + "comment", + format!("New comment on \"{}\"", proposal_title), + format!("{} commented on your proposal", commenter_name), + format!("/proposals/{}", proposal_id) + ) + .execute(&ctx.pool) + .await?; + + let _ = ctx + .emit_public_event( + Some(&plugin_name), + "notification.created", + json!({ + "type": "comment", + "proposal_id": proposal_id, + "to_user_id": proposal_author_id, + }), + ) + .await; + + Ok(()) + }) + }), + ); + } +} diff --git a/backend/src/plugins/builtin/conflict_resolution.rs b/backend/src/plugins/builtin/conflict_resolution.rs new file mode 100644 index 0000000..0d25543 --- /dev/null +++ b/backend/src/plugins/builtin/conflict_resolution.rs @@ -0,0 +1,515 @@ +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; +use sqlx::PgPool; +use std::sync::Arc; +use uuid::Uuid; + +use crate::plugins::{ + hooks::HookContext, + manager::PluginSystem, + Plugin, PluginError, PluginMetadata, PluginScope, +}; + +pub struct ConflictResolutionPlugin; + +impl ConflictResolutionPlugin { + pub fn new() -> Self { + Self + } +} + +#[async_trait] +impl Plugin for ConflictResolutionPlugin { + fn metadata(&self) -> PluginMetadata { + PluginMetadata { + name: "conflict_resolution", + version: "1.0.0", + description: "Structured conflict resolution with mediation", + is_core: false, + scope: PluginScope::Community, + default_enabled: true, + settings_schema: Some(json!({ + "type": "object", + "properties": { + "auto_assign_mediators": {"type": "boolean", 
"default": true}, + "min_mediators": {"type": "integer", "default": 1}, + "anonymous_reporting": {"type": "boolean", "default": true} + } + })), + } + } + + fn register(&self, system: &mut PluginSystem) { + // Hook: Auto-assign mediators when conflict reported + system.add_action( + "conflict.reported", + "conflict_resolution".to_string(), + 50, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + if let Some(conflict_id) = payload.get("conflict_id") + .and_then(|v| v.as_str()) + .and_then(|s| Uuid::parse_str(s).ok()) + { + // Check if auto-assign is enabled + if let Some(community_id) = ctx.community_id { + let auto_assign = ConflictService::should_auto_assign(&ctx.pool, community_id).await?; + if auto_assign { + ConflictService::assign_mediators(&ctx.pool, conflict_id, ctx.actor_user_id).await?; + } + } + } + + ctx.emit_public_event( + Some("conflict_resolution"), + "conflict.reported", + payload.clone(), + ).await?; + Ok(()) + }) + }), + ); + + // Hook: Notify on compromise proposal + system.add_action( + "conflict.compromise_proposed", + "conflict_resolution".to_string(), + 50, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + ctx.emit_public_event( + Some("conflict_resolution"), + "compromise.proposed", + payload.clone(), + ).await?; + Ok(()) + }) + }), + ); + + // Hook: Track resolution + system.add_action( + "conflict.resolved", + "conflict_resolution".to_string(), + 50, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + ctx.emit_public_event( + Some("conflict_resolution"), + "conflict.resolved", + payload.clone(), + ).await?; + Ok(()) + }) + }), + ); + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ConflictCase { + pub id: Uuid, + pub community_id: Uuid, + pub title: String, + pub description: String, + pub conflict_type: String, + pub status: String, + pub severity_level: i32, + pub is_urgent: bool, +} + +/// Compromise proposal for conflict resolution. Used as return type from service methods. 
+#[allow(dead_code)] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CompromiseProposal { + pub id: Uuid, + pub conflict_id: Uuid, + pub title: String, + pub description: String, + pub status: String, + pub proposed_by: Uuid, +} + +pub struct ConflictService; + +impl ConflictService { + /// Check if auto-assign mediators is enabled + pub async fn should_auto_assign(pool: &PgPool, community_id: Uuid) -> Result { + let result = sqlx::query_scalar!( + r#"SELECT COALESCE( + (SELECT (cp.settings->>'auto_assign_mediators')::boolean + FROM community_plugins cp + JOIN plugins p ON p.id = cp.plugin_id + WHERE cp.community_id = $1 AND p.name = 'conflict_resolution'), + true + ) AS "auto_assign!""#, + community_id + ) + .fetch_one(pool) + .await?; + + Ok(result) + } + + /// Report a new conflict + pub async fn report_conflict( + pool: &PgPool, + community_id: Uuid, + title: &str, + description: &str, + conflict_type: &str, + party_a_id: Uuid, + party_b_id: Option, + reported_by: Option, + anonymous: bool, + severity: i32, + ) -> Result { + let conflict_id: Uuid = sqlx::query_scalar( + r#"INSERT INTO conflict_cases ( + community_id, title, description, conflict_type, + party_a_id, party_b_id, reported_by, reported_anonymously, + severity_level + ) VALUES ($1, $2, $3, $4::conflict_type, $5, $6, $7, $8, $9) + RETURNING id"# + ) + .bind(community_id) + .bind(title) + .bind(description) + .bind(conflict_type) + .bind(party_a_id) + .bind(party_b_id) + .bind(if anonymous { None } else { reported_by }) + .bind(anonymous) + .bind(severity) + .fetch_one(pool) + .await?; + + // Log initial history + sqlx::query!( + r#"INSERT INTO conflict_history (conflict_id, action_type, action_description, actor_id) + VALUES ($1, 'conflict_reported', 'Conflict case created', $2)"#, + conflict_id, + reported_by + ) + .execute(pool) + .await?; + + Ok(conflict_id) + } + + /// Assign mediators to a conflict + pub async fn assign_mediators( + pool: &PgPool, + conflict_id: Uuid, + assigned_by: Option, + ) -> Result { + let count: i32 = sqlx::query_scalar("SELECT assign_mediators($1, $2)") + .bind(conflict_id) + .bind(assigned_by) + .fetch_one(pool) + .await?; + + Ok(count) + } + + /// Transition conflict status + pub async fn transition_status( + pool: &PgPool, + conflict_id: Uuid, + new_status: &str, + actor_id: Uuid, + notes: Option<&str>, + ) -> Result { + let success: bool = sqlx::query_scalar( + "SELECT transition_conflict_status($1, $2::conflict_status, $3, $4)" + ) + .bind(conflict_id) + .bind(new_status) + .bind(actor_id) + .bind(notes) + .fetch_one(pool) + .await?; + + Ok(success) + } + + /// Create a compromise proposal + pub async fn propose_compromise( + pool: &PgPool, + conflict_id: Uuid, + title: &str, + description: &str, + proposed_actions: Value, + proposed_by: Uuid, + proposed_by_role: &str, + ) -> Result { + let proposal_id = sqlx::query_scalar!( + r#"INSERT INTO compromise_proposals ( + conflict_id, title, description, proposed_actions, + proposed_by, proposed_by_role + ) VALUES ($1, $2, $3, $4, $5, $6) + RETURNING id"#, + conflict_id, + title, + description, + proposed_actions, + proposed_by, + proposed_by_role + ) + .fetch_one(pool) + .await?; + + // Update conflict status + sqlx::query!( + "UPDATE conflict_cases SET status = 'proposal_phase', updated_at = NOW() WHERE id = $1", + conflict_id + ) + .execute(pool) + .await?; + + Ok(proposal_id) + } + + /// Respond to a compromise proposal + pub async fn respond_to_compromise( + pool: &PgPool, + proposal_id: Uuid, + party: &str, // "a" or "b" + 
response: &str, // accept, reject, counter + feedback: Option<&str>, + ) -> Result<(), PluginError> { + if party == "a" { + sqlx::query!( + r#"UPDATE compromise_proposals SET + party_a_response = $2, + party_a_response_at = NOW(), + party_a_feedback = $3, + updated_at = NOW() + WHERE id = $1"#, + proposal_id, + response, + feedback + ) + .execute(pool) + .await?; + } else { + sqlx::query!( + r#"UPDATE compromise_proposals SET + party_b_response = $2, + party_b_response_at = NOW(), + party_b_feedback = $3, + updated_at = NOW() + WHERE id = $1"#, + proposal_id, + response, + feedback + ) + .execute(pool) + .await?; + } + + // Check if both parties accepted + let proposal = sqlx::query!( + r#"SELECT party_a_response, party_b_response, conflict_id + FROM compromise_proposals WHERE id = $1"#, + proposal_id + ) + .fetch_one(pool) + .await?; + + if proposal.party_a_response.as_deref() == Some("accept") + && proposal.party_b_response.as_deref() == Some("accept") + { + // Mark proposal as accepted + sqlx::query!( + "UPDATE compromise_proposals SET status = 'accepted', updated_at = NOW() WHERE id = $1", + proposal_id + ) + .execute(pool) + .await?; + + // Mark conflict as resolved + sqlx::query!( + r#"UPDATE conflict_cases SET + status = 'resolved', + resolved_at = NOW(), + resolution_type = 'compromise_accepted', + updated_at = NOW() + WHERE id = $1"#, + proposal.conflict_id + ) + .execute(pool) + .await?; + } + + Ok(()) + } + + /// Schedule a mediation session + pub async fn schedule_session( + pool: &PgPool, + conflict_id: Uuid, + scheduled_at: chrono::DateTime, + duration_minutes: i32, + agenda: Option<&str>, + ) -> Result { + let session_number: i32 = sqlx::query_scalar!( + "SELECT COALESCE(MAX(session_number), 0)::int + 1 FROM mediation_sessions WHERE conflict_id = $1", + conflict_id + ) + .fetch_one(pool) + .await? 
+ .unwrap_or(1); + + let session_id = sqlx::query_scalar!( + r#"INSERT INTO mediation_sessions ( + conflict_id, session_number, scheduled_at, duration_minutes, agenda + ) VALUES ($1, $2, $3, $4, $5) + RETURNING id"#, + conflict_id, + session_number as i32, + scheduled_at, + duration_minutes, + agenda + ) + .fetch_one(pool) + .await?; + + // Update conflict status to mediation if not already + sqlx::query!( + "UPDATE conflict_cases SET status = 'mediation', updated_at = NOW() WHERE id = $1 AND status = 'acknowledged'", + conflict_id + ) + .execute(pool) + .await?; + + Ok(session_id) + } + + /// Add mediation note + pub async fn add_note( + pool: &PgPool, + conflict_id: Uuid, + session_id: Option, + author_id: Uuid, + content: &str, + note_type: &str, + is_confidential: bool, + ) -> Result { + let note_id = sqlx::query_scalar!( + r#"INSERT INTO mediation_notes ( + conflict_id, session_id, author_id, content, note_type, is_confidential + ) VALUES ($1, $2, $3, $4, $5, $6) + RETURNING id"#, + conflict_id, + session_id, + author_id, + content, + note_type, + is_confidential + ) + .fetch_one(pool) + .await?; + + // Update mediator notes count + sqlx::query!( + "UPDATE conflict_mediators SET notes_count = notes_count + 1, last_activity_at = NOW() WHERE conflict_id = $1 AND user_id = $2", + conflict_id, + author_id + ) + .execute(pool) + .await?; + + Ok(note_id) + } + + /// Get conflict details + pub async fn get_conflict(pool: &PgPool, conflict_id: Uuid) -> Result, PluginError> { + let conflict = sqlx::query_as!( + ConflictCase, + r#"SELECT + id, community_id, title, description, + conflict_type::text AS "conflict_type!", + status::text AS "status!", + severity_level, is_urgent + FROM conflict_cases WHERE id = $1"#, + conflict_id + ) + .fetch_optional(pool) + .await?; + + Ok(conflict) + } + + /// Get active conflicts for a community + pub async fn get_active_conflicts(pool: &PgPool, community_id: Uuid) -> Result, PluginError> { + let conflicts = sqlx::query_as!( + ConflictCase, + r#"SELECT + id, community_id, title, description, + conflict_type::text AS "conflict_type!", + status::text AS "status!", + severity_level, is_urgent + FROM conflict_cases + WHERE community_id = $1 AND status NOT IN ('resolved', 'closed') + ORDER BY is_urgent DESC, severity_level DESC, reported_at"#, + community_id + ) + .fetch_all(pool) + .await?; + + Ok(conflicts) + } + + /// Get statistics for a community + pub async fn get_statistics(pool: &PgPool, community_id: Uuid) -> Result { + let stats = sqlx::query!( + r#"SELECT + total_conflicts::bigint, + resolved_conflicts::bigint, + avg_resolution_days::float8, + mediation_success_rate::float8, + active_conflicts::bigint + FROM get_conflict_statistics($1)"#, + community_id + ) + .fetch_one(pool) + .await?; + + Ok(json!({ + "total_conflicts": stats.total_conflicts, + "resolved_conflicts": stats.resolved_conflicts, + "avg_resolution_days": stats.avg_resolution_days, + "mediation_success_rate": stats.mediation_success_rate, + "active_conflicts": stats.active_conflicts + })) + } + + /// Add user to mediator pool + pub async fn add_to_mediator_pool( + pool: &PgPool, + community_id: Uuid, + user_id: Uuid, + certification_level: Option<&str>, + ) -> Result { + let is_trained = certification_level.is_some(); + let mediator_id: Uuid = sqlx::query_scalar( + r#"INSERT INTO mediator_pool (community_id, user_id, certification_level, is_trained) + VALUES ($1, $2, $3, $4) + ON CONFLICT (community_id, user_id) DO UPDATE SET + certification_level = COALESCE($3, 
mediator_pool.certification_level), + is_trained = $4 OR mediator_pool.is_trained, + updated_at = NOW() + RETURNING id"# + ) + .bind(community_id) + .bind(user_id) + .bind(certification_level) + .bind(is_trained) + .fetch_one(pool) + .await?; + + Ok(mediator_id) + } +} diff --git a/backend/src/plugins/builtin/decision_workflows.rs b/backend/src/plugins/builtin/decision_workflows.rs new file mode 100644 index 0000000..7b9890c --- /dev/null +++ b/backend/src/plugins/builtin/decision_workflows.rs @@ -0,0 +1,578 @@ +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; +use sqlx::PgPool; +use std::sync::Arc; +use uuid::Uuid; + +use crate::plugins::{ + hooks::HookContext, + manager::PluginSystem, + Plugin, PluginError, PluginMetadata, PluginScope, +}; + +pub struct DecisionWorkflowsPlugin; + +impl DecisionWorkflowsPlugin { + pub fn new() -> Self { + Self + } +} + +#[async_trait] +impl Plugin for DecisionWorkflowsPlugin { + fn metadata(&self) -> PluginMetadata { + PluginMetadata { + name: "decision_workflows", + version: "1.0.0", + description: "Composable decision-making workflows with configurable phases", + is_core: true, + scope: PluginScope::Global, + default_enabled: true, + settings_schema: Some(json!({ + "type": "object", + "properties": { + "default_workflow": { + "type": "string", + "title": "Default Workflow", + "default": "Standard Governance" + }, + "allow_custom_workflows": { + "type": "boolean", + "title": "Allow Custom Workflows", + "default": true + }, + "auto_advance_phases": { + "type": "boolean", + "title": "Auto-advance Phases", + "default": true + } + } + })), + } + } + + fn register(&self, system: &mut PluginSystem) { + // Hook: When a proposal is created, optionally start a workflow + system.add_action( + "proposal.created", + "decision_workflows".to_string(), + 10, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + if let (Some(proposal_id), Some(community_id)) = ( + payload.get("proposal_id").and_then(|v| v.as_str()).and_then(|s| Uuid::parse_str(s).ok()), + payload.get("community_id").and_then(|v| v.as_str()).and_then(|s| Uuid::parse_str(s).ok()), + ) { + // Check if auto-workflow is enabled for this community + let auto_start = WorkflowService::should_auto_start_workflow(&ctx.pool, community_id).await?; + + if auto_start { + let template_id = WorkflowService::get_default_template(&ctx.pool, Some(community_id)).await?; + if let Some(tid) = template_id { + let instance_id = WorkflowService::start_workflow(&ctx.pool, proposal_id, tid).await?; + + ctx.emit_public_event( + Some("decision_workflows"), + "workflow.started", + json!({ + "proposal_id": proposal_id, + "workflow_instance_id": instance_id, + "template_id": tid + }), + ).await?; + } + } + } + Ok(()) + }) + }), + ); + + // Hook: Check for phase transitions periodically + system.add_action( + "cron.minute", + "decision_workflows".to_string(), + 50, + Arc::new(|ctx: HookContext, _payload: Value| { + Box::pin(async move { + WorkflowService::check_phase_transitions(&ctx.pool).await?; + Ok(()) + }) + }), + ); + + // Hook: Record participation when user interacts + system.add_action( + "proposal.viewed", + "decision_workflows".to_string(), + 50, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + if let (Some(proposal_id), Some(user_id)) = ( + payload.get("proposal_id").and_then(|v| v.as_str()).and_then(|s| Uuid::parse_str(s).ok()), + ctx.actor_user_id, + ) { + WorkflowService::record_participation(&ctx.pool, proposal_id, user_id, 
"viewed").await?; + } + Ok(()) + }) + }), + ); + + system.add_action( + "comment.created", + "decision_workflows".to_string(), + 50, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + if let (Some(proposal_id), Some(user_id)) = ( + payload.get("proposal_id").and_then(|v| v.as_str()).and_then(|s| Uuid::parse_str(s).ok()), + ctx.actor_user_id, + ) { + WorkflowService::record_participation(&ctx.pool, proposal_id, user_id, "commented").await?; + } + Ok(()) + }) + }), + ); + + system.add_action( + "vote.cast", + "decision_workflows".to_string(), + 50, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + if let (Some(proposal_id), Some(user_id)) = ( + payload.get("proposal_id").and_then(|v| v.as_str()).and_then(|s| Uuid::parse_str(s).ok()), + ctx.actor_user_id, + ) { + WorkflowService::record_participation(&ctx.pool, proposal_id, user_id, "voted").await?; + } + Ok(()) + }) + }), + ); + } + +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WorkflowTemplate { + pub id: Uuid, + pub community_id: Option, + pub name: String, + pub description: Option, + pub is_default: bool, + pub is_system: bool, + pub config: Value, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WorkflowPhase { + pub id: Uuid, + pub template_id: Uuid, + pub name: String, + pub phase_type: String, + pub sequence_order: i32, + pub description: Option, + pub default_duration_hours: i32, + pub quorum_type: String, + pub quorum_value: f64, + pub allow_early_completion: bool, + pub auto_advance: bool, + pub phase_config: Value, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WorkflowInstance { + pub id: Uuid, + pub proposal_id: Uuid, + pub template_id: Uuid, + pub current_phase_id: Option, + pub status: String, + pub started_at: chrono::DateTime, + pub completed_at: Option>, +} + +/// Workflow phase instance. Used for tracking phase progress. 
+#[allow(dead_code)] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PhaseInstance { + pub id: Uuid, + pub workflow_instance_id: Uuid, + pub phase_id: Uuid, + pub status: String, + pub scheduled_end: Option>, + pub participant_count: i32, + pub quorum_reached: bool, +} + +pub struct WorkflowService; + +impl WorkflowService { + /// Check if workflow should auto-start for a community + pub async fn should_auto_start_workflow( + pool: &PgPool, + community_id: Uuid, + ) -> Result { + let result = sqlx::query_scalar!( + r#"SELECT COALESCE( + (SELECT (cp.settings->>'auto_start_workflow')::boolean + FROM community_plugins cp + JOIN plugins p ON p.id = cp.plugin_id + WHERE cp.community_id = $1 AND p.name = 'decision_workflows'), + true + ) AS "auto_start!""#, + community_id + ) + .fetch_one(pool) + .await?; + + Ok(result) + } + + /// Get default workflow template for a community + pub async fn get_default_template( + pool: &PgPool, + community_id: Option, + ) -> Result, PluginError> { + // First check community-specific default + if let Some(cid) = community_id { + let community_default = sqlx::query_scalar!( + r#"SELECT id FROM workflow_templates + WHERE community_id = $1 AND is_default = true + LIMIT 1"#, + cid + ) + .fetch_optional(pool) + .await?; + + if community_default.is_some() { + return Ok(community_default); + } + } + + // Fall back to system default + let system_default = sqlx::query_scalar!( + r#"SELECT id FROM workflow_templates + WHERE community_id IS NULL AND is_system = true AND name = 'Standard Governance' + LIMIT 1"# + ) + .fetch_optional(pool) + .await?; + + Ok(system_default) + } + + /// Start a workflow for a proposal + pub async fn start_workflow( + pool: &PgPool, + proposal_id: Uuid, + template_id: Uuid, + ) -> Result { + let instance_id: Uuid = sqlx::query_scalar( + "SELECT start_workflow($1, $2)" + ) + .bind(proposal_id) + .bind(template_id) + .fetch_one(pool) + .await?; + + Ok(instance_id) + } + + /// Advance to next phase manually + pub async fn advance_phase( + pool: &PgPool, + workflow_instance_id: Uuid, + user_id: Option, + reason: Option<&str>, + ) -> Result, PluginError> { + let new_phase_id: Option = sqlx::query_scalar( + "SELECT advance_workflow_phase($1, 'manual', $2, $3)" + ) + .bind(workflow_instance_id) + .bind(user_id) + .bind(reason) + .fetch_one(pool) + .await?; + + Ok(new_phase_id) + } + + /// Record user participation in current phase + pub async fn record_participation( + pool: &PgPool, + proposal_id: Uuid, + user_id: Uuid, + participation_type: &str, + ) -> Result<(), PluginError> { + sqlx::query!( + r#"INSERT INTO phase_participation (phase_instance_id, user_id, participation_type) + SELECT pi.id, $2, $3 + FROM phase_instances pi + JOIN workflow_instances wi ON wi.id = pi.workflow_instance_id + WHERE wi.proposal_id = $1 AND pi.status = 'active' + ON CONFLICT (phase_instance_id, user_id, participation_type) DO NOTHING"#, + proposal_id, + user_id, + participation_type + ) + .execute(pool) + .await?; + + // Update participant count + sqlx::query!( + r#"UPDATE phase_instances pi + SET participant_count = ( + SELECT COUNT(DISTINCT user_id) + FROM phase_participation + WHERE phase_instance_id = pi.id + ) + FROM workflow_instances wi + WHERE wi.id = pi.workflow_instance_id + AND wi.proposal_id = $1 + AND pi.status = 'active'"#, + proposal_id + ) + .execute(pool) + .await?; + + Ok(()) + } + + /// Check and process phase transitions for all active workflows + pub async fn check_phase_transitions(pool: &PgPool) -> Result<(), PluginError> { + // 
Find phases that have passed their deadline + let expired_phases = sqlx::query!( + r#"SELECT pi.id, pi.workflow_instance_id, wp.auto_advance, wp.failure_action + FROM phase_instances pi + JOIN workflow_phases wp ON wp.id = pi.phase_id + WHERE pi.status = 'active' AND pi.scheduled_end < NOW()"# + ) + .fetch_all(pool) + .await?; + + for phase in expired_phases { + if phase.auto_advance { + // Auto-advance to next phase + let _: Option = sqlx::query_scalar( + "SELECT advance_workflow_phase($1, 'timeout', NULL, 'Phase duration expired')" + ) + .bind(phase.workflow_instance_id) + .fetch_one(pool) + .await?; + } + } + + // Check quorum for active phases and record snapshots + let active_phases = sqlx::query!( + r#"SELECT pi.id FROM phase_instances pi WHERE pi.status = 'active'"# + ) + .fetch_all(pool) + .await?; + + for phase in active_phases { + // Calculate and record quorum + sqlx::query( + r#"INSERT INTO quorum_snapshots (phase_instance_id, eligible_count, participant_count, quorum_required, quorum_current, is_met, calculation_details) + SELECT $1, q.eligible_count, q.participant_count, q.quorum_required, q.quorum_current, q.is_met, '{}'::jsonb + FROM calculate_phase_quorum($1) q"# + ) + .bind(phase.id) + .execute(pool) + .await?; + + // Update quorum_reached flag + sqlx::query!( + r#"UPDATE phase_instances + SET quorum_reached = ( + SELECT is_met FROM quorum_snapshots + WHERE phase_instance_id = $1 + ORDER BY snapshot_time DESC LIMIT 1 + ), + quorum_reached_at = CASE + WHEN quorum_reached = false AND ( + SELECT is_met FROM quorum_snapshots + WHERE phase_instance_id = $1 + ORDER BY snapshot_time DESC LIMIT 1 + ) THEN NOW() + ELSE quorum_reached_at + END + WHERE id = $1"#, + phase.id + ) + .execute(pool) + .await?; + } + + Ok(()) + } + + /// Get workflow instance for a proposal + pub async fn get_workflow_for_proposal( + pool: &PgPool, + proposal_id: Uuid, + ) -> Result, PluginError> { + let instance = sqlx::query_as!( + WorkflowInstance, + r#"SELECT + id, proposal_id, template_id, current_phase_id, + status, started_at, completed_at + FROM workflow_instances + WHERE proposal_id = $1"#, + proposal_id + ) + .fetch_optional(pool) + .await?; + + Ok(instance) + } + + /// Get all phases for a workflow template + pub async fn get_template_phases( + pool: &PgPool, + template_id: Uuid, + ) -> Result, PluginError> { + let phases = sqlx::query_as!( + WorkflowPhase, + r#"SELECT + id, template_id, name, phase_type::text AS "phase_type!", + sequence_order, description, default_duration_hours, + quorum_type, quorum_value::float8 AS "quorum_value!", + allow_early_completion, auto_advance, phase_config + FROM workflow_phases + WHERE template_id = $1 + ORDER BY sequence_order"#, + template_id + ) + .fetch_all(pool) + .await?; + + Ok(phases) + } + + /// Get workflow progress summary + pub async fn get_workflow_progress( + pool: &PgPool, + workflow_instance_id: Uuid, + ) -> Result { + let progress = sqlx::query!( + r#"SELECT + wi.id, + wi.status, + wt.name AS workflow_name, + wp.name AS current_phase_name, + wp.phase_type::text AS current_phase_type, + pi.scheduled_end, + pi.participant_count, + pi.quorum_reached, + (SELECT COUNT(*) FROM workflow_phases WHERE template_id = wi.template_id) AS total_phases, + (SELECT COUNT(*) FROM phase_instances WHERE workflow_instance_id = wi.id AND status = 'completed') AS completed_phases + FROM workflow_instances wi + JOIN workflow_templates wt ON wt.id = wi.template_id + LEFT JOIN workflow_phases wp ON wp.id = wi.current_phase_id + LEFT JOIN phase_instances pi ON 
pi.workflow_instance_id = wi.id AND pi.phase_id = wi.current_phase_id + WHERE wi.id = $1"#, + workflow_instance_id + ) + .fetch_optional(pool) + .await?; + + match progress { + Some(p) => Ok(json!({ + "workflow_id": p.id, + "status": p.status, + "workflow_name": p.workflow_name, + "current_phase": p.current_phase_name, + "current_phase_type": p.current_phase_type, + "deadline": p.scheduled_end, + "participants": p.participant_count, + "quorum_reached": p.quorum_reached, + "total_phases": p.total_phases, + "completed_phases": p.completed_phases, + "progress_percentage": p.total_phases.map(|t| { + if t > 0 { + (p.completed_phases.unwrap_or(0) as f64 / t as f64 * 100.0).round() + } else { + 0.0 + } + }) + })), + None => Ok(json!(null)), + } + } + + /// List available workflow templates + pub async fn list_templates( + pool: &PgPool, + community_id: Option, + ) -> Result, PluginError> { + let templates = sqlx::query_as!( + WorkflowTemplate, + r#"SELECT id, community_id, name, description, is_default, is_system, config + FROM workflow_templates + WHERE community_id IS NULL OR community_id = $1 + ORDER BY is_system DESC, name"#, + community_id + ) + .fetch_all(pool) + .await?; + + Ok(templates) + } + + /// Create a custom workflow template + pub async fn create_template( + pool: &PgPool, + community_id: Uuid, + name: &str, + description: Option<&str>, + created_by: Uuid, + ) -> Result { + let template_id = sqlx::query_scalar!( + r#"INSERT INTO workflow_templates (community_id, name, description, created_by) + VALUES ($1, $2, $3, $4) + RETURNING id"#, + community_id, + name, + description, + created_by + ) + .fetch_one(pool) + .await?; + + Ok(template_id) + } + + /// Add a phase to a workflow template + pub async fn add_phase( + pool: &PgPool, + template_id: Uuid, + name: &str, + phase_type: &str, + sequence_order: i32, + duration_hours: i32, + quorum_value: f64, + ) -> Result { + let phase_id: Uuid = sqlx::query_scalar( + r#"INSERT INTO workflow_phases ( + template_id, name, phase_type, sequence_order, + default_duration_hours, quorum_value + ) + VALUES ($1, $2, $3::workflow_phase_type, $4, $5, $6::numeric) + RETURNING id"# + ) + .bind(template_id) + .bind(name) + .bind(phase_type) + .bind(sequence_order) + .bind(duration_hours) + .bind(quorum_value) + .fetch_one(pool) + .await?; + + Ok(phase_id) + } +} diff --git a/backend/src/plugins/builtin/federation.rs b/backend/src/plugins/builtin/federation.rs new file mode 100644 index 0000000..0e984fb --- /dev/null +++ b/backend/src/plugins/builtin/federation.rs @@ -0,0 +1,410 @@ +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; +use sqlx::PgPool; +use std::sync::Arc; +use uuid::Uuid; + +use crate::plugins::{ + hooks::HookContext, + manager::PluginSystem, + Plugin, PluginError, PluginMetadata, PluginScope, +}; + +pub struct FederationPlugin; + +impl FederationPlugin { + pub fn new() -> Self { + Self + } +} + +#[async_trait] +impl Plugin for FederationPlugin { + fn metadata(&self) -> PluginMetadata { + PluginMetadata { + name: "federation", + version: "1.0.0", + description: "Multi-community federation and cross-instance collaboration", + is_core: false, + scope: PluginScope::Global, + default_enabled: false, + settings_schema: Some(json!({ + "type": "object", + "properties": { + "allow_incoming_requests": {"type": "boolean", "default": true}, + "min_trust_level": {"type": "integer", "default": 2}, + "sync_interval_minutes": {"type": "integer", "default": 15} + } + })), + } + } + + fn register(&self, 
system: &mut PluginSystem) { + // Hook: Sync federated proposals periodically + system.add_action( + "cron.every_15_minutes", + "federation".to_string(), + 50, + Arc::new(|ctx: HookContext, _payload: Value| { + Box::pin(async move { + FederationService::sync_all_federations(&ctx.pool).await?; + Ok(()) + }) + }), + ); + + // Hook: Share proposal when created in federated community + system.add_action( + "proposal.created", + "federation".to_string(), + 100, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + if let Some(proposal_id) = payload.get("proposal_id") + .and_then(|v| v.as_str()) + .and_then(|s| Uuid::parse_str(s).ok()) + { + FederationService::share_proposal_if_federated(&ctx.pool, proposal_id).await?; + } + Ok(()) + }) + }), + ); + + // Hook: Broadcast decision results + system.add_action( + "proposal.decided", + "federation".to_string(), + 100, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + if let Some(proposal_id) = payload.get("proposal_id") + .and_then(|v| v.as_str()) + .and_then(|s| Uuid::parse_str(s).ok()) + { + FederationService::broadcast_decision(&ctx.pool, proposal_id).await?; + } + Ok(()) + }) + }), + ); + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FederatedInstance { + pub id: Uuid, + pub instance_url: String, + pub instance_name: String, + pub status: String, + pub trust_level: i32, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CommunityFederation { + pub id: Uuid, + pub local_community_id: Uuid, + pub remote_instance_id: Uuid, + pub remote_community_id: Uuid, + pub remote_community_name: Option, + pub status: String, +} + +pub struct FederationService; + +impl FederationService { + /// Register a new federated instance + pub async fn register_instance( + pool: &PgPool, + url: &str, + name: &str, + description: Option<&str>, + public_key: Option<&str>, + ) -> Result { + let instance_id: Uuid = sqlx::query_scalar( + "SELECT register_federated_instance($1, $2, $3, $4)" + ) + .bind(url) + .bind(name) + .bind(description) + .bind(public_key) + .fetch_one(pool) + .await?; + + Ok(instance_id) + } + + /// Create a federation between communities + pub async fn create_federation( + pool: &PgPool, + local_community_id: Uuid, + remote_instance_id: Uuid, + remote_community_id: Uuid, + remote_community_name: &str, + sync_direction: &str, + ) -> Result { + let federation_id: Uuid = sqlx::query_scalar( + "SELECT create_community_federation($1, $2, $3, $4, $5::sync_direction)" + ) + .bind(local_community_id) + .bind(remote_instance_id) + .bind(remote_community_id) + .bind(remote_community_name) + .bind(sync_direction) + .fetch_one(pool) + .await?; + + Ok(federation_id) + } + + /// Approve a federation locally + pub async fn approve_federation( + pool: &PgPool, + federation_id: Uuid, + _approved_by: Uuid, + ) -> Result<(), PluginError> { + sqlx::query!( + r#"UPDATE community_federations SET + approved_locally = true, + status = CASE WHEN approved_remotely THEN 'active'::federation_status ELSE status END, + updated_at = NOW() + WHERE id = $1"#, + federation_id + ) + .execute(pool) + .await?; + + sqlx::query!( + r#"INSERT INTO federation_sync_log (federation_id, operation_type, direction, success) + VALUES ($1, 'local_approval', 'push', true)"#, + federation_id + ) + .execute(pool) + .await?; + + Ok(()) + } + + /// Sync all active federations + pub async fn sync_all_federations(pool: &PgPool) -> Result { + let federations = sqlx::query!( + r#"SELECT cf.id, cf.local_community_id, 
cf.remote_instance_id, + fi.instance_url, cf.sync_direction::text AS "sync_direction!" + FROM community_federations cf + JOIN federated_instances fi ON fi.id = cf.remote_instance_id + WHERE cf.status = 'active' AND fi.status = 'active'"# + ) + .fetch_all(pool) + .await?; + + let mut synced = 0; + for fed in federations { + // In a real implementation, this would make HTTP requests to remote instances + // For now, just log the sync attempt + let start = std::time::Instant::now(); + + sqlx::query( + r#"INSERT INTO federation_sync_log + (federation_id, instance_id, operation_type, direction, success, duration_ms) + VALUES ($1, $2, 'scheduled_sync', $3::sync_direction, true, $4)"# + ) + .bind(fed.id) + .bind(fed.remote_instance_id) + .bind(&fed.sync_direction) + .bind(start.elapsed().as_millis() as i32) + .execute(pool) + .await?; + + sqlx::query!( + "UPDATE federated_instances SET last_sync_at = NOW(), total_syncs = total_syncs + 1 WHERE id = $1", + fed.remote_instance_id + ) + .execute(pool) + .await?; + + synced += 1; + } + + Ok(synced) + } + + /// Share a proposal to federated communities + pub async fn share_proposal_if_federated( + pool: &PgPool, + proposal_id: Uuid, + ) -> Result<(), PluginError> { + // Get the proposal's community + let proposal = sqlx::query!( + "SELECT community_id FROM proposals WHERE id = $1", + proposal_id + ) + .fetch_optional(pool) + .await?; + + let Some(proposal) = proposal else { + return Ok(()); + }; + + // Check for active federations that sync proposals + let federations = sqlx::query!( + r#"SELECT id, remote_instance_id, remote_community_id + FROM community_federations + WHERE local_community_id = $1 + AND status = 'active' + AND sync_proposals = true + AND sync_direction IN ('push', 'bidirectional')"#, + proposal.community_id + ) + .fetch_all(pool) + .await?; + + for fed in federations { + // Create federated proposal record + sqlx::query!( + r#"INSERT INTO federated_proposals + (federation_id, local_proposal_id, remote_proposal_id, is_origin_local) + VALUES ($1, $2, $2, true) + ON CONFLICT DO NOTHING"#, + fed.id, + proposal_id + ) + .execute(pool) + .await?; + } + + Ok(()) + } + + /// Broadcast decision to federated communities + pub async fn broadcast_decision( + pool: &PgPool, + proposal_id: Uuid, + ) -> Result<(), PluginError> { + // Find federated proposal + let federated = sqlx::query!( + r#"SELECT fp.id, fp.federation_id + FROM federated_proposals fp + WHERE fp.local_proposal_id = $1"#, + proposal_id + ) + .fetch_all(pool) + .await?; + + for fp in federated { + // Get local vote results + let results = sqlx::query!( + r#"SELECT COUNT(*)::int AS total_votes + FROM votes WHERE proposal_id = $1"#, + proposal_id + ) + .fetch_one(pool) + .await?; + + // Create or update federated decision + sqlx::query!( + r#"INSERT INTO federated_decisions + (federated_proposal_id, decision_type, outcome, total_votes, is_final) + VALUES ($1, 'vote', 'pending', $2, false) + ON CONFLICT DO NOTHING"#, + fp.id, + results.total_votes.unwrap_or(0) + ) + .execute(pool) + .await?; + } + + Ok(()) + } + + /// Get federation statistics for a community + pub async fn get_stats(pool: &PgPool, community_id: Uuid) -> Result { + let stats = sqlx::query!( + "SELECT * FROM get_federation_stats($1)", + community_id + ) + .fetch_one(pool) + .await?; + + Ok(json!({ + "total_federations": stats.total_federations, + "active_federations": stats.active_federations, + "federated_proposals": stats.federated_proposals, + "total_syncs": stats.total_syncs, + "last_sync": stats.last_sync + })) + } 
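+
+    // Illustrative usage sketch, not an endpoint shipped in this commit: how an
+    // admin-side handler might wire these service calls together. `pool`,
+    // `local_community_id`, `remote_community_id`, and `admin_id` are assumed to
+    // be supplied by the caller.
+    //
+    //     let instance_id = FederationService::register_instance(
+    //         &pool, "https://other.example.org", "Other Instance", None, None,
+    //     ).await?;
+    //     let federation_id = FederationService::create_federation(
+    //         &pool, local_community_id, instance_id, remote_community_id,
+    //         "Remote Community", "bidirectional",
+    //     ).await?;
+    //     FederationService::approve_federation(&pool, federation_id, admin_id).await?;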
+ + /// Get all instances + pub async fn get_instances(pool: &PgPool) -> Result, PluginError> { + let instances = sqlx::query_as!( + FederatedInstance, + r#"SELECT id, instance_url, instance_name, status::text AS "status!", trust_level + FROM federated_instances ORDER BY instance_name"# + ) + .fetch_all(pool) + .await?; + + Ok(instances) + } + + /// Get federations for a community + pub async fn get_community_federations( + pool: &PgPool, + community_id: Uuid, + ) -> Result, PluginError> { + let federations = sqlx::query_as!( + CommunityFederation, + r#"SELECT id, local_community_id, remote_instance_id, remote_community_id, + remote_community_name, status::text AS "status!" + FROM community_federations + WHERE local_community_id = $1"#, + community_id + ) + .fetch_all(pool) + .await?; + + Ok(federations) + } + + /// Submit a federation request (handles incoming requests from other instances) + pub async fn request_federation( + pool: &PgPool, + from_instance_url: &str, + from_community_name: &str, + to_community_id: Uuid, + message: Option<&str>, + ) -> Result { + let request_id = sqlx::query_scalar!( + r#"INSERT INTO federation_requests + (from_instance_url, from_community_name, to_community_id, request_message) + VALUES ($1, $2, $3, $4) + RETURNING id"#, + from_instance_url, + from_community_name, + to_community_id, + message + ) + .fetch_one(pool) + .await?; + + Ok(request_id) + } + + /// Update instance trust level + pub async fn set_trust_level( + pool: &PgPool, + instance_id: Uuid, + trust_level: i32, + ) -> Result<(), PluginError> { + sqlx::query!( + "UPDATE federated_instances SET trust_level = $2, updated_at = NOW() WHERE id = $1", + instance_id, + trust_level.clamp(1, 5) + ) + .execute(pool) + .await?; + + Ok(()) + } +} diff --git a/backend/src/plugins/builtin/governance_analytics.rs b/backend/src/plugins/builtin/governance_analytics.rs new file mode 100644 index 0000000..731851a --- /dev/null +++ b/backend/src/plugins/builtin/governance_analytics.rs @@ -0,0 +1,390 @@ +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; +use sqlx::PgPool; +use std::sync::Arc; +use uuid::Uuid; + +use crate::plugins::{ + hooks::HookContext, + manager::PluginSystem, + Plugin, PluginError, PluginMetadata, PluginScope, +}; + +pub struct GovernanceAnalyticsPlugin; + +impl GovernanceAnalyticsPlugin { + pub fn new() -> Self { + Self + } +} + +#[async_trait] +impl Plugin for GovernanceAnalyticsPlugin { + fn metadata(&self) -> PluginMetadata { + PluginMetadata { + name: "governance_analytics", + version: "1.0.0", + description: "Aggregate governance analytics and health metrics", + is_core: false, + scope: PluginScope::Community, + default_enabled: true, + settings_schema: Some(json!({ + "type": "object", + "properties": { + "snapshot_frequency": {"type": "string", "default": "daily"}, + "public_dashboard": {"type": "boolean", "default": true} + } + })), + } + } + + fn register(&self, system: &mut PluginSystem) { + // Hook: Calculate daily snapshots + system.add_action( + "cron.daily", + "governance_analytics".to_string(), + 50, + Arc::new(|ctx: HookContext, _payload: Value| { + Box::pin(async move { + AnalyticsService::calculate_all_snapshots(&ctx.pool).await?; + Ok(()) + }) + }), + ); + + // Hook: Calculate weekly health scores + system.add_action( + "cron.weekly", + "governance_analytics".to_string(), + 50, + Arc::new(|ctx: HookContext, _payload: Value| { + Box::pin(async move { + AnalyticsService::calculate_all_health_scores(&ctx.pool).await?; + Ok(()) + 
}) + }), + ); + } + +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ParticipationSnapshot { + pub id: Uuid, + pub community_id: Uuid, + pub snapshot_date: chrono::NaiveDate, + pub total_members: i32, + pub active_members: i32, + pub votes_cast: i32, + pub unique_voters: i32, + pub engagement_score: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct GovernanceHealth { + pub community_id: Uuid, + pub overall_health_score: Option, + pub participation_score: Option, + pub efficiency_score: Option, + pub delegation_health_score: Option, + pub power_concentration_risk: Option, +} + +pub struct AnalyticsService; + +impl AnalyticsService { + /// Calculate participation snapshot for a community + pub async fn calculate_snapshot( + pool: &PgPool, + community_id: Uuid, + ) -> Result { + let snapshot_id: Uuid = sqlx::query_scalar( + "SELECT calculate_participation_snapshot($1, CURRENT_DATE)" + ) + .bind(community_id) + .fetch_one(pool) + .await?; + + Ok(snapshot_id) + } + + /// Calculate snapshots for all communities + pub async fn calculate_all_snapshots(pool: &PgPool) -> Result { + let communities = sqlx::query_scalar!( + "SELECT id FROM communities WHERE is_active = true" + ) + .fetch_all(pool) + .await?; + + let mut count = 0; + for community_id in communities { + if let Err(e) = Self::calculate_snapshot(pool, community_id).await { + tracing::warn!("Failed to calculate snapshot for {}: {}", community_id, e); + } else { + count += 1; + } + } + + Ok(count) + } + + /// Calculate governance health score for a community + pub async fn calculate_health( + pool: &PgPool, + community_id: Uuid, + ) -> Result { + let health_id: Uuid = sqlx::query_scalar( + "SELECT calculate_governance_health($1)" + ) + .bind(community_id) + .fetch_one(pool) + .await?; + + Ok(health_id) + } + + /// Calculate health scores for all communities + pub async fn calculate_all_health_scores(pool: &PgPool) -> Result { + let communities = sqlx::query_scalar!( + "SELECT id FROM communities WHERE is_active = true" + ) + .fetch_all(pool) + .await?; + + let mut count = 0; + for community_id in communities { + if let Err(e) = Self::calculate_health(pool, community_id).await { + tracing::warn!("Failed to calculate health for {}: {}", community_id, e); + } else { + count += 1; + } + } + + Ok(count) + } + + /// Get participation trends for a community + pub async fn get_participation_trends( + pool: &PgPool, + community_id: Uuid, + days: i32, + ) -> Result, PluginError> { + let snapshots = sqlx::query_as!( + ParticipationSnapshot, + r#"SELECT + id, community_id, snapshot_date, + total_members, active_members, + votes_cast, unique_voters, + engagement_score::float8 AS engagement_score + FROM participation_snapshots + WHERE community_id = $1 + AND snapshot_date > CURRENT_DATE - make_interval(days => $2) + ORDER BY snapshot_date DESC"#, + community_id, + days + ) + .fetch_all(pool) + .await?; + + Ok(snapshots) + } + + /// Get current governance health + pub async fn get_health( + pool: &PgPool, + community_id: Uuid, + ) -> Result, PluginError> { + let health = sqlx::query_as!( + GovernanceHealth, + r#"SELECT + community_id, + overall_health_score::float8 AS overall_health_score, + participation_score::float8 AS participation_score, + efficiency_score::float8 AS efficiency_score, + delegation_health_score::float8 AS delegation_health_score, + power_concentration_risk + FROM governance_health_indicators + WHERE community_id = $1 + ORDER BY calculated_at DESC + LIMIT 1"#, + community_id + ) + 
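+        // NUMERIC columns are cast to ::float8 in the query above so that
+        // sqlx can decode them into the f64-typed score fields on GovernanceHealth.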
.fetch_optional(pool) + .await?; + + Ok(health) + } + + /// Get delegation analytics + pub async fn get_delegation_analytics( + pool: &PgPool, + community_id: Uuid, + ) -> Result { + let analytics = sqlx::query!( + r#"SELECT + total_delegations, + unique_delegators, + unique_delegates, + max_chain_depth, + avg_chain_depth::float8 AS avg_chain_depth, + top_10_delegate_share::float8 AS top_10_share, + herfindahl_index::float8 AS hhi, + effective_delegates + FROM delegation_analytics + WHERE community_id = $1 + ORDER BY snapshot_date DESC + LIMIT 1"#, + community_id + ) + .fetch_optional(pool) + .await?; + + match analytics { + Some(a) => Ok(json!({ + "total_delegations": a.total_delegations, + "unique_delegators": a.unique_delegators, + "unique_delegates": a.unique_delegates, + "max_chain_depth": a.max_chain_depth, + "avg_chain_depth": a.avg_chain_depth, + "top_10_share": a.top_10_share, + "concentration_index": a.hhi, + "effective_delegates": a.effective_delegates + })), + None => Ok(json!({"message": "No delegation data available"})), + } + } + + /// Get decision load metrics + pub async fn get_decision_load( + pool: &PgPool, + community_id: Uuid, + ) -> Result { + let load = sqlx::query!( + r#"SELECT + proposals_in_pipeline, + proposals_needing_review, + proposals_in_voting, + decisions_made, + avg_decision_time_hours::float8 AS avg_time, + quorum_achievement_rate::float8 AS quorum_rate, + stalled_proposals, + bottleneck_phase + FROM decision_load_metrics + WHERE community_id = $1 + ORDER BY period_end DESC + LIMIT 1"#, + community_id + ) + .fetch_optional(pool) + .await?; + + match load { + Some(l) => Ok(json!({ + "in_pipeline": l.proposals_in_pipeline, + "needing_review": l.proposals_needing_review, + "in_voting": l.proposals_in_voting, + "decisions_made": l.decisions_made, + "avg_decision_time_hours": l.avg_time, + "quorum_achievement_rate": l.quorum_rate, + "stalled": l.stalled_proposals, + "bottleneck": l.bottleneck_phase + })), + None => Ok(json!({"message": "No decision load data available"})), + } + } + + /// Get voting method comparison + pub async fn get_voting_method_comparison( + pool: &PgPool, + community_id: Uuid, + ) -> Result, PluginError> { + let methods = sqlx::query!( + r#"SELECT + voting_method, + proposals_using_method, + total_votes_cast, + avg_turnout::float8 AS turnout, + avg_time_to_decide_hours::float8 AS avg_time, + decisive_results, + close_results + FROM voting_method_analytics + WHERE community_id = $1 + ORDER BY proposals_using_method DESC"#, + community_id + ) + .fetch_all(pool) + .await?; + + Ok(methods.into_iter().map(|m| json!({ + "method": m.voting_method, + "proposals": m.proposals_using_method, + "total_votes": m.total_votes_cast, + "avg_turnout": m.turnout, + "avg_decision_time": m.avg_time, + "decisive_results": m.decisive_results, + "close_results": m.close_results + })).collect()) + } + + /// Get full dashboard data + pub async fn get_dashboard( + pool: &PgPool, + community_id: Uuid, + ) -> Result { + let health = Self::get_health(pool, community_id).await?; + let trends = Self::get_participation_trends(pool, community_id, 30).await?; + let delegation = Self::get_delegation_analytics(pool, community_id).await?; + let load = Self::get_decision_load(pool, community_id).await?; + + Ok(json!({ + "health": health, + "participation_trends": trends, + "delegation": delegation, + "decision_load": load + })) + } + + /// Export analytics data + pub async fn export_data( + pool: &PgPool, + community_id: Uuid, + start_date: chrono::NaiveDate, + end_date: 
chrono::NaiveDate, + ) -> Result { + let snapshots = sqlx::query!( + r#"SELECT + snapshot_date, total_members, active_members, + proposals_created, votes_cast, unique_voters, + voter_turnout_rate::float8 AS turnout, + engagement_score::float8 AS engagement + FROM participation_snapshots + WHERE community_id = $1 + AND snapshot_date BETWEEN $2 AND $3 + ORDER BY snapshot_date"#, + community_id, + start_date, + end_date + ) + .fetch_all(pool) + .await?; + + Ok(json!({ + "community_id": community_id, + "period": { + "start": start_date, + "end": end_date + }, + "snapshots": snapshots.into_iter().map(|s| json!({ + "date": s.snapshot_date, + "members": s.total_members, + "active": s.active_members, + "proposals": s.proposals_created, + "votes": s.votes_cast, + "voters": s.unique_voters, + "turnout": s.turnout, + "engagement": s.engagement + })).collect::>() + })) + } +} diff --git a/backend/src/plugins/builtin/mod.rs b/backend/src/plugins/builtin/mod.rs new file mode 100644 index 0000000..6393653 --- /dev/null +++ b/backend/src/plugins/builtin/mod.rs @@ -0,0 +1,10 @@ +pub mod comment_notifications; +pub mod conflict_resolution; +pub mod decision_workflows; +pub mod federation; +pub mod governance_analytics; +pub mod moderation_ledger; +pub mod proposal_lifecycle; +pub mod public_data_export; +pub mod self_moderation; +pub mod structured_deliberation; diff --git a/backend/src/plugins/builtin/moderation_ledger.rs b/backend/src/plugins/builtin/moderation_ledger.rs new file mode 100644 index 0000000..fd00729 --- /dev/null +++ b/backend/src/plugins/builtin/moderation_ledger.rs @@ -0,0 +1,636 @@ +use std::sync::Arc; + +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::plugins::{ + hooks::{HookContext, PluginError}, + manager::{Plugin, PluginMetadata, PluginScope, PluginSystem}, +}; + +/// Moderation Ledger Plugin +/// +/// Creates an immutable, cryptographically-chained log of all moderation decisions. +/// This plugin is NON-DEACTIVATABLE by design - transparency is not optional. 
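+///
+/// Hooked actions and the payload fields each handler reads (see `register` below):
+/// - `moderation.content_removed`: `content_id` (required), `content_type`,
+///   `reason`, `actor_role`, `rule_reference`, `evidence`, `content_snapshot`
+/// - `moderation.user_action`: `target_user_id` (required), `action`, `reason`,
+///   `actor_role`, `duration_hours`, `rule_reference`, `evidence`
+/// - `moderation.proposal_action`: `proposal_id` (required), `action`, `reason`,
+///   `actor_role`, `decision_type`, `rule_reference`, `evidence`,
+///   `proposal_snapshot`, `vote_proposal_id`, `vote_result`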
+/// +/// Features: +/// - Immutable entries with SHA-256 hash chain +/// - Full audit trail with actor, target, reason, evidence +/// - Chain verification for tamper detection +/// - Export to JSON/CSV +/// - Community-scoped with independent chains +pub struct ModerationLedgerPlugin; + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum ModerationActionType { + // Content moderation + ContentRemove, + ContentHide, + ContentRestore, + ContentEdit, + ContentFlag, + ContentUnflag, + + // User moderation + UserWarn, + UserMute, + UserUnmute, + UserSuspend, + UserUnsuspend, + UserBan, + UserUnban, + UserRoleChange, + + // Community moderation + CommunitySettingChange, + CommunityRuleAdd, + CommunityRuleEdit, + CommunityRuleRemove, + + // Proposal/voting moderation + ProposalClose, + ProposalReopen, + ProposalArchive, + VoteInvalidate, + VoteRestore, + + // Escalation + EscalateToAdmin, + EscalateToCommunity, + AppealReceived, + AppealResolved, +} + +impl ModerationActionType { + pub fn as_db_str(&self) -> &'static str { + match self { + Self::ContentRemove => "content_remove", + Self::ContentHide => "content_hide", + Self::ContentRestore => "content_restore", + Self::ContentEdit => "content_edit", + Self::ContentFlag => "content_flag", + Self::ContentUnflag => "content_unflag", + Self::UserWarn => "user_warn", + Self::UserMute => "user_mute", + Self::UserUnmute => "user_unmute", + Self::UserSuspend => "user_suspend", + Self::UserUnsuspend => "user_unsuspend", + Self::UserBan => "user_ban", + Self::UserUnban => "user_unban", + Self::UserRoleChange => "user_role_change", + Self::CommunitySettingChange => "community_setting_change", + Self::CommunityRuleAdd => "community_rule_add", + Self::CommunityRuleEdit => "community_rule_edit", + Self::CommunityRuleRemove => "community_rule_remove", + Self::ProposalClose => "proposal_close", + Self::ProposalReopen => "proposal_reopen", + Self::ProposalArchive => "proposal_archive", + Self::VoteInvalidate => "vote_invalidate", + Self::VoteRestore => "vote_restore", + Self::EscalateToAdmin => "escalate_to_admin", + Self::EscalateToCommunity => "escalate_to_community", + Self::AppealReceived => "appeal_received", + Self::AppealResolved => "appeal_resolved", + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct LedgerEntry { + pub id: Uuid, + pub sequence_number: i64, + pub community_id: Option, + pub actor_user_id: Uuid, + pub actor_role: String, + pub action_type: String, + pub target_type: String, + pub target_id: Uuid, + pub reason: String, + pub rule_reference: Option, + pub evidence: Option, + pub duration_hours: Option, + pub decision_type: String, + pub entry_hash: String, + pub created_at: chrono::DateTime, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ChainVerificationResult { + pub is_valid: bool, + pub total_entries: i64, + pub broken_at_sequence: Option, + pub expected_hash: Option, + pub actual_hash: Option, + pub error_message: Option, +} + +/// Service for interacting with the moderation ledger +pub struct LedgerService; + +impl LedgerService { + /// Create a new ledger entry + pub async fn create_entry( + pool: &PgPool, + community_id: Option, + actor_user_id: Uuid, + actor_role: &str, + action_type: ModerationActionType, + target_type: &str, + target_id: Uuid, + reason: &str, + rule_reference: Option<&str>, + evidence: Option, + target_snapshot: Option, + duration_hours: Option, + decision_type: &str, + vote_proposal_id: Option, + vote_result: Option, + ) -> 
Result { + let entry_id: Uuid = sqlx::query_scalar( + r#"SELECT create_ledger_entry( + $1, $2, $3, $4::moderation_action_type, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14 + )"#, + ) + .bind(community_id) + .bind(actor_user_id) + .bind(actor_role) + .bind(action_type.as_db_str()) + .bind(target_type) + .bind(target_id) + .bind(reason) + .bind(rule_reference) + .bind(evidence) + .bind(target_snapshot) + .bind(duration_hours) + .bind(decision_type) + .bind(vote_proposal_id) + .bind(vote_result) + .fetch_one(pool) + .await?; + + Ok(entry_id) + } + + /// Get ledger entries for a community + pub async fn get_entries( + pool: &PgPool, + community_id: Option, + limit: i64, + offset: i64, + ) -> Result, PluginError> { + let entries = sqlx::query_as!( + LedgerEntry, + r#"SELECT + id, + sequence_number, + community_id, + actor_user_id, + actor_role, + action_type::text AS "action_type!", + target_type, + target_id, + reason, + rule_reference, + evidence, + duration_hours, + decision_type, + entry_hash, + created_at + FROM moderation_ledger + WHERE community_id IS NOT DISTINCT FROM $1 + ORDER BY sequence_number DESC + LIMIT $2 OFFSET $3"#, + community_id, + limit, + offset, + ) + .fetch_all(pool) + .await?; + + Ok(entries) + } + + /// Get a single entry by ID + pub async fn get_entry(pool: &PgPool, entry_id: Uuid) -> Result, PluginError> { + let entry = sqlx::query_as!( + LedgerEntry, + r#"SELECT + id, + sequence_number, + community_id, + actor_user_id, + actor_role, + action_type::text AS "action_type!", + target_type, + target_id, + reason, + rule_reference, + evidence, + duration_hours, + decision_type, + entry_hash, + created_at + FROM moderation_ledger + WHERE id = $1"#, + entry_id, + ) + .fetch_optional(pool) + .await?; + + Ok(entry) + } + + /// Get entries targeting a specific entity + pub async fn get_entries_for_target( + pool: &PgPool, + target_type: &str, + target_id: Uuid, + ) -> Result, PluginError> { + let entries = sqlx::query_as!( + LedgerEntry, + r#"SELECT + id, + sequence_number, + community_id, + actor_user_id, + actor_role, + action_type::text AS "action_type!", + target_type, + target_id, + reason, + rule_reference, + evidence, + duration_hours, + decision_type, + entry_hash, + created_at + FROM moderation_ledger + WHERE target_type = $1 AND target_id = $2 + ORDER BY sequence_number DESC"#, + target_type, + target_id, + ) + .fetch_all(pool) + .await?; + + Ok(entries) + } + + /// Verify the chain integrity + pub async fn verify_chain( + pool: &PgPool, + community_id: Option, + ) -> Result { + let result = sqlx::query!( + r#"SELECT + is_valid, + total_entries, + broken_at_sequence, + expected_hash, + actual_hash, + error_message + FROM verify_ledger_chain($1)"#, + community_id, + ) + .fetch_one(pool) + .await?; + + Ok(ChainVerificationResult { + is_valid: result.is_valid.unwrap_or(false), + total_entries: result.total_entries.unwrap_or(0), + broken_at_sequence: result.broken_at_sequence, + expected_hash: result.expected_hash, + actual_hash: result.actual_hash, + error_message: result.error_message, + }) + } + + /// Get statistics for a community + pub async fn get_stats( + pool: &PgPool, + community_id: Option, + ) -> Result { + let stats = sqlx::query!( + r#"SELECT + action_type::text AS action_type, + decision_type, + total_actions, + unique_actors, + unique_targets + FROM v_moderation_stats + WHERE community_id IS NOT DISTINCT FROM $1"#, + community_id, + ) + .fetch_all(pool) + .await?; + + let summary: Vec = stats + .into_iter() + .map(|s| { + json!({ + "action_type": 
s.action_type, + "decision_type": s.decision_type, + "total_actions": s.total_actions, + "unique_actors": s.unique_actors, + "unique_targets": s.unique_targets, + }) + }) + .collect(); + + Ok(json!({ "stats": summary })) + } + + /// Export ledger entries as JSON + pub async fn export_json( + pool: &PgPool, + community_id: Option, + ) -> Result { + let entries = Self::get_entries(pool, community_id, 100000, 0).await?; + let verification = Self::verify_chain(pool, community_id).await?; + + Ok(json!({ + "export_version": "1.0", + "exported_at": chrono::Utc::now(), + "community_id": community_id, + "chain_verification": verification, + "entries": entries, + })) + } +} + +#[async_trait] +impl Plugin for ModerationLedgerPlugin { + fn metadata(&self) -> PluginMetadata { + PluginMetadata { + name: "moderation_ledger", + version: "1.0.0", + description: "Immutable, cryptographically-chained log of all moderation decisions. Cannot be deactivated.", + is_core: true, // Core plugin - cannot be disabled + scope: PluginScope::Global, + default_enabled: true, + settings_schema: Some(json!({ + "type": "object", + "properties": { + "require_reason_min_length": { + "type": "integer", + "title": "Minimum reason length", + "description": "Minimum characters required for moderation justifications", + "default": 20, + "minimum": 10, + "maximum": 500 + }, + "require_rule_reference": { + "type": "boolean", + "title": "Require rule reference", + "description": "Require moderators to cite a specific community rule", + "default": false + }, + "public_ledger": { + "type": "boolean", + "title": "Public ledger", + "description": "Allow all community members to view the moderation ledger", + "default": true + } + } + })), + } + } + + fn register(&self, system: &mut PluginSystem) { + let plugin_name = self.metadata().name.to_string(); + + // Hook: Log content removal + system.add_action( + "moderation.content_removed", + plugin_name.clone(), + 1, // Highest priority - must log before anything else + Arc::new(move |ctx: HookContext, payload: Value| { + let plugin_name = plugin_name.clone(); + Box::pin(async move { + let actor_id = ctx.actor_user_id + .ok_or_else(|| PluginError::Message("Missing actor_user_id".into()))?; + + let target_id = payload + .get("content_id") + .and_then(|v| v.as_str()) + .and_then(|s| Uuid::parse_str(s).ok()) + .ok_or_else(|| PluginError::Message("Missing content_id".into()))?; + + let content_type = payload + .get("content_type") + .and_then(|v| v.as_str()) + .unwrap_or("unknown"); + + let reason = payload + .get("reason") + .and_then(|v| v.as_str()) + .unwrap_or("No reason provided"); + + let actor_role = payload + .get("actor_role") + .and_then(|v| v.as_str()) + .unwrap_or("moderator"); + + let entry_id = LedgerService::create_entry( + &ctx.pool, + ctx.community_id, + actor_id, + actor_role, + ModerationActionType::ContentRemove, + content_type, + target_id, + reason, + payload.get("rule_reference").and_then(|v| v.as_str()), + payload.get("evidence").cloned(), + payload.get("content_snapshot").cloned(), + None, + "unilateral", + None, + None, + ).await?; + + let _ = ctx.emit_public_event( + Some(&plugin_name), + "ledger.entry_created", + json!({ + "entry_id": entry_id, + "action_type": "content_remove", + "target_type": content_type, + }), + ).await; + + Ok(()) + }) + }), + ); + + // Hook: Log user moderation + let plugin_name2 = self.metadata().name.to_string(); + system.add_action( + "moderation.user_action", + plugin_name2.clone(), + 1, + Arc::new(move |ctx: HookContext, payload: Value| { 
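+                    // Note: any unrecognized "action" value is recorded as a UserWarn entry (see the match below).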
+ let plugin_name = plugin_name2.clone(); + Box::pin(async move { + let actor_id = ctx.actor_user_id + .ok_or_else(|| PluginError::Message("Missing actor_user_id".into()))?; + + let target_user_id = payload + .get("target_user_id") + .and_then(|v| v.as_str()) + .and_then(|s| Uuid::parse_str(s).ok()) + .ok_or_else(|| PluginError::Message("Missing target_user_id".into()))?; + + let action = payload + .get("action") + .and_then(|v| v.as_str()) + .unwrap_or("user_warn"); + + let action_type = match action { + "warn" => ModerationActionType::UserWarn, + "mute" => ModerationActionType::UserMute, + "unmute" => ModerationActionType::UserUnmute, + "suspend" => ModerationActionType::UserSuspend, + "unsuspend" => ModerationActionType::UserUnsuspend, + "ban" => ModerationActionType::UserBan, + "unban" => ModerationActionType::UserUnban, + _ => ModerationActionType::UserWarn, + }; + + let reason = payload + .get("reason") + .and_then(|v| v.as_str()) + .unwrap_or("No reason provided"); + + let actor_role = payload + .get("actor_role") + .and_then(|v| v.as_str()) + .unwrap_or("moderator"); + + let duration = payload + .get("duration_hours") + .and_then(|v| v.as_i64()) + .map(|d| d as i32); + + let entry_id = LedgerService::create_entry( + &ctx.pool, + ctx.community_id, + actor_id, + actor_role, + action_type, + "user", + target_user_id, + reason, + payload.get("rule_reference").and_then(|v| v.as_str()), + payload.get("evidence").cloned(), + None, + duration, + "unilateral", + None, + None, + ).await?; + + let _ = ctx.emit_public_event( + Some(&plugin_name), + "ledger.entry_created", + json!({ + "entry_id": entry_id, + "action_type": action, + "target_type": "user", + }), + ).await; + + Ok(()) + }) + }), + ); + + // Hook: Log proposal moderation + let plugin_name3 = self.metadata().name.to_string(); + system.add_action( + "moderation.proposal_action", + plugin_name3.clone(), + 1, + Arc::new(move |ctx: HookContext, payload: Value| { + let plugin_name = plugin_name3.clone(); + Box::pin(async move { + let actor_id = ctx.actor_user_id + .ok_or_else(|| PluginError::Message("Missing actor_user_id".into()))?; + + let proposal_id = payload + .get("proposal_id") + .and_then(|v| v.as_str()) + .and_then(|s| Uuid::parse_str(s).ok()) + .ok_or_else(|| PluginError::Message("Missing proposal_id".into()))?; + + let action = payload + .get("action") + .and_then(|v| v.as_str()) + .unwrap_or("close"); + + let action_type = match action { + "close" => ModerationActionType::ProposalClose, + "reopen" => ModerationActionType::ProposalReopen, + "archive" => ModerationActionType::ProposalArchive, + _ => ModerationActionType::ProposalClose, + }; + + let reason = payload + .get("reason") + .and_then(|v| v.as_str()) + .unwrap_or("No reason provided"); + + let actor_role = payload + .get("actor_role") + .and_then(|v| v.as_str()) + .unwrap_or("moderator"); + + let decision_type = payload + .get("decision_type") + .and_then(|v| v.as_str()) + .unwrap_or("unilateral"); + + let vote_proposal_id = payload + .get("vote_proposal_id") + .and_then(|v| v.as_str()) + .and_then(|s| Uuid::parse_str(s).ok()); + + let entry_id = LedgerService::create_entry( + &ctx.pool, + ctx.community_id, + actor_id, + actor_role, + action_type, + "proposal", + proposal_id, + reason, + payload.get("rule_reference").and_then(|v| v.as_str()), + payload.get("evidence").cloned(), + payload.get("proposal_snapshot").cloned(), + None, + decision_type, + vote_proposal_id, + payload.get("vote_result").cloned(), + ).await?; + + let _ = ctx.emit_public_event( + 
Some(&plugin_name), + "ledger.entry_created", + json!({ + "entry_id": entry_id, + "action_type": action, + "target_type": "proposal", + }), + ).await; + + Ok(()) + }) + }), + ); + } +} diff --git a/backend/src/plugins/builtin/proposal_lifecycle.rs b/backend/src/plugins/builtin/proposal_lifecycle.rs new file mode 100644 index 0000000..6cbd616 --- /dev/null +++ b/backend/src/plugins/builtin/proposal_lifecycle.rs @@ -0,0 +1,477 @@ +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; +use sqlx::PgPool; +use std::sync::Arc; +use uuid::Uuid; + +use crate::plugins::{ + hooks::HookContext, + manager::PluginSystem, + Plugin, PluginError, PluginMetadata, PluginScope, +}; + +pub struct ProposalLifecyclePlugin; + +impl ProposalLifecyclePlugin { + pub fn new() -> Self { + Self + } +} + +#[async_trait] +impl Plugin for ProposalLifecyclePlugin { + fn metadata(&self) -> PluginMetadata { + PluginMetadata { + name: "proposal_lifecycle", + version: "1.0.0", + description: "Proposal lifecycle with versioning, diffs, and forks", + is_core: true, + scope: PluginScope::Global, + default_enabled: true, + settings_schema: Some(json!({ + "type": "object", + "properties": { + "require_review": {"type": "boolean", "default": true}, + "allow_forks": {"type": "boolean", "default": true}, + "allow_amendments": {"type": "boolean", "default": true} + } + })), + } + } + + fn register(&self, system: &mut PluginSystem) { + // Hook: Track proposal edits as versions + system.add_action( + "proposal.updated", + "proposal_lifecycle".to_string(), + 10, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + if let Some(proposal_id) = payload.get("proposal_id") + .and_then(|v| v.as_str()) + .and_then(|s| Uuid::parse_str(s).ok()) + { + let title = payload.get("title").and_then(|v| v.as_str()).unwrap_or(""); + let content = payload.get("content").and_then(|v| v.as_str()).unwrap_or(""); + let summary = payload.get("change_summary").and_then(|v| v.as_str()); + + if let Some(user_id) = ctx.actor_user_id { + LifecycleService::create_version( + &ctx.pool, proposal_id, title, content, + user_id, "edit", summary + ).await?; + } + } + Ok(()) + }) + }), + ); + + // Hook: Emit events on status transitions + system.add_action( + "proposal.status.changed", + "proposal_lifecycle".to_string(), + 50, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + ctx.emit_public_event( + Some("proposal_lifecycle"), + "status.transition", + payload.clone(), + ).await?; + Ok(()) + }) + }), + ); + + // Hook: Track forks + system.add_action( + "proposal.forked", + "proposal_lifecycle".to_string(), + 50, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + ctx.emit_public_event( + Some("proposal_lifecycle"), + "proposal.forked", + payload.clone(), + ).await?; + Ok(()) + }) + }), + ); + } + +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ProposalVersion { + pub id: Uuid, + pub proposal_id: Uuid, + pub version_number: i32, + pub title: String, + pub content: String, + pub change_type: String, + pub change_summary: Option, + pub created_by: Uuid, + pub created_at: chrono::DateTime, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ProposalAmendment { + pub id: Uuid, + pub proposal_id: Uuid, + pub title: String, + pub description: String, + pub status: String, + pub proposed_by: Uuid, + pub support_count: i32, + pub oppose_count: i32, +} + +pub struct LifecycleService; + +impl LifecycleService { + /// Create a new version of a 
proposal + pub async fn create_version( + pool: &PgPool, + proposal_id: Uuid, + title: &str, + content: &str, + created_by: Uuid, + change_type: &str, + change_summary: Option<&str>, + ) -> Result { + let version: i32 = sqlx::query_scalar( + "SELECT create_proposal_version($1, $2, $3, NULL, $4, $5, $6)" + ) + .bind(proposal_id) + .bind(title) + .bind(content) + .bind(created_by) + .bind(change_type) + .bind(change_summary) + .fetch_one(pool) + .await?; + + Ok(version) + } + + /// Transition proposal to a new status + pub async fn transition_status( + pool: &PgPool, + proposal_id: Uuid, + new_status: &str, + triggered_by: Uuid, + trigger_type: &str, + reason: Option<&str>, + ) -> Result { + let success: bool = sqlx::query_scalar( + "SELECT transition_proposal_status($1, $2::proposal_lifecycle_status, $3, $4, $5)" + ) + .bind(proposal_id) + .bind(new_status) + .bind(triggered_by) + .bind(trigger_type) + .bind(reason) + .fetch_one(pool) + .await?; + + Ok(success) + } + + /// Fork a proposal + pub async fn fork_proposal( + pool: &PgPool, + source_proposal_id: Uuid, + forked_by: Uuid, + reason: &str, + community_id: Uuid, + ) -> Result { + let new_id: Uuid = sqlx::query_scalar( + "SELECT fork_proposal($1, $2, $3, $4)" + ) + .bind(source_proposal_id) + .bind(forked_by) + .bind(reason) + .bind(community_id) + .fetch_one(pool) + .await?; + + Ok(new_id) + } + + /// Get version history for a proposal + pub async fn get_versions( + pool: &PgPool, + proposal_id: Uuid, + ) -> Result, PluginError> { + let versions = sqlx::query_as!( + ProposalVersion, + r#"SELECT + id, proposal_id, version_number, title, content, + change_type, change_summary, created_by, created_at + FROM proposal_versions + WHERE proposal_id = $1 + ORDER BY version_number DESC"#, + proposal_id + ) + .fetch_all(pool) + .await?; + + Ok(versions) + } + + /// Get a specific version + pub async fn get_version( + pool: &PgPool, + proposal_id: Uuid, + version_number: i32, + ) -> Result, PluginError> { + let version = sqlx::query_as!( + ProposalVersion, + r#"SELECT + id, proposal_id, version_number, title, content, + change_type, change_summary, created_by, created_at + FROM proposal_versions + WHERE proposal_id = $1 AND version_number = $2"#, + proposal_id, + version_number + ) + .fetch_optional(pool) + .await?; + + Ok(version) + } + + /// Compare two versions (returns diff info) + pub async fn compare_versions( + pool: &PgPool, + proposal_id: Uuid, + from_version: i32, + to_version: i32, + ) -> Result { + let from = Self::get_version(pool, proposal_id, from_version).await?; + let to = Self::get_version(pool, proposal_id, to_version).await?; + + match (from, to) { + (Some(f), Some(t)) => { + Ok(json!({ + "from_version": from_version, + "to_version": to_version, + "title_changed": f.title != t.title, + "content_changed": f.content != t.content, + "from_title": f.title, + "to_title": t.title, + "from_content_length": f.content.len(), + "to_content_length": t.content.len(), + "change_summary": t.change_summary + })) + } + _ => Ok(json!({"error": "Version not found"})), + } + } + + /// Propose an amendment + pub async fn propose_amendment( + pool: &PgPool, + proposal_id: Uuid, + title: &str, + description: &str, + suggested_changes: Value, + proposed_by: Uuid, + ) -> Result { + // Get current version + let current_version: i32 = sqlx::query_scalar!( + "SELECT current_version FROM proposal_lifecycle WHERE proposal_id = $1", + proposal_id + ) + .fetch_one(pool) + .await?; + + let amendment_id = sqlx::query_scalar!( + r#"INSERT INTO 
proposal_amendments ( + proposal_id, target_version, title, description, + suggested_changes, proposed_by + ) VALUES ($1, $2, $3, $4, $5, $6) + RETURNING id"#, + proposal_id, + current_version, + title, + description, + suggested_changes, + proposed_by + ) + .fetch_one(pool) + .await?; + + // Update amendment count + sqlx::query!( + "UPDATE proposal_lifecycle SET amendment_count = amendment_count + 1 WHERE proposal_id = $1", + proposal_id + ) + .execute(pool) + .await?; + + Ok(amendment_id) + } + + /// Support or oppose an amendment + pub async fn vote_amendment( + pool: &PgPool, + amendment_id: Uuid, + user_id: Uuid, + support_type: &str, + comment: Option<&str>, + ) -> Result<(), PluginError> { + sqlx::query!( + r#"INSERT INTO amendment_support (amendment_id, user_id, support_type, comment) + VALUES ($1, $2, $3, $4) + ON CONFLICT (amendment_id, user_id) DO UPDATE SET + support_type = $3, + comment = $4"#, + amendment_id, + user_id, + support_type, + comment + ) + .execute(pool) + .await?; + + // Update counts + sqlx::query!( + r#"UPDATE proposal_amendments SET + support_count = (SELECT COUNT(*) FROM amendment_support WHERE amendment_id = $1 AND support_type = 'support'), + oppose_count = (SELECT COUNT(*) FROM amendment_support WHERE amendment_id = $1 AND support_type = 'oppose') + WHERE id = $1"#, + amendment_id + ) + .execute(pool) + .await?; + + Ok(()) + } + + /// Get amendments for a proposal + pub async fn get_amendments( + pool: &PgPool, + proposal_id: Uuid, + ) -> Result, PluginError> { + let amendments = sqlx::query_as!( + ProposalAmendment, + r#"SELECT + id, proposal_id, title, description, + status::text AS "status!", proposed_by, + support_count, oppose_count + FROM proposal_amendments + WHERE proposal_id = $1 + ORDER BY proposed_at DESC"#, + proposal_id + ) + .fetch_all(pool) + .await?; + + Ok(amendments) + } + + /// Accept an amendment (author action) + pub async fn accept_amendment( + pool: &PgPool, + amendment_id: Uuid, + reviewer_id: Uuid, + response: Option<&str>, + ) -> Result<(), PluginError> { + sqlx::query!( + r#"UPDATE proposal_amendments SET + status = 'accepted', + reviewed_by = $2, + reviewed_at = NOW(), + review_response = $3 + WHERE id = $1"#, + amendment_id, + reviewer_id, + response + ) + .execute(pool) + .await?; + + Ok(()) + } + + /// Get lifecycle summary for a proposal + pub async fn get_lifecycle_summary( + pool: &PgPool, + proposal_id: Uuid, + ) -> Result { + let summary = sqlx::query!( + r#"SELECT + pl.current_status::text AS "status!", + pl.current_version, + pl.submitted_at, + pl.activated_at, + pl.voting_started_at, + pl.resolved_at, + pl.revision_count, + pl.fork_count, + pl.amendment_count, + pl.forked_from_id + FROM proposal_lifecycle pl + WHERE pl.proposal_id = $1"#, + proposal_id + ) + .fetch_optional(pool) + .await?; + + match summary { + Some(s) => Ok(json!({ + "status": s.status, + "current_version": s.current_version, + "submitted_at": s.submitted_at, + "activated_at": s.activated_at, + "voting_started_at": s.voting_started_at, + "resolved_at": s.resolved_at, + "revision_count": s.revision_count, + "fork_count": s.fork_count, + "amendment_count": s.amendment_count, + "forked_from": s.forked_from_id + })), + None => Ok(json!({"status": "unknown"})), + } + } + + /// Get forks of a proposal + pub async fn get_forks( + pool: &PgPool, + proposal_id: Uuid, + ) -> Result, PluginError> { + let forks = sqlx::query!( + r#"SELECT + pf.fork_proposal_id, + p.title AS fork_title, + u.username AS forked_by_username, + pf.forked_at, + pf.fork_reason, + 
pf.is_competing, + pf.is_merged + FROM proposal_forks pf + JOIN proposals p ON p.id = pf.fork_proposal_id + JOIN users u ON u.id = pf.forked_by + WHERE pf.source_proposal_id = $1 + ORDER BY pf.forked_at DESC"#, + proposal_id + ) + .fetch_all(pool) + .await?; + + Ok(forks.into_iter().map(|f| json!({ + "fork_id": f.fork_proposal_id, + "title": f.fork_title, + "forked_by": f.forked_by_username, + "forked_at": f.forked_at, + "reason": f.fork_reason, + "is_competing": f.is_competing, + "is_merged": f.is_merged + })).collect()) + } +} diff --git a/backend/src/plugins/builtin/public_data_export.rs b/backend/src/plugins/builtin/public_data_export.rs new file mode 100644 index 0000000..e28374b --- /dev/null +++ b/backend/src/plugins/builtin/public_data_export.rs @@ -0,0 +1,414 @@ +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; +use sqlx::PgPool; +use std::sync::Arc; +use uuid::Uuid; + +use crate::plugins::{ + hooks::HookContext, + manager::PluginSystem, + Plugin, PluginError, PluginMetadata, PluginScope, +}; + +pub struct PublicDataExportPlugin; + +impl PublicDataExportPlugin { + pub fn new() -> Self { + Self + } +} + +#[async_trait] +impl Plugin for PublicDataExportPlugin { + fn metadata(&self) -> PluginMetadata { + PluginMetadata { + name: "public_data_export", + version: "1.0.0", + description: "Privacy-aware CSV/JSON data exports", + is_core: false, + scope: PluginScope::Community, + default_enabled: true, + settings_schema: Some(json!({ + "type": "object", + "properties": { + "default_anonymize": {"type": "boolean", "default": true}, + "allow_public_exports": {"type": "boolean", "default": true}, + "max_export_records": {"type": "integer", "default": 100000} + } + })), + } + } + + fn register(&self, system: &mut PluginSystem) { + // Hook: Process pending export jobs + system.add_action( + "cron.minutely", + "public_data_export".to_string(), + 50, + Arc::new(|ctx: HookContext, _payload: Value| { + Box::pin(async move { + ExportService::process_pending_jobs(&ctx.pool).await?; + Ok(()) + }) + }), + ); + + // Hook: Clean up expired exports + system.add_action( + "cron.daily", + "public_data_export".to_string(), + 50, + Arc::new(|ctx: HookContext, _payload: Value| { + Box::pin(async move { + ExportService::cleanup_expired(&ctx.pool).await?; + Ok(()) + }) + }), + ); + + // Hook: Log export downloads + system.add_action( + "export.downloaded", + "public_data_export".to_string(), + 50, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + if let Some(job_id) = payload.get("job_id") + .and_then(|v| v.as_str()) + .and_then(|s| Uuid::parse_str(s).ok()) + { + ExportService::record_download(&ctx.pool, job_id, ctx.actor_user_id).await?; + } + Ok(()) + }) + }), + ); + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ExportJob { + pub id: Uuid, + pub community_id: Option, + pub export_type: String, + pub format: String, + pub status: String, + pub record_count: Option, + pub download_url: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ExportConfig { + pub id: Uuid, + pub community_id: Option, + pub name: String, + pub export_type: String, + pub public_access: bool, +} + +pub struct ExportService; + +impl ExportService { + /// Create a new export job + pub async fn create_job( + pool: &PgPool, + community_id: Uuid, + export_type: &str, + format: &str, + requested_by: Option, + date_from: Option>, + date_to: Option>, + ) -> Result { + let job_id: Uuid = sqlx::query_scalar( + "SELECT 
create_export_job($1, $2, $3::export_format, $4, $5, $6)" + ) + .bind(community_id) + .bind(export_type) + .bind(format) + .bind(requested_by) + .bind(date_from) + .bind(date_to) + .fetch_one(pool) + .await?; + + Ok(job_id) + } + + /// Process pending export jobs + pub async fn process_pending_jobs(pool: &PgPool) -> Result { + let pending_jobs = sqlx::query!( + r#"SELECT id, community_id, export_type, format::text AS "format!" + FROM export_jobs WHERE status = 'pending' + ORDER BY requested_at LIMIT 5"# + ) + .fetch_all(pool) + .await?; + + let mut processed = 0; + for job in pending_jobs { + // Mark as processing + sqlx::query!( + "UPDATE export_jobs SET status = 'processing', started_at = NOW() WHERE id = $1", + job.id + ) + .execute(pool) + .await?; + + // Process based on type + let result = match job.export_type.as_str() { + "proposals" => Self::export_proposals(pool, job.community_id, &job.format).await, + "votes" => Self::export_votes(pool, job.community_id, &job.format).await, + "analytics" => Self::export_analytics(pool, job.community_id, &job.format).await, + _ => Err(PluginError::Message(format!("Unknown export type: {}", job.export_type))), + }; + + match result { + Ok((count, data)) => { + let file_size = data.len() as i64; + sqlx::query!( + r#"UPDATE export_jobs SET + status = 'completed', completed_at = NOW(), + record_count = $2, file_size_bytes = $3, + download_expires_at = NOW() + INTERVAL '7 days' + WHERE id = $1"#, + job.id, + count, + file_size + ) + .execute(pool) + .await?; + } + Err(e) => { + sqlx::query!( + "UPDATE export_jobs SET status = 'failed', error_message = $2 WHERE id = $1", + job.id, + e.to_string() + ) + .execute(pool) + .await?; + } + } + processed += 1; + } + + Ok(processed) + } + + /// Export proposals data + async fn export_proposals( + pool: &PgPool, + community_id: Option, + format: &str, + ) -> Result<(i32, String), PluginError> { + let community_id = community_id.ok_or_else(|| PluginError::Message("Community ID required".into()))?; + + let proposals = sqlx::query!( + r#"SELECT * FROM get_exportable_proposals($1, true, NULL, NULL)"#, + community_id + ) + .fetch_all(pool) + .await?; + + let count = proposals.len() as i32; + let data = match format { + "json" => { + let items: Vec = proposals.iter().map(|p| json!({ + "id": p.id, + "title": p.title, + "author_id": p.author_id, + "status": p.status, + "created_at": p.created_at, + "vote_count": p.vote_count + })).collect(); + serde_json::to_string_pretty(&items).unwrap_or_default() + } + "csv" => { + let mut csv = "id,title,author_id,status,created_at,vote_count\n".to_string(); + for p in &proposals { + csv.push_str(&format!( + "{},{},{},{},{},{}\n", + p.id.map(|u| u.to_string()).unwrap_or_default(), + p.title.as_deref().unwrap_or("").replace(',', ";"), + p.author_id.as_deref().unwrap_or(""), + p.status.as_deref().unwrap_or(""), + p.created_at.map(|t| t.to_string()).unwrap_or_default(), + p.vote_count.unwrap_or(0) + )); + } + csv + } + _ => return Err(PluginError::Message("Unsupported format".into())), + }; + + Ok((count, data)) + } + + /// Export votes data + async fn export_votes( + pool: &PgPool, + community_id: Option, + format: &str, + ) -> Result<(i32, String), PluginError> { + let community_id = community_id.ok_or_else(|| PluginError::Message("Community ID required".into()))?; + + let votes = sqlx::query!( + r#"SELECT + v.id, v.proposal_id, + encode(sha256(v.voter_id::text::bytea), 'hex') AS voter_hash, + v.created_at + FROM votes v + JOIN proposals p ON p.id = v.proposal_id + WHERE 
p.community_id = $1 + ORDER BY v.created_at DESC"#, + community_id + ) + .fetch_all(pool) + .await?; + + let count = votes.len() as i32; + let data = match format { + "json" => { + let items: Vec = votes.iter().map(|v| json!({ + "id": v.id, + "proposal_id": v.proposal_id, + "voter_hash": v.voter_hash, + "created_at": v.created_at + })).collect(); + serde_json::to_string_pretty(&items).unwrap_or_default() + } + "csv" => { + let mut csv = "id,proposal_id,voter_hash,created_at\n".to_string(); + for v in &votes { + csv.push_str(&format!( + "{},{},{},{}\n", + v.id, v.proposal_id, + v.voter_hash.as_deref().unwrap_or(""), + v.created_at + )); + } + csv + } + _ => return Err(PluginError::Message("Unsupported format".into())), + }; + + Ok((count, data)) + } + + /// Export analytics data + async fn export_analytics( + pool: &PgPool, + community_id: Option, + format: &str, + ) -> Result<(i32, String), PluginError> { + let community_id = community_id.ok_or_else(|| PluginError::Message("Community ID required".into()))?; + + let analytics = sqlx::query!( + r#"SELECT snapshot_date, total_members, active_members, votes_cast + FROM participation_snapshots + WHERE community_id = $1 + ORDER BY snapshot_date DESC + LIMIT 365"#, + community_id + ) + .fetch_all(pool) + .await?; + + let count = analytics.len() as i32; + let data = match format { + "json" => { + let items: Vec = analytics.iter().map(|a| json!({ + "date": a.snapshot_date.to_string(), + "total_members": a.total_members, + "active_members": a.active_members, + "votes_cast": a.votes_cast + })).collect(); + serde_json::to_string_pretty(&items).unwrap_or_default() + } + "csv" => { + let mut csv = "date,total_members,active_members,votes_cast\n".to_string(); + for a in &analytics { + csv.push_str(&format!( + "{},{},{},{}\n", + a.snapshot_date, a.total_members, a.active_members, a.votes_cast + )); + } + csv + } + _ => return Err(PluginError::Message("Unsupported format".into())), + }; + + Ok((count, data)) + } + + /// Record a download + pub async fn record_download( + pool: &PgPool, + job_id: Uuid, + user_id: Option, + ) -> Result<(), PluginError> { + sqlx::query!( + r#"UPDATE export_jobs SET download_count = download_count + 1 WHERE id = $1"#, + job_id + ) + .execute(pool) + .await?; + + sqlx::query!( + r#"INSERT INTO export_audit_log (job_id, action_type, actor_id) + SELECT $1, 'downloaded', $2 + FROM export_jobs WHERE id = $1"#, + job_id, + user_id + ) + .execute(pool) + .await?; + + Ok(()) + } + + /// Cleanup expired exports + pub async fn cleanup_expired(pool: &PgPool) -> Result { + let result = sqlx::query!( + r#"UPDATE export_jobs SET status = 'expired' + WHERE status = 'completed' AND download_expires_at < NOW() + RETURNING id"# + ) + .fetch_all(pool) + .await?; + + Ok(result.len() as i32) + } + + /// Get available exports for a community + pub async fn get_available(pool: &PgPool, community_id: Uuid) -> Result, PluginError> { + let configs = sqlx::query_as!( + ExportConfig, + r#"SELECT id, community_id, name, export_type, public_access + FROM export_configurations + WHERE community_id = $1 AND is_active = true"#, + community_id + ) + .fetch_all(pool) + .await?; + + Ok(configs) + } + + /// Get job status + pub async fn get_job(pool: &PgPool, job_id: Uuid) -> Result, PluginError> { + let job = sqlx::query_as!( + ExportJob, + r#"SELECT + id, community_id, export_type, + format::text AS "format!", status::text AS "status!", + record_count, download_url + FROM export_jobs WHERE id = $1"#, + job_id + ) + .fetch_optional(pool) + .await?; + + Ok(job) + 
} +} diff --git a/backend/src/plugins/builtin/self_moderation.rs b/backend/src/plugins/builtin/self_moderation.rs new file mode 100644 index 0000000..6d9eb99 --- /dev/null +++ b/backend/src/plugins/builtin/self_moderation.rs @@ -0,0 +1,530 @@ +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; +use sqlx::PgPool; +use std::sync::Arc; +use uuid::Uuid; + +use crate::plugins::{ + hooks::HookContext, + manager::PluginSystem, + Plugin, PluginError, PluginMetadata, PluginScope, +}; + +pub struct SelfModerationPlugin; + +impl SelfModerationPlugin { + pub fn new() -> Self { + Self + } +} + +#[async_trait] +impl Plugin for SelfModerationPlugin { + fn metadata(&self) -> PluginMetadata { + PluginMetadata { + name: "self_moderation_rules", + version: "1.0.0", + description: "Community-configurable moderation rules with escalation", + is_core: false, + scope: PluginScope::Community, + default_enabled: true, + settings_schema: Some(json!({ + "type": "object", + "properties": { + "default_vote_duration_hours": {"type": "integer", "default": 48}, + "auto_expire_sanctions": {"type": "boolean", "default": true}, + "escalation_cooldown_days": {"type": "integer", "default": 90} + } + })), + } + } + + fn register(&self, system: &mut PluginSystem) { + // Hook: Expire sanctions periodically + system.add_action( + "cron.hour", + "self_moderation_rules".to_string(), + 50, + Arc::new(|ctx: HookContext, _payload: Value| { + Box::pin(async move { + ModerationRulesService::expire_sanctions(&ctx.pool).await?; + Ok(()) + }) + }), + ); + + // Hook: Check user sanctions before actions + system.add_action( + "user.action.pre", + "self_moderation_rules".to_string(), + 5, // High priority - run early + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + if let (Some(user_id), Some(community_id), Some(action)) = ( + ctx.actor_user_id, + ctx.community_id, + payload.get("action").and_then(|v| v.as_str()), + ) { + let is_blocked = ModerationRulesService::check_user_blocked( + &ctx.pool, user_id, community_id, action + ).await?; + + if is_blocked { + return Err(PluginError::Message( + "Action blocked due to active sanction".to_string() + )); + } + } + Ok(()) + }) + }), + ); + + // Hook: Log violation when reported + system.add_action( + "moderation.violation.reported", + "self_moderation_rules".to_string(), + 50, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + ctx.emit_public_event( + Some("self_moderation_rules"), + "violation.reported", + payload.clone(), + ).await?; + Ok(()) + }) + }), + ); + + // Hook: When sanction is applied, log to moderation ledger + system.add_action( + "moderation.sanction.applied", + "self_moderation_rules".to_string(), + 50, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + ctx.emit_public_event( + Some("self_moderation_rules"), + "sanction.applied", + payload.clone(), + ).await?; + Ok(()) + }) + }), + ); + } + +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CommunityRule { + pub id: Uuid, + pub community_id: Uuid, + pub code: String, + pub title: String, + pub description: String, + pub scope: String, + pub severity: String, + pub is_active: bool, + pub allow_community_vote: bool, +} + +/// Rule violation record. Used for violation tracking. 
+#[allow(dead_code)] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RuleViolation { + pub id: Uuid, + pub community_id: Uuid, + pub rule_id: Uuid, + pub target_user_id: Uuid, + pub status: String, + pub escalation_level: i32, + pub reported_at: chrono::DateTime, +} + +/// Sanction applied to a user. Used for sanction management. +#[allow(dead_code)] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Sanction { + pub id: Uuid, + pub violation_id: Uuid, + pub target_user_id: Uuid, + pub sanction_type: String, + pub status: String, + pub expires_at: Option>, +} + +pub struct ModerationRulesService; + +impl ModerationRulesService { + /// Expire sanctions that have passed their duration + pub async fn expire_sanctions(pool: &PgPool) -> Result { + let count: i64 = sqlx::query_scalar("SELECT expire_sanctions()") + .fetch_one(pool) + .await?; + Ok(count as i32) + } + + /// Check if user is blocked from an action + pub async fn check_user_blocked( + pool: &PgPool, + user_id: Uuid, + community_id: Uuid, + action: &str, + ) -> Result { + let sanction_type = match action { + "post" | "comment" => Some("temporary_mute"), + "vote" => Some("voting_suspension"), + "access" => Some("temporary_suspend"), + _ => None, + }; + + if let Some(st) = sanction_type { + let blocked: bool = sqlx::query_scalar( + "SELECT user_has_active_sanction($1, $2, $3::sanction_type)" + ) + .bind(user_id) + .bind(community_id) + .bind(st) + .fetch_one(pool) + .await?; + return Ok(blocked); + } + + // Check for permanent ban + let banned: bool = sqlx::query_scalar( + "SELECT user_has_active_sanction($1, $2, 'permanent_ban'::sanction_type)" + ) + .bind(user_id) + .bind(community_id) + .fetch_one(pool) + .await?; + + Ok(banned) + } + + /// Report a rule violation + pub async fn report_violation( + pool: &PgPool, + community_id: Uuid, + rule_id: Uuid, + target_user_id: Uuid, + reported_by: Option, + reason: &str, + evidence: Option, + ) -> Result { + let violation_id = sqlx::query_scalar!( + r#"INSERT INTO rule_violations ( + community_id, rule_id, target_user_id, + reported_by, report_reason, report_evidence + ) VALUES ($1, $2, $3, $4, $5, $6) + RETURNING id"#, + community_id, + rule_id, + target_user_id, + reported_by, + reason, + evidence + ) + .fetch_one(pool) + .await?; + + Ok(violation_id) + } + + /// Review a violation (moderator action) + pub async fn review_violation( + pool: &PgPool, + violation_id: Uuid, + reviewer_id: Uuid, + confirmed: bool, + notes: Option<&str>, + ) -> Result<(), PluginError> { + if confirmed { + // Get escalation level + let violation = sqlx::query!( + "SELECT target_user_id, community_id, rule_id FROM rule_violations WHERE id = $1", + violation_id + ) + .fetch_one(pool) + .await?; + + let escalation_level: i32 = sqlx::query_scalar( + "SELECT calculate_escalation_level($1, $2, $3)" + ) + .bind(violation.target_user_id) + .bind(violation.community_id) + .bind(violation.rule_id) + .fetch_one(pool) + .await?; + + // Check if community vote is required + let rule = sqlx::query!( + "SELECT allow_community_vote FROM community_rules WHERE id = $1", + violation.rule_id + ) + .fetch_one(pool) + .await?; + + if rule.allow_community_vote && escalation_level >= 2 { + // Start community vote + sqlx::query!( + r#"UPDATE rule_violations + SET status = 'pending_vote', + reviewed_by = $2, + reviewed_at = NOW(), + review_notes = $3, + escalation_level = $4 + WHERE id = $1"#, + violation_id, + reviewer_id, + notes, + escalation_level + ) + .execute(pool) + .await?; + } else { + // Apply sanction 
directly + sqlx::query!( + r#"UPDATE rule_violations + SET status = 'confirmed', + reviewed_by = $2, + reviewed_at = NOW(), + review_notes = $3, + escalation_level = $4 + WHERE id = $1"#, + violation_id, + reviewer_id, + notes, + escalation_level + ) + .execute(pool) + .await?; + + // Get and apply appropriate sanction + Self::apply_escalated_sanction(pool, violation_id, reviewer_id).await?; + } + } else { + // Dismiss the violation + sqlx::query!( + r#"UPDATE rule_violations + SET status = 'dismissed', + reviewed_by = $2, + reviewed_at = NOW(), + review_notes = $3, + resolved_at = NOW(), + resolution_type = 'dismissed' + WHERE id = $1"#, + violation_id, + reviewer_id, + notes + ) + .execute(pool) + .await?; + } + + Ok(()) + } + + /// Apply escalated sanction based on violation level + pub async fn apply_escalated_sanction( + pool: &PgPool, + violation_id: Uuid, + applied_by: Uuid, + ) -> Result { + let violation = sqlx::query!( + "SELECT community_id, rule_id, escalation_level FROM rule_violations WHERE id = $1", + violation_id + ) + .fetch_one(pool) + .await?; + + // Get appropriate sanction for this escalation level + let sanction = sqlx::query!( + r#"SELECT sanction_type::text AS "sanction_type!", duration_hours + FROM get_escalated_sanction($1, $2, $3)"#, + violation.community_id, + violation.rule_id, + violation.escalation_level + ) + .fetch_one(pool) + .await?; + + // Apply the sanction + let sanction_id: Uuid = sqlx::query_scalar( + "SELECT apply_sanction($1, $2::sanction_type, $3, $4, 'manual')" + ) + .bind(violation_id) + .bind(&sanction.sanction_type) + .bind(sanction.duration_hours) + .bind(applied_by) + .fetch_one(pool) + .await?; + + Ok(sanction_id) + } + + /// Lift a sanction early + pub async fn lift_sanction( + pool: &PgPool, + sanction_id: Uuid, + lifted_by: Uuid, + reason: &str, + ) -> Result<(), PluginError> { + sqlx::query!( + r#"UPDATE sanctions + SET status = 'lifted', lifted_at = NOW(), lifted_by = $2, lift_reason = $3 + WHERE id = $1 AND status = 'active'"#, + sanction_id, + lifted_by, + reason + ) + .execute(pool) + .await?; + + Ok(()) + } + + /// Get community rules + pub async fn get_community_rules( + pool: &PgPool, + community_id: Uuid, + ) -> Result, PluginError> { + let rules = sqlx::query_as!( + CommunityRule, + r#"SELECT + id, community_id, code, title, description, + scope::text AS "scope!", severity::text AS "severity!", + is_active, allow_community_vote + FROM community_rules + WHERE community_id = $1 + ORDER BY + CASE severity + WHEN 'critical' THEN 1 + WHEN 'major' THEN 2 + WHEN 'minor' THEN 3 + ELSE 4 + END"#, + community_id + ) + .fetch_all(pool) + .await?; + + Ok(rules) + } + + /// Create a community rule + pub async fn create_rule( + pool: &PgPool, + community_id: Uuid, + code: &str, + title: &str, + description: &str, + severity: &str, + created_by: Uuid, + ) -> Result { + let rule_id = sqlx::query_scalar!( + r#"INSERT INTO community_rules ( + community_id, code, title, description, severity, created_by + ) VALUES ($1, $2, $3, $4, $5::rule_severity, $6) + RETURNING id"#, + community_id, + code, + title, + description, + severity as _, + created_by + ) + .fetch_one(pool) + .await?; + + Ok(rule_id) + } + + /// Get user's violation summary + pub async fn get_user_summary( + pool: &PgPool, + user_id: Uuid, + community_id: Uuid, + ) -> Result { + let summary = sqlx::query!( + r#"SELECT + total_violations, confirmed_violations, dismissed_violations, + total_sanctions, active_sanctions, warnings_count, + current_escalation_level, last_violation_at, 
is_in_good_standing + FROM user_violation_summary + WHERE user_id = $1 AND community_id = $2"#, + user_id, + community_id + ) + .fetch_optional(pool) + .await?; + + match summary { + Some(s) => Ok(json!({ + "total_violations": s.total_violations, + "confirmed_violations": s.confirmed_violations, + "dismissed_violations": s.dismissed_violations, + "total_sanctions": s.total_sanctions, + "active_sanctions": s.active_sanctions, + "warnings": s.warnings_count, + "escalation_level": s.current_escalation_level, + "last_violation": s.last_violation_at, + "in_good_standing": s.is_in_good_standing + })), + None => Ok(json!({ + "total_violations": 0, + "confirmed_violations": 0, + "in_good_standing": true + })), + } + } + + /// Get pending violations for moderation queue + pub async fn get_pending_violations( + pool: &PgPool, + community_id: Uuid, + ) -> Result, PluginError> { + let violations = sqlx::query!( + r#"SELECT + rv.id, cr.code AS rule_code, cr.title AS rule_title, + cr.severity::text AS "severity!", + rv.target_user_id, tu.username AS target_username, + rv.reported_by, rv.status::text AS "status!", + rv.reported_at, rv.report_reason + FROM rule_violations rv + JOIN community_rules cr ON cr.id = rv.rule_id + JOIN users tu ON tu.id = rv.target_user_id + WHERE rv.community_id = $1 + AND rv.status IN ('reported', 'under_review') + ORDER BY + CASE cr.severity + WHEN 'critical' THEN 1 + WHEN 'major' THEN 2 + ELSE 3 + END, + rv.reported_at"#, + community_id + ) + .fetch_all(pool) + .await?; + + Ok(violations + .into_iter() + .map(|v| json!({ + "id": v.id, + "rule_code": v.rule_code, + "rule_title": v.rule_title, + "severity": v.severity, + "target_user_id": v.target_user_id, + "target_username": v.target_username, + "reported_by": v.reported_by, + "status": v.status, + "reported_at": v.reported_at, + "reason": v.report_reason + })) + .collect()) + } +} diff --git a/backend/src/plugins/builtin/structured_deliberation.rs b/backend/src/plugins/builtin/structured_deliberation.rs new file mode 100644 index 0000000..11698d4 --- /dev/null +++ b/backend/src/plugins/builtin/structured_deliberation.rs @@ -0,0 +1,429 @@ +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value}; +use sqlx::PgPool; +use std::sync::Arc; +use uuid::Uuid; + +use crate::plugins::{ + hooks::HookContext, + manager::PluginSystem, + Plugin, PluginError, PluginMetadata, PluginScope, +}; + +pub struct StructuredDeliberationPlugin; + +impl StructuredDeliberationPlugin { + pub fn new() -> Self { + Self + } +} + +#[async_trait] +impl Plugin for StructuredDeliberationPlugin { + fn metadata(&self) -> PluginMetadata { + PluginMetadata { + name: "structured_deliberation", + version: "1.0.0", + description: "Pro/con arguments and collaborative summaries", + is_core: false, + scope: PluginScope::Community, + default_enabled: true, + settings_schema: Some(json!({ + "type": "object", + "properties": { + "require_reading": {"type": "boolean", "default": true}, + "enable_summaries": {"type": "boolean", "default": true} + } + })), + } + } + + fn register(&self, system: &mut PluginSystem) { + // Hook: Check reading requirement before comment + system.add_action( + "comment.pre_create", + "structured_deliberation".to_string(), + 5, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + if let (Some(proposal_id), Some(user_id)) = ( + payload.get("proposal_id").and_then(|v| v.as_str()).and_then(|s| Uuid::parse_str(s).ok()), + ctx.actor_user_id, + ) { + let can_comment = 
DeliberationService::check_can_participate( + &ctx.pool, proposal_id, user_id, "comment" + ).await?; + + if !can_comment { + return Err(PluginError::Message( + "Please read the proposal before commenting".to_string() + )); + } + } + Ok(()) + }) + }), + ); + + // Hook: Calculate metrics periodically + system.add_action( + "cron.hourly", + "structured_deliberation".to_string(), + 50, + Arc::new(|ctx: HookContext, _payload: Value| { + Box::pin(async move { + DeliberationService::update_all_metrics(&ctx.pool).await?; + Ok(()) + }) + }), + ); + + // Hook: Track argument creation + system.add_action( + "deliberation.argument.created", + "structured_deliberation".to_string(), + 50, + Arc::new(|ctx: HookContext, payload: Value| { + Box::pin(async move { + ctx.emit_public_event( + Some("structured_deliberation"), + "argument.created", + payload.clone(), + ).await?; + Ok(()) + }) + }), + ); + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Argument { + pub id: Uuid, + pub proposal_id: Uuid, + pub stance: String, + pub title: String, + pub content: String, + pub author_id: Uuid, + pub upvotes: i32, + pub downvotes: i32, + pub quality_score: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Summary { + pub id: Uuid, + pub proposal_id: Uuid, + pub summary_type: String, + pub content: String, + pub version: i32, + pub is_approved: bool, +} + +pub struct DeliberationService; + +impl DeliberationService { + /// Check if user can participate based on reading requirements + pub async fn check_can_participate( + pool: &PgPool, + proposal_id: Uuid, + user_id: Uuid, + action: &str, + ) -> Result { + let reading = sqlx::query!( + r#"SELECT can_comment, can_vote FROM deliberation_reading_log + WHERE proposal_id = $1 AND user_id = $2"#, + proposal_id, + user_id + ) + .fetch_optional(pool) + .await?; + + match reading { + Some(r) => Ok(match action { + "comment" => r.can_comment.unwrap_or(false), + "vote" => r.can_vote.unwrap_or(false), + _ => true, + }), + None => Ok(false), // No reading record + } + } + + /// Record that user has read content + pub async fn record_reading( + pool: &PgPool, + proposal_id: Uuid, + user_id: Uuid, + read_type: &str, + time_seconds: i32, + ) -> Result<(), PluginError> { + sqlx::query!( + r#"INSERT INTO deliberation_reading_log (proposal_id, user_id, first_read_at, reading_time_seconds) + VALUES ($1, $2, NOW(), $3) + ON CONFLICT (proposal_id, user_id) DO UPDATE SET + read_proposal = CASE WHEN $4 = 'proposal' THEN true ELSE deliberation_reading_log.read_proposal END, + read_summaries = CASE WHEN $4 = 'summaries' THEN true ELSE deliberation_reading_log.read_summaries END, + read_top_arguments = CASE WHEN $4 = 'arguments' THEN true ELSE deliberation_reading_log.read_top_arguments END, + reading_time_seconds = deliberation_reading_log.reading_time_seconds + $3, + updated_at = NOW()"#, + proposal_id, + user_id, + time_seconds, + read_type + ) + .execute(pool) + .await?; + + Ok(()) + } + + /// Add an argument + pub async fn add_argument( + pool: &PgPool, + proposal_id: Uuid, + parent_id: Option, + stance: &str, + title: &str, + content: &str, + author_id: Uuid, + ) -> Result { + let argument_id: Uuid = sqlx::query_scalar( + "SELECT add_deliberation_argument($1, $2, $3::argument_stance, $4, $5, $6)" + ) + .bind(proposal_id) + .bind(parent_id) + .bind(stance) + .bind(title) + .bind(content) + .bind(author_id) + .fetch_one(pool) + .await?; + + Ok(argument_id) + } + + /// Vote on an argument + pub async fn vote_argument( + pool: &PgPool, + 
argument_id: Uuid, + user_id: Uuid, + vote_type: &str, + ) -> Result<(), PluginError> { + sqlx::query("SELECT vote_on_argument($1, $2, $3)") + .bind(argument_id) + .bind(user_id) + .bind(vote_type) + .execute(pool) + .await?; + + Ok(()) + } + + /// Get arguments for a proposal + pub async fn get_arguments( + pool: &PgPool, + proposal_id: Uuid, + stance: Option<&str>, + limit: i64, + ) -> Result, PluginError> { + let arguments = if let Some(s) = stance { + sqlx::query_as!( + Argument, + r#"SELECT + id, proposal_id, stance::text AS "stance!", + title, content, author_id, + upvotes, downvotes, quality_score::float8 AS quality_score + FROM deliberation_arguments + WHERE proposal_id = $1 AND stance::text = $2 AND NOT is_hidden AND parent_id IS NULL + ORDER BY quality_score DESC NULLS LAST + LIMIT $3"#, + proposal_id, + s, + limit + ) + .fetch_all(pool) + .await? + } else { + sqlx::query_as!( + Argument, + r#"SELECT + id, proposal_id, stance::text AS "stance!", + title, content, author_id, + upvotes, downvotes, quality_score::float8 AS quality_score + FROM deliberation_arguments + WHERE proposal_id = $1 AND NOT is_hidden AND parent_id IS NULL + ORDER BY quality_score DESC NULLS LAST + LIMIT $2"#, + proposal_id, + limit + ) + .fetch_all(pool) + .await? + }; + + Ok(arguments) + } + + /// Create or update a summary + pub async fn upsert_summary( + pool: &PgPool, + proposal_id: Uuid, + summary_type: &str, + content: &str, + key_points: Value, + editor_id: Uuid, + ) -> Result { + // Check if summary exists + let existing = sqlx::query_scalar!( + "SELECT id FROM deliberation_summaries WHERE proposal_id = $1 AND summary_type = $2::summary_type", + proposal_id, + summary_type as _ + ) + .fetch_optional(pool) + .await?; + + if let Some(summary_id) = existing { + // Save to history + sqlx::query!( + r#"INSERT INTO summary_edit_history (summary_id, version, content, key_points, editor_id) + SELECT id, version, content, key_points, last_editor_id + FROM deliberation_summaries WHERE id = $1"#, + summary_id + ) + .execute(pool) + .await?; + + // Update + sqlx::query!( + r#"UPDATE deliberation_summaries SET + content = $2, key_points = $3, last_editor_id = $4, + version = version + 1, edit_count = edit_count + 1, + is_approved = false, updated_at = NOW() + WHERE id = $1"#, + summary_id, + content, + key_points, + editor_id + ) + .execute(pool) + .await?; + + Ok(summary_id) + } else { + // Create new + let summary_id = sqlx::query_scalar!( + r#"INSERT INTO deliberation_summaries ( + proposal_id, summary_type, content, key_points, last_editor_id + ) VALUES ($1, $2::summary_type, $3, $4, $5) + RETURNING id"#, + proposal_id, + summary_type as _, + content, + key_points, + editor_id + ) + .fetch_one(pool) + .await?; + + Ok(summary_id) + } + } + + /// Get summaries for a proposal + pub async fn get_summaries( + pool: &PgPool, + proposal_id: Uuid, + ) -> Result, PluginError> { + let summaries = sqlx::query_as!( + Summary, + r#"SELECT + id, proposal_id, summary_type::text AS "summary_type!", + content, version, is_approved + FROM deliberation_summaries + WHERE proposal_id = $1 + ORDER BY summary_type"#, + proposal_id + ) + .fetch_all(pool) + .await?; + + Ok(summaries) + } + + /// Approve a summary + pub async fn approve_summary( + pool: &PgPool, + summary_id: Uuid, + approver_id: Uuid, + ) -> Result<(), PluginError> { + sqlx::query!( + r#"UPDATE deliberation_summaries SET + is_approved = true, approved_by = $2, approved_at = NOW() + WHERE id = $1"#, + summary_id, + approver_id + ) + .execute(pool) + .await?; + + 
Ok(()) + } + + /// Update metrics for all active proposals + pub async fn update_all_metrics(pool: &PgPool) -> Result { + let proposals = sqlx::query_scalar!( + r#"SELECT DISTINCT proposal_id FROM deliberation_arguments + WHERE created_at > NOW() - INTERVAL '1 day'"# + ) + .fetch_all(pool) + .await?; + + let mut count = 0; + for proposal_id in proposals { + sqlx::query("SELECT calculate_deliberation_metrics($1)") + .bind(proposal_id) + .execute(pool) + .await?; + count += 1; + } + + Ok(count) + } + + /// Get deliberation overview + pub async fn get_overview(pool: &PgPool, proposal_id: Uuid) -> Result { + let metrics = sqlx::query!( + r#"SELECT + total_arguments, pro_arguments, con_arguments, neutral_arguments, + unique_participants, substantive_ratio::float8 AS substantive_ratio, + balance_score::float8 AS balance_score + FROM deliberation_metrics + WHERE proposal_id = $1 + ORDER BY calculated_at DESC + LIMIT 1"#, + proposal_id + ) + .fetch_optional(pool) + .await?; + + let top_pro = Self::get_arguments(pool, proposal_id, Some("pro"), 3).await?; + let top_con = Self::get_arguments(pool, proposal_id, Some("con"), 3).await?; + let summaries = Self::get_summaries(pool, proposal_id).await?; + + Ok(json!({ + "metrics": metrics.map(|m| json!({ + "total_arguments": m.total_arguments, + "pro": m.pro_arguments, + "con": m.con_arguments, + "neutral": m.neutral_arguments, + "participants": m.unique_participants, + "substantive_ratio": m.substantive_ratio, + "balance_score": m.balance_score + })), + "top_pro_arguments": top_pro, + "top_con_arguments": top_con, + "summaries": summaries + })) + } +} diff --git a/backend/src/plugins/hooks.rs b/backend/src/plugins/hooks.rs new file mode 100644 index 0000000..76e8133 --- /dev/null +++ b/backend/src/plugins/hooks.rs @@ -0,0 +1,118 @@ +use std::{ + collections::HashMap, + future::Future, + pin::Pin, + sync::Arc, +}; + +use chrono::{DateTime, Utc}; +use serde_json::Value; +use sqlx::PgPool; +use uuid::Uuid; + +#[derive(Debug, Clone)] +pub struct HookContext { + pub pool: PgPool, + pub community_id: Option, + pub actor_user_id: Option, +} + +impl HookContext { + pub async fn emit_public_event( + &self, + plugin_name: Option<&str>, + event_type: &str, + payload: Value, + ) -> Result<(), sqlx::Error> { + sqlx::query!( + r#"INSERT INTO public_events (community_id, actor_user_id, plugin_name, event_type, payload) + VALUES ($1, $2, $3, $4, $5)"#, + self.community_id, + self.actor_user_id, + plugin_name, + event_type, + payload + ) + .execute(&self.pool) + .await?; + + Ok(()) + } +} + +#[derive(Debug, Clone)] +#[allow(dead_code)] +pub struct HookInvocation { + pub hook: String, + pub plugin: String, + pub priority: i32, + pub invoked_at: DateTime, +} + +#[derive(thiserror::Error, Debug)] +pub enum PluginError { + #[error("plugin error: {0}")] + Message(String), + #[error(transparent)] + Sqlx(#[from] sqlx::Error), +} + +pub type ActionFuture = Pin> + Send>>; +pub type FilterFuture = Pin> + Send>>; + +pub type ActionCallback = Arc ActionFuture + Send + Sync>; +pub type FilterCallback = Arc FilterFuture + Send + Sync>; + +#[derive(Clone)] +pub struct ActionHandler { + pub plugin: String, + pub priority: i32, + pub callback: ActionCallback, +} + +#[derive(Clone)] +pub struct FilterHandler { + pub plugin: String, + #[allow(dead_code)] + pub priority: i32, + pub callback: FilterCallback, +} + +#[derive(Default)] +pub struct HookRegistry { + actions: HashMap>, + filters: HashMap>, +} + +impl HookRegistry { + pub fn new() -> Self { + Self::default() + } + + pub fn 
add_action(&mut self, hook: &str, handler: ActionHandler) { + let entry = self.actions.entry(hook.to_string()).or_default(); + entry.push(handler); + entry.sort_by_key(|h| h.priority); + } + + #[allow(dead_code)] + pub fn add_filter(&mut self, hook: &str, handler: FilterHandler) { + let entry = self.filters.entry(hook.to_string()).or_default(); + entry.push(handler); + entry.sort_by_key(|h| h.priority); + } + + pub fn actions_for(&self, hook: &str) -> &[ActionHandler] { + self.actions + .get(hook) + .map(|v| v.as_slice()) + .unwrap_or(&[]) + } + + pub fn filters_for(&self, hook: &str) -> &[FilterHandler] { + self.filters + .get(hook) + .map(|v| v.as_slice()) + .unwrap_or(&[]) + } +} diff --git a/backend/src/plugins/manager.rs b/backend/src/plugins/manager.rs new file mode 100644 index 0000000..08aa934 --- /dev/null +++ b/backend/src/plugins/manager.rs @@ -0,0 +1,673 @@ +use std::{collections::{HashMap, HashSet}, sync::Arc}; + +use async_trait::async_trait; +use serde_json::{json, Value}; +use sqlx::PgPool; +use uuid::Uuid; + +use crate::plugins::hooks::{ActionHandler, FilterHandler, HookContext, HookRegistry, PluginError}; +use crate::plugins::wasm::WasmPlugin; +use crate::plugins::wasm::host_api::PluginManifest; +use crate::plugins::wasm::runtime::WasmRuntime; + +#[derive(Debug, Clone, Copy)] +pub enum PluginScope { + #[allow(dead_code)] + Global, + Community, +} + +#[derive(Debug, Clone)] +pub struct PluginMetadata { + pub name: &'static str, + pub version: &'static str, + pub description: &'static str, + pub is_core: bool, + pub scope: PluginScope, + pub default_enabled: bool, + pub settings_schema: Option, +} + +#[async_trait] +pub trait Plugin: Send + Sync { + fn metadata(&self) -> PluginMetadata; + fn register(&self, system: &mut PluginSystem); + + async fn activate(&self, _ctx: HookContext, _settings: Value) -> Result<(), PluginError> { + Ok(()) + } + + async fn deactivate(&self, _ctx: HookContext, _settings: Value) -> Result<(), PluginError> { + Ok(()) + } + + async fn settings_updated( + &self, + _ctx: HookContext, + _old_settings: Value, + _new_settings: Value, + ) -> Result<(), PluginError> { + Ok(()) + } +} + +pub struct PluginSystem { + hooks: HookRegistry, +} + +impl PluginSystem { + pub fn new() -> Self { + Self { + hooks: HookRegistry::new(), + } + } + + pub fn add_action( + &mut self, + hook: &str, + plugin: String, + priority: i32, + callback: crate::plugins::hooks::ActionCallback, + ) { + self.hooks.add_action( + hook, + ActionHandler { + plugin, + priority, + callback, + }, + ); + } + + #[allow(dead_code)] + pub fn add_filter( + &mut self, + hook: &str, + plugin: String, + priority: i32, + callback: crate::plugins::hooks::FilterCallback, + ) { + self.hooks.add_filter( + hook, + FilterHandler { + plugin, + priority, + callback, + }, + ); + } +} + +pub struct PluginManager { + pool: PgPool, + system: PluginSystem, + plugins: Vec>, + wasm_plugins: HashMap>, + wasm_runtime: Option, +} + +impl PluginManager { + pub fn new(pool: PgPool) -> Self { + let wasm_runtime = WasmRuntime::new().ok(); + if wasm_runtime.is_none() { + tracing::warn!("Failed to initialize WASM runtime - third-party plugins disabled"); + } + Self { + pool, + system: PluginSystem::new(), + plugins: Vec::new(), + wasm_plugins: HashMap::new(), + wasm_runtime, + } + } + + pub fn register_plugin(&mut self, plugin: Arc) { + self.plugins.push(plugin); + } + + pub fn register_builtin_plugins(mut self) -> Self { + self.register_plugin(Arc::new( + 
crate::plugins::builtin::comment_notifications::CommentNotificationsPlugin::new(), + )); + self.register_plugin(Arc::new( + crate::plugins::builtin::moderation_ledger::ModerationLedgerPlugin, + )); + self.register_plugin(Arc::new( + crate::plugins::builtin::decision_workflows::DecisionWorkflowsPlugin::new(), + )); + self.register_plugin(Arc::new( + crate::plugins::builtin::self_moderation::SelfModerationPlugin::new(), + )); + self.register_plugin(Arc::new( + crate::plugins::builtin::proposal_lifecycle::ProposalLifecyclePlugin::new(), + )); + self.register_plugin(Arc::new( + crate::plugins::builtin::governance_analytics::GovernanceAnalyticsPlugin::new(), + )); + self.register_plugin(Arc::new( + crate::plugins::builtin::conflict_resolution::ConflictResolutionPlugin::new(), + )); + self.register_plugin(Arc::new( + crate::plugins::builtin::structured_deliberation::StructuredDeliberationPlugin::new(), + )); + self.register_plugin(Arc::new( + crate::plugins::builtin::public_data_export::PublicDataExportPlugin::new(), + )); + self.register_plugin(Arc::new( + crate::plugins::builtin::federation::FederationPlugin::new(), + )); + self + } + + pub async fn ensure_default_community_plugins( + &self, + community_id: Uuid, + actor_user_id: Option, + ) -> Result<(), PluginError> { + let default_names: Vec = self + .plugins + .iter() + .map(|p| p.metadata()) + .filter(|m| matches!(m.scope, PluginScope::Community) && m.default_enabled) + .map(|m| m.name.to_string()) + .collect(); + + if default_names.is_empty() { + return Ok(()); + } + + let before = sqlx::query!( + r#" + SELECT + p.name, + COALESCE(cp.is_active, false) as "is_active!", + COALESCE(cp.settings, '{}'::jsonb) as "settings!: serde_json::Value" + FROM plugins p + LEFT JOIN community_plugins cp + ON cp.plugin_id = p.id AND cp.community_id = $1 + WHERE p.is_active = true + AND p.name = ANY($2) + "#, + community_id, + &default_names + ) + .fetch_all(&self.pool) + .await?; + + sqlx::query!( + r#" + INSERT INTO community_plugins (community_id, plugin_id, settings, is_active) + SELECT $1, p.id, '{}'::jsonb, true + FROM plugins p + WHERE p.is_active = true + AND p.name = ANY($2) + ON CONFLICT (community_id, plugin_id) + DO UPDATE SET is_active = EXCLUDED.is_active + "#, + community_id, + &default_names + ) + .execute(&self.pool) + .await?; + + let after = sqlx::query!( + r#" + SELECT + p.name, + COALESCE(cp.is_active, false) as "is_active!", + COALESCE(cp.settings, '{}'::jsonb) as "settings!: serde_json::Value" + FROM plugins p + LEFT JOIN community_plugins cp + ON cp.plugin_id = p.id AND cp.community_id = $1 + WHERE p.is_active = true + AND p.name = ANY($2) + "#, + community_id, + &default_names + ) + .fetch_all(&self.pool) + .await?; + + let ctx = HookContext { + pool: self.pool.clone(), + community_id: Some(community_id), + actor_user_id, + }; + + for row in after { + let was_active = before + .iter() + .find(|b| b.name == row.name) + .map(|b| b.is_active) + .unwrap_or(false); + + if !was_active && row.is_active { + self.invoke_activate(&row.name, ctx.clone(), row.settings.clone()) + .await; + + let _ = ctx + .emit_public_event( + Some(&row.name), + "plugin.activated", + json!({"reason": "default_enabled"}), + ) + .await; + + self.do_action( + "plugin.activated", + ctx.clone(), + json!({"plugin": row.name, "reason": "default_enabled"}), + ) + .await; + } + } + + Ok(()) + } + + async fn load_wasm_plugins(&mut self) -> Result<(), PluginError> { + let Some(ref runtime) = self.wasm_runtime else { + return Ok(()); + }; + + let packages = sqlx::query!( + 
r#" + SELECT DISTINCT pp.id, pp.name, pp.wasm_bytes, pp.manifest + FROM plugin_packages pp + JOIN community_plugin_packages cpp ON cpp.package_id = pp.id + WHERE cpp.is_active = true + "# + ) + .fetch_all(&self.pool) + .await?; + + for pkg in packages { + let manifest: PluginManifest = match serde_json::from_value(pkg.manifest.clone()) { + Ok(m) => m, + Err(e) => { + tracing::error!( + plugin = %pkg.name, + "Failed to parse WASM plugin manifest: {}", e + ); + let _ = sqlx::query!( + "UPDATE community_plugin_packages SET is_active = false WHERE package_id = $1", + pkg.id + ) + .execute(&self.pool) + .await; + continue; + } + }; + + let compiled = match runtime.compile(&pkg.wasm_bytes) { + Ok(c) => Arc::new(c), + Err(e) => { + tracing::error!( + plugin = %pkg.name, + "Failed to compile WASM plugin: {}", e + ); + continue; + } + }; + + let wasm_plugin = Arc::new(WasmPlugin::new(pkg.id, manifest, compiled)); + self.wasm_plugins.insert(pkg.id, wasm_plugin.clone()); + self.plugins.push(wasm_plugin); + + tracing::info!(plugin = %pkg.name, "Loaded WASM plugin"); + } + + Ok(()) + } + + pub async fn initialize(mut self) -> Result, PluginError> { + for plugin in &self.plugins { + let meta = plugin.metadata(); + sqlx::query!( + r#"INSERT INTO plugins (name, version, description, is_core, is_active, settings_schema) + VALUES ($1, $2, $3, $4, true, $5) + ON CONFLICT (name) DO UPDATE + SET version = EXCLUDED.version, + description = EXCLUDED.description, + is_core = EXCLUDED.is_core, + settings_schema = EXCLUDED.settings_schema"#, + meta.name, + meta.version, + meta.description, + meta.is_core, + meta.settings_schema + ) + .execute(&self.pool) + .await?; + } + + if let Err(e) = self.load_wasm_plugins().await { + tracing::error!("Failed to load WASM plugins: {}", e); + } + + let communities = sqlx::query!("SELECT id FROM communities WHERE is_active = true") + .fetch_all(&self.pool) + .await?; + + for community in communities { + self.ensure_default_community_plugins(community.id, None).await?; + } + + for plugin in &self.plugins { + plugin.register(&mut self.system); + } + + Ok(Arc::new(self)) + } + + async fn active_plugins(&self, community_id: Option) -> Result, PluginError> { + let mut active: HashSet = HashSet::new(); + + let core = sqlx::query!("SELECT name FROM plugins WHERE is_active = true AND is_core = true") + .fetch_all(&self.pool) + .await?; + for row in core { + active.insert(row.name); + } + + let global_names: Vec = self + .plugins + .iter() + .filter(|p| matches!(p.metadata().scope, PluginScope::Global)) + .map(|p| p.metadata().name.to_string()) + .collect(); + + if !global_names.is_empty() { + let global = sqlx::query!( + "SELECT name FROM plugins WHERE is_active = true AND name = ANY($1)", + &global_names + ) + .fetch_all(&self.pool) + .await?; + for row in global { + active.insert(row.name); + } + } + + if let Some(cid) = community_id { + let community = sqlx::query!( + r#"SELECT p.name + FROM plugins p + JOIN community_plugins cp ON cp.plugin_id = p.id + WHERE cp.community_id = $1 AND cp.is_active = true AND p.is_active = true"#, + cid + ) + .fetch_all(&self.pool) + .await?; + for row in community { + active.insert(row.name); + } + + // WASM plugin packages enabled for this community. 
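+            // Handlers registered by WASM packages are keyed as "wasm:{package_id}", so the
+            // active package ids found here are inserted under that prefix to match them below.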
+ let wasm = sqlx::query!( + r#"SELECT DISTINCT pp.id + FROM plugin_packages pp + JOIN community_plugin_packages cpp ON cpp.package_id = pp.id + WHERE cpp.community_id = $1 + AND cpp.is_active = true"#, + cid + ) + .fetch_all(&self.pool) + .await?; + for row in wasm { + active.insert(format!("wasm:{}", row.id)); + } + } else { + let community = sqlx::query!( + r#"SELECT DISTINCT p.name + FROM plugins p + JOIN community_plugins cp ON cp.plugin_id = p.id + WHERE cp.is_active = true AND p.is_active = true"# + ) + .fetch_all(&self.pool) + .await?; + for row in community { + active.insert(row.name); + } + } + + Ok(active) + } + + pub async fn do_action(&self, hook: &str, ctx: HookContext, payload: Value) { + let active = match self.active_plugins(ctx.community_id).await { + Ok(s) => s, + Err(e) => { + tracing::error!("Failed to resolve active plugins for hook {}: {}", hook, e); + return; + } + }; + + for handler in self.system.hooks.actions_for(hook) { + if !active.contains(&handler.plugin) { + continue; + } + + if let Err(e) = (handler.callback)(ctx.clone(), payload.clone()).await { + tracing::error!("Plugin {} action {} failed: {}", handler.plugin, hook, e); + } + } + } + + pub async fn do_wasm_action_for_community(&self, hook: &str, community_id: Uuid, payload: Value) { + let wasm = match sqlx::query!( + r#"SELECT DISTINCT pp.id + FROM plugin_packages pp + JOIN community_plugin_packages cpp ON cpp.package_id = pp.id + WHERE cpp.community_id = $1 + AND cpp.is_active = true"#, + community_id + ) + .fetch_all(&self.pool) + .await + { + Ok(rows) => rows, + Err(e) => { + tracing::error!("Failed to resolve active WASM plugins for hook {}: {}", hook, e); + return; + } + }; + + let mut active: HashSet = HashSet::new(); + for row in wasm { + active.insert(format!("wasm:{}", row.id)); + } + + if active.is_empty() { + return; + } + + let ctx = HookContext { + pool: self.pool.clone(), + community_id: Some(community_id), + actor_user_id: None, + }; + + for handler in self.system.hooks.actions_for(hook) { + if !handler.plugin.starts_with("wasm:") { + continue; + } + if !active.contains(&handler.plugin) { + continue; + } + + if let Err(e) = (handler.callback)(ctx.clone(), payload.clone()).await { + tracing::error!("Plugin {} action {} failed: {}", handler.plugin, hook, e); + } + } + } + + pub async fn apply_filters( + &self, + hook: &str, + ctx: HookContext, + initial: Value, + ) -> Result { + let active = self.active_plugins(ctx.community_id).await?; + let mut value = initial; + + for handler in self.system.hooks.filters_for(hook) { + if !active.contains(&handler.plugin) { + continue; + } + value = (handler.callback)(ctx.clone(), value).await?; + } + + Ok(value) + } + + fn plugin_by_name(&self, plugin_name: &str) -> Option> { + for p in &self.plugins { + if p.metadata().name == plugin_name { + return Some(p.clone()); + } + } + None + } + + fn wasm_plugin_by_package_id(&self, package_id: Uuid) -> Option> { + self.wasm_plugins.get(&package_id).cloned() + } + + async fn invoke_activate(&self, plugin_name: &str, ctx: HookContext, settings: Value) { + let Some(plugin) = self.plugin_by_name(plugin_name) else { + return; + }; + + if let Err(e) = plugin.activate(ctx, settings).await { + tracing::error!("Plugin {} activate failed: {}", plugin_name, e); + } + } + + async fn invoke_deactivate(&self, plugin_name: &str, ctx: HookContext, settings: Value) { + let Some(plugin) = self.plugin_by_name(plugin_name) else { + return; + }; + + if let Err(e) = plugin.deactivate(ctx, settings).await { + tracing::error!("Plugin 
{} deactivate failed: {}", plugin_name, e); + } + } + + async fn invoke_settings_updated( + &self, + plugin_name: &str, + ctx: HookContext, + old_settings: Value, + new_settings: Value, + ) { + let Some(plugin) = self.plugin_by_name(plugin_name) else { + return; + }; + + if let Err(e) = plugin + .settings_updated(ctx, old_settings, new_settings) + .await + { + tracing::error!("Plugin {} settings_updated failed: {}", plugin_name, e); + } + } + + pub async fn handle_community_plugin_change( + &self, + community_id: Uuid, + actor_user_id: Option, + plugin_name: &str, + old_is_active: bool, + old_settings: Value, + new_is_active: bool, + new_settings: Value, + ) { + let ctx = HookContext { + pool: self.pool.clone(), + community_id: Some(community_id), + actor_user_id, + }; + + if old_settings != new_settings { + self.invoke_settings_updated( + plugin_name, + ctx.clone(), + old_settings.clone(), + new_settings.clone(), + ) + .await; + + self.do_action( + "plugin.settings_updated", + ctx.clone(), + json!({"plugin": plugin_name}), + ) + .await; + } + + if !old_is_active && new_is_active { + self.invoke_activate(plugin_name, ctx.clone(), new_settings.clone()) + .await; + self.do_action( + "plugin.activated", + ctx.clone(), + json!({"plugin": plugin_name}), + ) + .await; + } + + if old_is_active && !new_is_active { + self.invoke_deactivate(plugin_name, ctx.clone(), old_settings.clone()) + .await; + self.do_action( + "plugin.deactivated", + ctx, + json!({"plugin": plugin_name}), + ) + .await; + } + } + + pub async fn handle_community_plugin_package_change( + &self, + community_id: Uuid, + actor_user_id: Option, + package_id: Uuid, + old_is_active: bool, + old_settings: Value, + new_is_active: bool, + new_settings: Value, + ) { + let Some(plugin) = self.wasm_plugin_by_package_id(package_id) else { + return; + }; + + let ctx = HookContext { + pool: self.pool.clone(), + community_id: Some(community_id), + actor_user_id, + }; + + if old_settings != new_settings { + if let Err(e) = plugin + .settings_updated(ctx.clone(), old_settings.clone(), new_settings.clone()) + .await + { + tracing::error!("WASM package {} settings_updated failed: {}", package_id, e); + } + } + + if !old_is_active && new_is_active { + if let Err(e) = plugin.activate(ctx.clone(), new_settings.clone()).await { + tracing::error!("WASM package {} activate failed: {}", package_id, e); + } + } + + if old_is_active && !new_is_active { + if let Err(e) = plugin.deactivate(ctx.clone(), old_settings.clone()).await { + tracing::error!("WASM package {} deactivate failed: {}", package_id, e); + } + } + } +} diff --git a/backend/src/plugins/mod.rs b/backend/src/plugins/mod.rs new file mode 100644 index 0000000..7499565 --- /dev/null +++ b/backend/src/plugins/mod.rs @@ -0,0 +1,7 @@ +pub mod builtin; +pub mod hooks; +pub mod manager; +pub mod wasm; + +pub use hooks::{HookContext, PluginError}; +pub use manager::{Plugin, PluginManager, PluginMetadata, PluginScope, PluginSystem}; diff --git a/backend/src/plugins/wasm/host_api.rs b/backend/src/plugins/wasm/host_api.rs new file mode 100644 index 0000000..0875f4d --- /dev/null +++ b/backend/src/plugins/wasm/host_api.rs @@ -0,0 +1,570 @@ +//! Host API for WASM plugins. +//! +//! Provides the bridge between WASM plugin code and the host environment, +//! including logging, settings access, KV store, and HTTP capabilities. 
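+//!
+//! The sketch below is illustrative only and not part of this module: it shows how a
+//! guest-side plugin *might* declare these imports and unpack the packed `u64` results,
+//! based on the signatures registered in `register_host_functions` below. The `unpack`
+//! helper is a placeholder name used purely for illustration.
+//!
+//! ```ignore
+//! #[link(wasm_import_module = "env")]
+//! extern "C" {
+//!     fn host_log(ptr: u32, len: u32, level: u32);
+//!     fn host_get_setting(key_ptr: u32, key_len: u32) -> u64;
+//!     fn host_kv_get(key_ptr: u32, key_len: u32) -> u64;
+//!     fn host_kv_set(key_ptr: u32, key_len: u32, val_ptr: u32, val_len: u32) -> u32;
+//!     fn host_emit_event(event_ptr: u32, event_len: u32, payload_ptr: u32, payload_len: u32) -> u32;
+//!     fn host_get_result(dest_ptr: u32, max_len: u32) -> u32;
+//!     fn host_http_request(url_ptr: u32, url_len: u32, method_ptr: u32, method_len: u32, body_ptr: u32, body_len: u32) -> u64;
+//! }
+//!
+//! // Packed results carry an error code in the high 32 bits and the payload length in
+//! // the low 32 bits; on success (code 0) the payload is copied out via `host_get_result`.
+//! fn unpack(result: u64) -> (u32, u32) {
+//!     ((result >> 32) as u32, (result & 0xFFFF_FFFF) as u32)
+//! }
+//! ```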
+ +use std::collections::HashMap; +use std::sync::Arc; + +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use sqlx::PgPool; +use tokio::runtime::Handle; +use tokio::task::block_in_place; +use tokio::sync::Mutex; +use uuid::Uuid; +use wasmtime::{Linker, StoreLimits}; + +use crate::plugins::PluginError; + +/// Capability identifiers +pub const CAP_KV_STORE: &str = "kv_store"; +pub const CAP_EMIT_EVENTS: &str = "emit_events"; +pub const CAP_SETTINGS: &str = "settings"; + +/// Capability identifier for outbound HTTP requests. +pub const CAP_OUTBOUND_HTTP: &str = "outbound_http"; + +/// Plugin capability configuration. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Capability { + pub name: String, + pub allowed: bool, + pub config: Value, +} + +/// Host state accessible to WASM plugins during execution. +#[derive(Clone)] +#[allow(dead_code)] // Fields used by host functions when fully implemented +pub struct HostState { + pub plugin_name: String, + pub community_id: Option, + pub actor_user_id: Option, + pub pool: PgPool, + pub package_id: Uuid, + pub capabilities: Vec, + pub log_buffer: Arc>>, + pub kv_cache: Arc>>, + pub result_buffer: Arc>, + pub egress_allowlist: Vec, +} + +impl HostState { + pub fn new( + plugin_name: String, + community_id: Option, + actor_user_id: Option, + pool: PgPool, + package_id: Uuid, + capabilities: Vec, + ) -> Self { + let egress_allowlist = capabilities + .iter() + .find(|c| c.name == CAP_OUTBOUND_HTTP && c.allowed) + .and_then(|c| c.config.get("allowlist")) + .and_then(|v| v.as_array()) + .map(|arr| arr.iter().filter_map(|v| v.as_str().map(String::from)).collect()) + .unwrap_or_default(); + + Self { + plugin_name, + community_id, + actor_user_id, + pool, + package_id, + capabilities, + log_buffer: Arc::new(Mutex::new(Vec::new())), + kv_cache: Arc::new(Mutex::new(HashMap::new())), + result_buffer: Arc::new(std::sync::Mutex::new(ResultBuffer::default())), + egress_allowlist, + } + } + + pub fn has_capability(&self, name: &str) -> bool { + self.capabilities.iter().any(|c| c.name == name && c.allowed) + } + + pub fn is_url_allowed(&self, url: &str) -> bool { + if self.egress_allowlist.is_empty() { + return false; + } + if self.egress_allowlist.iter().any(|p| p == "*") { + return true; + } + if let Ok(parsed) = reqwest::Url::parse(url) { + if let Some(host) = parsed.host_str() { + return self.egress_allowlist.iter().any(|pattern| { + if pattern.starts_with("*.") { + let suffix = &pattern[1..]; + host.ends_with(suffix) || host == &pattern[2..] + } else { + host == pattern + } + }); + } + } + false + } +} + +/// Combined state with resource limits for WASM execution. +pub struct HostStateWithLimits { + pub inner: HostState, + pub limits: StoreLimits, +} + +/// Result buffer for returning data to WASM +#[derive(Default)] +pub struct ResultBuffer { + pub data: Vec, +} + +impl HostStateWithLimits { + /// Store a result in the result buffer and return its length + pub fn store_result(&mut self, data: &[u8]) -> u32 { + self.inner.result_buffer.lock().unwrap().data = data.to_vec(); + data.len() as u32 + } + + /// Get the result buffer contents + pub fn get_result(&self) -> Vec { + self.inner.result_buffer.lock().unwrap().data.clone() + } +} + +/// Registers host functions for WASM plugins. 
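+///
+/// The imports are registered under the `"env"` module: `host_log`, `host_get_setting`,
+/// `host_kv_get`, `host_kv_set`, `host_emit_event`, `host_get_result` and
+/// `host_http_request`. Functions that return data place it in the per-call result
+/// buffer and report a packed `u64` status; guests copy the bytes out with
+/// `host_get_result`.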
+pub fn register_host_functions(linker: &mut Linker) -> Result<(), PluginError> { + // host_log: Allow plugins to emit log messages + linker + .func_wrap("env", "host_log", |mut caller: wasmtime::Caller<'_, HostStateWithLimits>, ptr: u32, len: u32, level: u32| { + let memory = caller.get_export("memory").and_then(|e| e.into_memory()); + if let Some(mem) = memory { + let mut buf = vec![0u8; len as usize]; + if mem.read(&caller, ptr as usize, &mut buf).is_ok() { + if let Ok(msg) = String::from_utf8(buf) { + let level_str = match level { + 0 => "TRACE", + 1 => "DEBUG", + 2 => "INFO", + 3 => "WARN", + _ => "ERROR", + }; + let plugin_name = caller.data().inner.plugin_name.clone(); + tracing::info!(plugin = %plugin_name, level = %level_str, "{}", msg); + } + } + } + }) + .map_err(|e| PluginError::Message(format!("Failed to register host_log: {e}")))?; + + // host_get_setting: Retrieve plugin settings + // Returns: packed u64 with error code (high 32 bits) and data length (low 32 bits) + // Data is stored in result buffer, retrieve with host_get_result + linker + .func_wrap("env", "host_get_setting", |mut caller: wasmtime::Caller<'_, HostStateWithLimits>, key_ptr: u32, key_len: u32| -> u64 { + let memory = match caller.get_export("memory").and_then(|e| e.into_memory()) { + Some(m) => m, + None => return pack_result(1, 0), + }; + + let mut key_buf = vec![0u8; key_len as usize]; + if memory.read(&caller, key_ptr as usize, &mut key_buf).is_err() { + return pack_result(2, 0); + } + + let key = match String::from_utf8(key_buf) { + Ok(s) => s, + Err(_) => return pack_result(3, 0), + }; + + let state = caller.data(); + if !state.inner.has_capability(CAP_SETTINGS) { + tracing::warn!(plugin = %state.inner.plugin_name, "Settings access denied: no capability"); + return pack_result(6, 0); + } + + let plugin_name = state.inner.plugin_name.clone(); + let community_id = state.inner.community_id; + let package_id = state.inner.package_id; + let pool = state.inner.pool.clone(); + + let fetched: Value = match block_in_place(|| { + Handle::current().block_on(async { + if let Some(cid) = community_id { + // First: per-community WASM package settings. + if let Some(v) = sqlx::query_scalar::<_, Option>( + r#"SELECT settings -> $3 + FROM community_plugin_packages + WHERE community_id = $1 AND package_id = $2"#, + ) + .bind(cid) + .bind(package_id) + .bind(&key) + .fetch_optional(&pool) + .await + .ok() + .flatten() + .flatten() + { + return v; + } + } + + // Second: plugin settings via community_plugins / default_settings. 
+ let v: Value = sqlx::query_scalar( + "SELECT get_plugin_setting($1, $2, $3)" + ) + .bind(&plugin_name) + .bind(community_id) + .bind(&key) + .fetch_one(&pool) + .await + .unwrap_or(Value::Null); + + v + }) + }) { + v => v, + }; + + let result = serde_json::to_string(&fetched).unwrap_or("null".to_string()); + let len = caller.data_mut().store_result(result.as_bytes()); + tracing::debug!(plugin = %caller.data().inner.plugin_name, key = %key, "Get setting"); + pack_result(0, len) + }) + .map_err(|e| PluginError::Message(format!("Failed to register host_get_setting: {e}")))?; + + // host_kv_get: Get value from plugin KV store (in-memory cache) + // Returns: packed u64 with error code (high 32 bits) and data length (low 32 bits) + linker + .func_wrap("env", "host_kv_get", |mut caller: wasmtime::Caller<'_, HostStateWithLimits>, key_ptr: u32, key_len: u32| -> u64 { + let memory = match caller.get_export("memory").and_then(|e| e.into_memory()) { + Some(m) => m, + None => return pack_result(1, 0), + }; + + let mut key_buf = vec![0u8; key_len as usize]; + if memory.read(&caller, key_ptr as usize, &mut key_buf).is_err() { + return pack_result(2, 0); + } + + let key = match String::from_utf8(key_buf) { + Ok(s) => s, + Err(_) => return pack_result(3, 0), + }; + + let state = caller.data(); + if !state.inner.has_capability(CAP_KV_STORE) { + tracing::warn!(plugin = %state.inner.plugin_name, "KV access denied: no capability"); + return pack_result(6, 0); + } + + let plugin_name = state.inner.plugin_name.clone(); + let community_id = state.inner.community_id; + let pool = state.inner.pool.clone(); + + let fetched: Value = block_in_place(|| { + Handle::current().block_on(async { + sqlx::query_scalar::<_, Option>("SELECT plugin_kv_get($1, $2, $3)") + .bind(&plugin_name) + .bind(community_id) + .bind(&key) + .fetch_one(&pool) + .await + .ok() + .flatten() + .unwrap_or(Value::Null) + }) + }); + + let result = serde_json::to_string(&fetched).unwrap_or("null".to_string()); + let len = caller.data_mut().store_result(result.as_bytes()); + tracing::debug!(plugin = %caller.data().inner.plugin_name, key = %key, "KV get"); + pack_result(0, len) + }) + .map_err(|e| PluginError::Message(format!("Failed to register host_kv_get: {e}")))?; + + // host_kv_set: Set value in plugin KV store (in-memory cache, persisted after execution) + // Returns: 0 on success, error code on failure + linker + .func_wrap("env", "host_kv_set", |mut caller: wasmtime::Caller<'_, HostStateWithLimits>, key_ptr: u32, key_len: u32, val_ptr: u32, val_len: u32| -> u32 { + let memory = match caller.get_export("memory").and_then(|e| e.into_memory()) { + Some(m) => m, + None => return 1, + }; + + let mut key_buf = vec![0u8; key_len as usize]; + let mut val_buf = vec![0u8; val_len as usize]; + + if memory.read(&caller, key_ptr as usize, &mut key_buf).is_err() { + return 2; + } + if memory.read(&caller, val_ptr as usize, &mut val_buf).is_err() { + return 3; + } + + let key = match String::from_utf8(key_buf) { + Ok(s) => s, + Err(_) => return 4, + }; + let value = match String::from_utf8(val_buf) { + Ok(s) => s, + Err(_) => return 5, + }; + + let state = caller.data_mut(); + if !state.inner.has_capability(CAP_KV_STORE) { + tracing::warn!(plugin = %state.inner.plugin_name, "KV access denied: no capability"); + return 6; + } + + let plugin_name = state.inner.plugin_name.clone(); + let community_id = state.inner.community_id; + let pool = state.inner.pool.clone(); + + let parsed: Value = serde_json::from_str(&value).unwrap_or(Value::String(value)); + + let 
ok = block_in_place(|| { + Handle::current().block_on(async { + sqlx::query_scalar::<_, bool>("SELECT plugin_kv_set($1, $2, $3, $4, NULL)") + .bind(&plugin_name) + .bind(community_id) + .bind(&key) + .bind(parsed) + .fetch_one(&pool) + .await + .unwrap_or(false) + }) + }); + + if !ok { + return 7; + } + + tracing::debug!(plugin = %state.inner.plugin_name, key = %key, "KV set"); + 0 + }) + .map_err(|e| PluginError::Message(format!("Failed to register host_kv_set: {e}")))?; + + // host_emit_event: Emit an event to the event system + // Returns: 0 on success, error code on failure + linker + .func_wrap("env", "host_emit_event", |mut caller: wasmtime::Caller<'_, HostStateWithLimits>, event_ptr: u32, event_len: u32, payload_ptr: u32, payload_len: u32| -> u32 { + let memory = match caller.get_export("memory").and_then(|e| e.into_memory()) { + Some(m) => m, + None => return 1, + }; + + let mut event_buf = vec![0u8; event_len as usize]; + let mut payload_buf = vec![0u8; payload_len as usize]; + + if memory.read(&caller, event_ptr as usize, &mut event_buf).is_err() { + return 2; + } + if memory.read(&caller, payload_ptr as usize, &mut payload_buf).is_err() { + return 3; + } + + let event_name = match String::from_utf8(event_buf) { + Ok(s) => s, + Err(_) => return 4, + }; + let payload_str = match String::from_utf8(payload_buf) { + Ok(s) => s, + Err(_) => return 5, + }; + + let state = caller.data(); + if !state.inner.has_capability(CAP_EMIT_EVENTS) { + tracing::warn!(plugin = %state.inner.plugin_name, "Event emit denied: no capability"); + return 6; + } + + // Parse payload as JSON + let payload: Value = serde_json::from_str(&payload_str).unwrap_or(Value::Null); + + let plugin_name = state.inner.plugin_name.clone(); + let community_id = state.inner.community_id; + let actor_user_id = state.inner.actor_user_id; + let pool = state.inner.pool.clone(); + + let stored = block_in_place(|| { + Handle::current().block_on(async { + let _ = sqlx::query!( + r#"INSERT INTO public_events (community_id, actor_user_id, plugin_name, event_type, payload) + VALUES ($1, $2, $3, $4, $5)"#, + community_id, + actor_user_id, + plugin_name.clone(), + event_name, + payload + ) + .execute(&pool) + .await; + + let _ = sqlx::query!( + r#"INSERT INTO plugin_events (plugin_name, community_id, actor_user_id, event_name, payload) + VALUES ($1, $2, $3, $4, $5)"#, + plugin_name.clone(), + community_id, + actor_user_id, + event_name, + payload + ) + .execute(&pool) + .await; + + true + }) + }); + + if !stored { + return 7; + } + + tracing::info!(plugin = %caller.data().inner.plugin_name, event = %event_name, community_id = ?community_id, "Plugin emitted event"); + 0 + }) + .map_err(|e| PluginError::Message(format!("Failed to register host_emit_event: {e}")))?; + + // host_get_result: Copy result buffer to WASM memory + // This is a helper for retrieving data from host functions + linker + .func_wrap("env", "host_get_result", |mut caller: wasmtime::Caller<'_, HostStateWithLimits>, dest_ptr: u32, max_len: u32| -> u32 { + let memory = match caller.get_export("memory").and_then(|e| e.into_memory()) { + Some(m) => m, + None => return 0, + }; + + let result = caller.data().get_result(); + let copy_len = std::cmp::min(result.len(), max_len as usize); + + if memory.write(&mut caller, dest_ptr as usize, &result[..copy_len]).is_err() { + return 0; + } + + copy_len as u32 + }) + .map_err(|e| PluginError::Message(format!("Failed to register host_get_result: {e}")))?; + + // host_http_request: Make outbound HTTP request (capability-gated) + 
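+ // Requires CAP_OUTBOUND_HTTP and a URL matching the community egress allowlist.
+ // Requests time out after 10 seconds and response bodies are truncated to 256 KB.
+ // Returns: packed u64 with error code (high 32 bits) and data length (low 32 bits);
+ // the stored result is a JSON object {"status": ..., "body": ...}, or {"error": ...} on failure.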
linker + .func_wrap("env", "host_http_request", |mut caller: wasmtime::Caller<'_, HostStateWithLimits>, url_ptr: u32, url_len: u32, method_ptr: u32, method_len: u32, body_ptr: u32, body_len: u32| -> u64 { + let memory = match caller.get_export("memory").and_then(|e| e.into_memory()) { + Some(m) => m, + None => return pack_result(1, 0), + }; + + let mut url_buf = vec![0u8; url_len as usize]; + let mut method_buf = vec![0u8; method_len as usize]; + + if memory.read(&caller, url_ptr as usize, &mut url_buf).is_err() { + return pack_result(2, 0); + } + if memory.read(&caller, method_ptr as usize, &mut method_buf).is_err() { + return pack_result(3, 0); + } + + let mut body_buf = vec![0u8; body_len as usize]; + if body_len > 0 { + if memory.read(&caller, body_ptr as usize, &mut body_buf).is_err() { + return pack_result(8, 0); + } + } + + let url = match String::from_utf8(url_buf) { + Ok(s) => s, + Err(_) => return pack_result(4, 0), + }; + + let method = match String::from_utf8(method_buf) { + Ok(s) => s, + Err(_) => return pack_result(5, 0), + }; + + let body = String::from_utf8(body_buf).unwrap_or_default(); + + let state = caller.data(); + + if !state.inner.has_capability(CAP_OUTBOUND_HTTP) { + tracing::warn!(plugin = %state.inner.plugin_name, url = %url, "HTTP denied: no capability"); + return pack_result(6, 0); + } + + if !state.inner.is_url_allowed(&url) { + tracing::warn!(plugin = %state.inner.plugin_name, url = %url, "HTTP denied: not in allowlist"); + return pack_result(7, 0); + } + let plugin = state.inner.plugin_name.clone(); + + let response_json: Value = block_in_place(|| { + Handle::current().block_on(async { + let client = reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(10)) + .build(); + + let Ok(client) = client else { + return serde_json::json!({"error": "client_init_failed"}); + }; + + let mut req = match method.to_ascii_uppercase().as_str() { + "POST" => client.post(&url), + "PUT" => client.put(&url), + "DELETE" => client.delete(&url), + "PATCH" => client.patch(&url), + _ => client.get(&url), + }; + + if !body.is_empty() { + req = req.body(body.clone()); + } + + match req.send().await { + Ok(resp) => { + let status = resp.status().as_u16(); + let text = resp.text().await.unwrap_or_default(); + let truncated = if text.len() > 256_000 { + text[..256_000].to_string() + } else { + text + }; + serde_json::json!({"status": status, "body": truncated}) + } + Err(e) => serde_json::json!({"error": e.to_string()}), + } + }) + }); + + let result = serde_json::to_string(&response_json).unwrap_or("{}".to_string()); + let len = caller.data_mut().store_result(result.as_bytes()); + tracing::debug!(plugin = %plugin, url = %url, "HTTP request completed"); + pack_result(0, len) + }) + .map_err(|e| PluginError::Message(format!("Failed to register host_http_request: {e}")))?; + + Ok(()) +} + +fn pack_result(error_code: u32, data_len: u32) -> u64 { + ((error_code as u64) << 32) | (data_len as u64) +} + +/// WASM plugin manifest describing the plugin's metadata and capabilities. 
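+///
+/// A minimal illustrative manifest, shown as the JSON shape implied by the
+/// Serialize/Deserialize derives (the hook and capability names below are
+/// hypothetical placeholders, not taken from any shipped plugin):
+///
+/// ```json
+/// {
+///   "name": "example-webhook",
+///   "version": "0.1.0",
+///   "description": "Posts new proposals to an external webhook",
+///   "hooks": ["proposal.created"],
+///   "capabilities": ["outbound_http"]
+/// }
+/// ```
+///
+/// `author`, `homepage`, `license`, and `settings_schema` are optional and
+/// default to `None`; `hooks` and `capabilities` default to empty lists.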
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PluginManifest { + pub name: String, + pub version: String, + pub description: String, + #[serde(default)] + pub author: Option, + #[serde(default)] + pub homepage: Option, + #[serde(default)] + pub license: Option, + #[serde(default)] + pub hooks: Vec, + #[serde(default)] + pub capabilities: Vec, + #[serde(default)] + pub settings_schema: Option, +} diff --git a/backend/src/plugins/wasm/mod.rs b/backend/src/plugins/wasm/mod.rs new file mode 100644 index 0000000..0ac7ed5 --- /dev/null +++ b/backend/src/plugins/wasm/mod.rs @@ -0,0 +1,9 @@ +//! WASM plugin runtime module. +//! +//! Provides sandboxed execution of third-party plugins using WebAssembly. + +pub mod host_api; +pub mod plugin; +pub mod runtime; + +pub use plugin::WasmPlugin; diff --git a/backend/src/plugins/wasm/plugin.rs b/backend/src/plugins/wasm/plugin.rs new file mode 100644 index 0000000..a04ad9a --- /dev/null +++ b/backend/src/plugins/wasm/plugin.rs @@ -0,0 +1,308 @@ +//! WASM plugin implementation. +//! +//! Wraps compiled WASM modules and implements the Plugin trait for +//! integration with the hook system. + +use std::sync::Arc; + +use async_trait::async_trait; +use serde_json::{json, Value}; +use sqlx::PgPool; +use uuid::Uuid; + +use super::host_api::{Capability, HostState, PluginManifest, CAP_EMIT_EVENTS, CAP_KV_STORE, CAP_OUTBOUND_HTTP, CAP_SETTINGS}; +use super::runtime::{CompiledPlugin, ExecutionLimits, PluginInstance}; +use crate::plugins::hooks::{HookContext, PluginError}; +use crate::plugins::manager::{Plugin, PluginMetadata, PluginScope, PluginSystem}; + +/// A WASM-based plugin that can be loaded dynamically. +pub struct WasmPlugin { + package_id: Uuid, + manifest: PluginManifest, + compiled: Arc, + limits: ExecutionLimits, +} + +impl WasmPlugin { + /// Creates a new WASM plugin from a manifest and compiled module. + pub fn new( + package_id: Uuid, + manifest: PluginManifest, + compiled: Arc, + ) -> Self { + Self { + package_id, + manifest, + compiled, + limits: ExecutionLimits::default(), + } + } + + /// Sets custom execution limits for this plugin. + #[allow(dead_code)] // API for future use + pub fn with_limits(mut self, limits: ExecutionLimits) -> Self { + self.limits = limits; + self + } + + async fn capabilities_for(&self, pool: &PgPool, ctx: &HookContext) -> Result, PluginError> { + let mut out: Vec = Vec::new(); + + // Community policy influences outbound HTTP. 
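+ // Outbound HTTP is granted only when the community enables
+ // `plugin_allow_outbound_http` and provides a non-empty
+ // `plugin_http_egress_allowlist`. Settings, KV-store and event-emission
+ // capabilities are granted whenever the manifest requests them; unknown
+ // capability names are denied by default.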
+ let (allow_http, allowlist) = if let Some(cid) = ctx.community_id { + let row = sqlx::query!( + r#"SELECT settings as "settings!: serde_json::Value" FROM communities WHERE id = $1"#, + cid + ) + .fetch_optional(pool) + .await?; + + if let Some(row) = row { + let allow_http = row + .settings + .get("plugin_allow_outbound_http") + .and_then(|v: &serde_json::Value| v.as_bool()) + .unwrap_or(false); + + let allowlist: Vec = row + .settings + .get("plugin_http_egress_allowlist") + .and_then(|v: &serde_json::Value| v.as_array()) + .map(|arr: &Vec| { + arr.iter() + .filter_map(|v: &serde_json::Value| v.as_str().map(|s: &str| s.to_string())) + .collect() + }) + .unwrap_or_default(); + + (allow_http, allowlist) + } else { + (false, Vec::new()) + } + } else { + (false, Vec::new()) + }; + + for cap in &self.manifest.capabilities { + match cap.as_str() { + CAP_OUTBOUND_HTTP => { + let allowed = allow_http && !allowlist.is_empty(); + out.push(Capability { + name: cap.clone(), + allowed, + config: serde_json::json!({"allowlist": allowlist}), + }); + } + CAP_SETTINGS | CAP_KV_STORE | CAP_EMIT_EVENTS => { + out.push(Capability { + name: cap.clone(), + allowed: true, + config: serde_json::json!({}), + }); + } + _ => { + // Unknown capability is denied by default. + out.push(Capability { + name: cap.clone(), + allowed: false, + config: serde_json::json!({}), + }); + } + } + } + + Ok(out) + } + + async fn create_instance(&self, ctx: &HookContext) -> Result { + let capabilities = self.capabilities_for(&ctx.pool, ctx).await?; + let host_state = HostState::new( + self.manifest.name.clone(), + ctx.community_id, + ctx.actor_user_id, + ctx.pool.clone(), + self.package_id, + capabilities, + ); + + PluginInstance::new(&self.compiled, host_state, self.limits.clone()).await + } +} + +#[async_trait] +impl Plugin for WasmPlugin { + fn metadata(&self) -> PluginMetadata { + PluginMetadata { + name: Box::leak(self.manifest.name.clone().into_boxed_str()), + version: Box::leak(self.manifest.version.clone().into_boxed_str()), + description: Box::leak(self.manifest.description.clone().into_boxed_str()), + is_core: false, + scope: PluginScope::Community, + default_enabled: false, + settings_schema: self.manifest.settings_schema.clone(), + } + } + + fn register(&self, system: &mut PluginSystem) { + let plugin_name = self.manifest.name.clone(); + let package_id = self.package_id; + let handler_plugin_id = format!("wasm:{}", package_id); + let manifest_capabilities = self.manifest.capabilities.clone(); + let compiled = self.compiled.clone(); + let limits = self.limits.clone(); + + for hook in &self.manifest.hooks { + let hook_name = hook.clone(); + let hook_name_ref = hook_name.clone(); + let plugin_name_clone = plugin_name.clone(); + let handler_plugin_id_clone = handler_plugin_id.clone(); + let compiled_clone = compiled.clone(); + let limits_clone = limits.clone(); + let manifest_capabilities_for_hook = manifest_capabilities.clone(); + + system.add_action( + &hook_name_ref, + handler_plugin_id_clone.clone(), + 50, + Arc::new(move |ctx: HookContext, payload: Value| { + let hook = hook_name.clone(); + let plugin = plugin_name_clone.clone(); + let package_id = package_id; + let manifest_capabilities = manifest_capabilities_for_hook.clone(); + let compiled = compiled_clone.clone(); + let lim = limits_clone.clone(); + + Box::pin(async move { + let (allow_http, allowlist) = if let Some(cid) = ctx.community_id { + let row = sqlx::query!( + r#"SELECT settings as "settings!: serde_json::Value" FROM communities WHERE id = $1"#, + cid + 
) + .fetch_optional(&ctx.pool) + .await?; + + if let Some(row) = row { + let allow_http = row + .settings + .get("plugin_allow_outbound_http") + .and_then(|v: &serde_json::Value| v.as_bool()) + .unwrap_or(false); + + let allowlist: Vec = row + .settings + .get("plugin_http_egress_allowlist") + .and_then(|v: &serde_json::Value| v.as_array()) + .map(|arr: &Vec| { + arr.iter() + .filter_map(|v: &serde_json::Value| v.as_str().map(|s: &str| s.to_string())) + .collect() + }) + .unwrap_or_default(); + + (allow_http, allowlist) + } else { + (false, Vec::new()) + } + } else { + (false, Vec::new()) + }; + + let mut capabilities: Vec = Vec::new(); + for cap in &manifest_capabilities { + match cap.as_str() { + CAP_OUTBOUND_HTTP => { + let allowed = allow_http && !allowlist.is_empty(); + capabilities.push(Capability { + name: cap.clone(), + allowed, + config: serde_json::json!({"allowlist": allowlist.clone()}), + }); + } + CAP_SETTINGS | CAP_KV_STORE | CAP_EMIT_EVENTS => { + capabilities.push(Capability { + name: cap.clone(), + allowed: true, + config: serde_json::json!({}), + }); + } + _ => capabilities.push(Capability { + name: cap.clone(), + allowed: false, + config: serde_json::json!({}), + }), + } + } + + let host_state = HostState::new( + plugin.clone(), + ctx.community_id, + ctx.actor_user_id, + ctx.pool.clone(), + package_id, + capabilities, + ); + + let mut instance = PluginInstance::new(&compiled, host_state, lim).await?; + + let payload_json = serde_json::to_string(&payload) + .map_err(|e| PluginError::Message(format!("Failed to serialize payload: {e}")))?; + + let _result = instance.call_hook(&hook, &payload_json).await?; + + let remaining_fuel = instance.get_fuel(); + tracing::debug!( + plugin = %plugin, + hook = %hook, + fuel_remaining = remaining_fuel, + "WASM plugin hook completed" + ); + + Ok(()) + }) + }), + ); + } + } + + async fn activate(&self, ctx: HookContext, settings: Value) -> Result<(), PluginError> { + let mut instance = self.create_instance(&ctx).await?; + let payload = json!({ + "event": "activate", + "settings": settings + }); + let payload_json = serde_json::to_string(&payload) + .map_err(|e| PluginError::Message(format!("Failed to serialize: {e}")))?; + instance.call_hook("lifecycle.activate", &payload_json).await.ok(); + Ok(()) + } + + async fn deactivate(&self, ctx: HookContext, settings: Value) -> Result<(), PluginError> { + let mut instance = self.create_instance(&ctx).await?; + let payload = json!({ + "event": "deactivate", + "settings": settings + }); + let payload_json = serde_json::to_string(&payload) + .map_err(|e| PluginError::Message(format!("Failed to serialize: {e}")))?; + instance.call_hook("lifecycle.deactivate", &payload_json).await.ok(); + Ok(()) + } + + async fn settings_updated( + &self, + ctx: HookContext, + old_settings: Value, + new_settings: Value, + ) -> Result<(), PluginError> { + let mut instance = self.create_instance(&ctx).await?; + let payload = json!({ + "event": "settings_updated", + "old_settings": old_settings, + "new_settings": new_settings + }); + let payload_json = serde_json::to_string(&payload) + .map_err(|e| PluginError::Message(format!("Failed to serialize: {e}")))?; + instance.call_hook("lifecycle.settings_updated", &payload_json).await.ok(); + Ok(()) + } +} diff --git a/backend/src/plugins/wasm/runtime.rs b/backend/src/plugins/wasm/runtime.rs new file mode 100644 index 0000000..d5ce8c9 --- /dev/null +++ b/backend/src/plugins/wasm/runtime.rs @@ -0,0 +1,230 @@ +//! WASM runtime for plugin execution. +//! +//! 
Provides sandboxed execution of WASM plugins with resource limits, +//! timeout handling via epoch interruption, and fuel metering. + +use std::sync::Arc; +use std::time::Duration; + +use tokio::sync::oneshot; +use wasmtime::{Config, Engine, Linker, Module, Store, StoreLimitsBuilder}; + +use super::host_api::{HostState, HostStateWithLimits}; +use crate::plugins::PluginError; + +/// Default fuel limit for WASM execution (computational steps). +pub const DEFAULT_FUEL_LIMIT: u64 = 10_000_000; +/// Default timeout in milliseconds. +pub const DEFAULT_TIMEOUT_MS: u64 = 5000; +/// Default memory limit (16 MB). +pub const DEFAULT_MEMORY_LIMIT_BYTES: usize = 16 * 1024 * 1024; +/// Default table element limit. +pub const DEFAULT_TABLE_ELEMENTS: u32 = 10_000; + +/// WASM runtime engine with epoch-based timeout support. +pub struct WasmRuntime { + engine: Arc, + _epoch_ticker_shutdown: Option>, +} + +/// Execution limits for WASM plugins. +#[derive(Clone, Debug)] +pub struct ExecutionLimits { + pub fuel: u64, + pub timeout_ms: u64, + pub memory_bytes: usize, + pub table_elements: u32, +} + +impl Default for ExecutionLimits { + fn default() -> Self { + Self { + fuel: DEFAULT_FUEL_LIMIT, + timeout_ms: DEFAULT_TIMEOUT_MS, + memory_bytes: DEFAULT_MEMORY_LIMIT_BYTES, + table_elements: DEFAULT_TABLE_ELEMENTS, + } + } +} + +impl WasmRuntime { + /// Creates a new WASM runtime with epoch ticker for timeout handling. + pub fn new() -> Result { + let mut config = Config::new(); + config.async_support(true); + config.consume_fuel(true); + config.epoch_interruption(true); + + let engine = Arc::new( + Engine::new(&config) + .map_err(|e| PluginError::Message(format!("Failed to create WASM engine: {e}")))? + ); + + // Spawn epoch ticker for timeout enforcement + let (shutdown_tx, mut shutdown_rx) = oneshot::channel(); + let engine_clone = engine.clone(); + + tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_millis(10)); + loop { + tokio::select! { + _ = interval.tick() => { + engine_clone.increment_epoch(); + } + _ = &mut shutdown_rx => { + break; + } + } + } + }); + + Ok(Self { + engine, + _epoch_ticker_shutdown: Some(shutdown_tx), + }) + } + + /// Compiles WASM bytes into a reusable module. + pub fn compile(&self, wasm_bytes: &[u8]) -> Result { + let module = Module::new(&self.engine, wasm_bytes) + .map_err(|e| PluginError::Message(format!("Failed to compile WASM module: {e}")))?; + + Ok(CompiledPlugin { + engine: self.engine.clone(), + module, + }) + } +} + +/// A compiled WASM plugin ready for instantiation. +pub struct CompiledPlugin { + engine: Arc, + module: Module, +} + +impl CompiledPlugin { + pub fn engine(&self) -> &Engine { + &self.engine + } + + pub fn module(&self) -> &Module { + &self.module + } +} + +/// An instantiated WASM plugin ready for hook execution. +pub struct PluginInstance { + store: Store, + instance: wasmtime::Instance, +} + +impl PluginInstance { + /// Creates a new plugin instance with the given host state and limits. 
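+ ///
+ /// Limits are applied here: a store limiter caps memory size and table
+ /// growth, fuel metering bounds the number of executed instructions, and
+ /// an epoch deadline of `timeout_ms / 10` ticks (the engine epoch advances
+ /// every 10 ms) drives the runtime's timeout handling.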
+ pub async fn new( + compiled: &CompiledPlugin, + host_state: HostState, + limits: ExecutionLimits, + ) -> Result { + let store_limits = StoreLimitsBuilder::new() + .memory_size(limits.memory_bytes) + .table_elements(limits.table_elements) + .instances(10) + .tables(10) + .memories(1) + .build(); + + let state_with_limits = HostStateWithLimits { + inner: host_state, + limits: store_limits, + }; + + let mut store = Store::new(compiled.engine(), state_with_limits); + store.limiter(|state| &mut state.limits); + + store.set_fuel(limits.fuel).map_err(|e| { + PluginError::Message(format!("Failed to set fuel limit: {e}")) + })?; + + let epoch_deadline = (limits.timeout_ms / 10).max(1); + store.epoch_deadline_async_yield_and_update(epoch_deadline); + + let mut linker: Linker = Linker::new(compiled.engine()); + super::host_api::register_host_functions(&mut linker)?; + + let instance = linker + .instantiate_async(&mut store, compiled.module()) + .await + .map_err(|e| PluginError::Message(format!("Failed to instantiate WASM module: {e}")))?; + + Ok(Self { store, instance }) + } + + pub async fn call_hook( + &mut self, + hook_name: &str, + payload_json: &str, + ) -> Result { + let alloc = self + .instance + .get_typed_func::(&mut self.store, "alloc") + .map_err(|e| PluginError::Message(format!("Plugin missing 'alloc' export: {e}")))?; + + let dealloc = self + .instance + .get_typed_func::<(u32, u32), ()>(&mut self.store, "dealloc") + .map_err(|e| PluginError::Message(format!("Plugin missing 'dealloc' export: {e}")))?; + + let handle_hook = self + .instance + .get_typed_func::<(u32, u32, u32, u32), u64>(&mut self.store, "handle_hook") + .map_err(|e| PluginError::Message(format!("Plugin missing 'handle_hook' export: {e}")))?; + + let memory = self + .instance + .get_memory(&mut self.store, "memory") + .ok_or_else(|| PluginError::Message("Plugin missing 'memory' export".to_string()))?; + + let hook_bytes = hook_name.as_bytes(); + let hook_ptr = alloc.call_async(&mut self.store, hook_bytes.len() as u32).await + .map_err(|e| PluginError::Message(format!("alloc failed for hook name: {e}")))?; + memory.write(&mut self.store, hook_ptr as usize, hook_bytes) + .map_err(|e| PluginError::Message(format!("Failed to write hook name: {e}")))?; + + let payload_bytes = payload_json.as_bytes(); + let payload_ptr = alloc.call_async(&mut self.store, payload_bytes.len() as u32).await + .map_err(|e| PluginError::Message(format!("alloc failed for payload: {e}")))?; + memory.write(&mut self.store, payload_ptr as usize, payload_bytes) + .map_err(|e| PluginError::Message(format!("Failed to write payload: {e}")))?; + + let result = handle_hook + .call_async( + &mut self.store, + ( + hook_ptr, + hook_bytes.len() as u32, + payload_ptr, + payload_bytes.len() as u32, + ), + ) + .await + .map_err(|e| PluginError::Message(format!("handle_hook failed: {e}")))?; + + let result_ptr = (result >> 32) as u32; + let result_len = (result & 0xFFFFFFFF) as u32; + + let mut result_bytes = vec![0u8; result_len as usize]; + memory.read(&self.store, result_ptr as usize, &mut result_bytes) + .map_err(|e| PluginError::Message(format!("Failed to read result: {e}")))?; + + dealloc.call_async(&mut self.store, (hook_ptr, hook_bytes.len() as u32)).await.ok(); + dealloc.call_async(&mut self.store, (payload_ptr, payload_bytes.len() as u32)).await.ok(); + dealloc.call_async(&mut self.store, (result_ptr, result_len)).await.ok(); + + String::from_utf8(result_bytes) + .map_err(|e| PluginError::Message(format!("Result is not valid UTF-8: {e}"))) + } + + pub fn 
get_fuel(&self) -> u64 { + self.store.get_fuel().unwrap_or(0) + } +} diff --git a/backend/src/voting/mod.rs b/backend/src/voting/mod.rs new file mode 100644 index 0000000..b029450 --- /dev/null +++ b/backend/src/voting/mod.rs @@ -0,0 +1,107 @@ +//! Advanced Voting Methods +//! +//! Implements various voting algorithms as described in the Democracy Design manifesto: +//! - Schulze Method (Condorcet-consistent pairwise comparison) +//! - STAR Voting (Score Then Automatic Runoff) +//! - Quadratic Voting (intensity-weighted preferences) +//! - Ranked Choice / Instant Runoff + +pub mod schulze; +pub mod star; +pub mod quadratic; +pub mod ranked_choice; + +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +/// Voting method types +/// Used by voting calculation services when tallying results. +#[allow(dead_code)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum VotingMethod { + /// Simple approval voting (vote for multiple options) + Approval, + /// Ranked choice / instant runoff + RankedChoice, + /// Schulze method (Condorcet) + Schulze, + /// STAR voting (Score Then Automatic Runoff) + Star, + /// Quadratic voting (intensity-weighted) + Quadratic, +} + +impl Default for VotingMethod { + fn default() -> Self { + Self::Approval + } +} + +impl std::fmt::Display for VotingMethod { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + VotingMethod::Approval => write!(f, "approval"), + VotingMethod::RankedChoice => write!(f, "ranked_choice"), + VotingMethod::Schulze => write!(f, "schulze"), + VotingMethod::Star => write!(f, "star"), + VotingMethod::Quadratic => write!(f, "quadratic"), + } + } +} + +/// Result of a voting calculation +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct VotingResult { + /// The winning option (if any) + pub winner: Option, + /// Full ranking of options + pub ranking: Vec, + /// Method-specific details + pub details: VotingDetails, + /// Total number of ballots counted + pub total_ballots: usize, +} + +/// An option with its rank and score +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RankedOption { + pub option_id: Uuid, + pub rank: usize, + pub score: f64, +} + +/// Method-specific voting details +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "method")] +pub enum VotingDetails { + Approval { + vote_counts: Vec<(Uuid, i64)>, + }, + RankedChoice { + rounds: Vec, + eliminated: Vec, + }, + Schulze { + pairwise_matrix: Vec>, + strongest_paths: Vec>, + option_ids: Vec, + }, + Star { + score_totals: Vec<(Uuid, i64)>, + finalists: (Uuid, Uuid), + runoff_votes: (i64, i64), + }, + Quadratic { + vote_totals: Vec<(Uuid, i64)>, + total_credits_spent: i64, + }, +} + +/// Result of a single round in ranked choice voting +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RoundResult { + pub round: usize, + pub vote_counts: Vec<(Uuid, i64)>, + pub eliminated: Option, +} diff --git a/backend/src/voting/quadratic.rs b/backend/src/voting/quadratic.rs new file mode 100644 index 0000000..9bc61f5 --- /dev/null +++ b/backend/src/voting/quadratic.rs @@ -0,0 +1,179 @@ +//! Quadratic Voting Implementation +//! +//! Voters allocate credits to options, where the cost of N votes is N². +//! This allows expressing intensity of preference while preventing +//! concentration of power. +//! +//! Used by the voting tallying service when calculating results for +//! proposals using quadratic voting method. 
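+//!
+//! Worked example (illustrative): with a budget of 100 credits, casting 9
+//! votes for one option costs 9² = 81 credits, while casting 2 votes costs
+//! only 4. A single intense supporter can therefore outweigh several mild
+//! ones (9 votes versus 3 × 2 = 6 votes), which is what the
+//! `test_intensity_expression` test below exercises.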
+ +use std::collections::HashMap; +use uuid::Uuid; + +use super::{RankedOption, VotingDetails, VotingResult}; + +/// A quadratic ballot (voter's credit allocations) +#[derive(Debug, Clone)] +pub struct QuadraticBallot { + pub total_credits: i32, + pub allocations: Vec<(Uuid, i32)>, // (option_id, votes) where cost = votes² +} + +impl QuadraticBallot { + /// Calculate total credits spent + pub fn credits_spent(&self) -> i32 { + self.allocations.iter().map(|(_, v)| v * v).sum() + } + + /// Check if ballot is valid (doesn't exceed budget) + pub fn is_valid(&self) -> bool { + self.credits_spent() <= self.total_credits + } +} + +/// Calculate the cost for a given number of votes +pub fn vote_cost(votes: i32) -> i32 { + votes * votes +} + +/// Calculate max votes possible with given credits +pub fn max_votes_for_credits(credits: i32) -> i32 { + (credits as f64).sqrt().floor() as i32 +} + +/// Calculate voting results using Quadratic voting +pub fn calculate(options: &[Uuid], ballots: &[QuadraticBallot]) -> VotingResult { + if options.is_empty() { + return VotingResult { + winner: None, + ranking: vec![], + details: VotingDetails::Quadratic { + vote_totals: vec![], + total_credits_spent: 0, + }, + total_ballots: ballots.len(), + }; + } + + // Sum votes for each option + let mut vote_totals: HashMap = HashMap::new(); + for opt in options { + vote_totals.insert(*opt, 0); + } + + let mut total_credits_spent: i64 = 0; + + for ballot in ballots { + if !ballot.is_valid() { + continue; // Skip invalid ballots + } + + total_credits_spent += ballot.credits_spent() as i64; + + for (opt_id, votes) in &ballot.allocations { + if let Some(total) = vote_totals.get_mut(opt_id) { + *total += *votes as i64; + } + } + } + + // Sort by total votes (descending) + let mut sorted_votes: Vec<(Uuid, i64)> = vote_totals.iter() + .map(|(&id, &votes)| (id, votes)) + .collect(); + sorted_votes.sort_by(|a, b| b.1.cmp(&a.1)); + + let ranking: Vec = sorted_votes.iter() + .enumerate() + .map(|(i, (id, votes))| RankedOption { + option_id: *id, + rank: i + 1, + score: *votes as f64, + }) + .collect(); + + let winner = ranking.first().map(|r| r.option_id); + + VotingResult { + winner, + ranking, + details: VotingDetails::Quadratic { + vote_totals: sorted_votes, + total_credits_spent, + }, + total_ballots: ballots.len(), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_vote_cost() { + assert_eq!(vote_cost(1), 1); + assert_eq!(vote_cost(2), 4); + assert_eq!(vote_cost(3), 9); + assert_eq!(vote_cost(10), 100); + } + + #[test] + fn test_max_votes() { + assert_eq!(max_votes_for_credits(1), 1); + assert_eq!(max_votes_for_credits(4), 2); + assert_eq!(max_votes_for_credits(9), 3); + assert_eq!(max_votes_for_credits(100), 10); + assert_eq!(max_votes_for_credits(99), 9); // Floor + } + + #[test] + fn test_intensity_expression() { + let a = Uuid::new_v4(); + let b = Uuid::new_v4(); + let options = vec![a, b]; + + // Voter 1 cares strongly about A (spends 81 credits for 9 votes) + // Voter 2-4 mildly prefer B (each spends 4 credits for 2 votes) + let ballots = vec![ + QuadraticBallot { + total_credits: 100, + allocations: vec![(a, 9)], // 81 credits + }, + QuadraticBallot { + total_credits: 100, + allocations: vec![(b, 2)], // 4 credits + }, + QuadraticBallot { + total_credits: 100, + allocations: vec![(b, 2)], // 4 credits + }, + QuadraticBallot { + total_credits: 100, + allocations: vec![(b, 2)], // 4 credits + }, + ]; + + let result = calculate(&options, &ballots); + + // A: 9 votes, B: 6 votes -> A wins despite 
fewer supporters + // This demonstrates intensity expression + assert_eq!(result.winner, Some(a)); + } + + #[test] + fn test_invalid_ballot_rejected() { + let a = Uuid::new_v4(); + let options = vec![a]; + + let ballots = vec![ + QuadraticBallot { + total_credits: 100, + allocations: vec![(a, 11)], // Costs 121, exceeds 100 + }, + ]; + + let result = calculate(&options, &ballots); + // Invalid ballot should be skipped + assert_eq!(result.ranking[0].score, 0.0); + } +} diff --git a/backend/src/voting/ranked_choice.rs b/backend/src/voting/ranked_choice.rs new file mode 100644 index 0000000..3d358d1 --- /dev/null +++ b/backend/src/voting/ranked_choice.rs @@ -0,0 +1,246 @@ +//! Ranked Choice / Instant Runoff Voting Implementation +//! +//! Voters rank options in order of preference. If no option has a majority, +//! the lowest-ranked option is eliminated and votes are redistributed +//! until one option achieves majority. + +use std::collections::{HashMap, HashSet}; +use uuid::Uuid; + +use super::{RankedOption, RoundResult, VotingDetails, VotingResult}; + +/// A ranked ballot (voter's preference order) +#[derive(Debug, Clone)] +pub struct RankedBallot { + pub rankings: Vec, // Ordered list: index 0 = first choice +} + +/// Calculate voting results using Ranked Choice / Instant Runoff +pub fn calculate(options: &[Uuid], ballots: &[RankedBallot]) -> VotingResult { + if options.is_empty() || ballots.is_empty() { + return VotingResult { + winner: None, + ranking: vec![], + details: VotingDetails::RankedChoice { + rounds: vec![], + eliminated: vec![], + }, + total_ballots: ballots.len(), + }; + } + + let mut active_options: HashSet = options.iter().cloned().collect(); + let mut eliminated: Vec = vec![]; + let mut rounds: Vec = vec![]; + let majority_threshold = (ballots.len() as f64 / 2.0).floor() as i64 + 1; + + loop { + // Count first-choice votes among active options + let mut vote_counts: HashMap = active_options.iter() + .map(|&id| (id, 0)) + .collect(); + + for ballot in ballots { + // Find first choice among active options + for opt in &ballot.rankings { + if active_options.contains(opt) { + *vote_counts.get_mut(opt).unwrap() += 1; + break; + } + } + } + + // Sort by vote count + let mut sorted: Vec<(Uuid, i64)> = vote_counts.iter() + .map(|(&id, &count)| (id, count)) + .collect(); + sorted.sort_by(|a, b| b.1.cmp(&a.1)); + + let round_num = rounds.len() + 1; + + // Check for majority winner + if let Some((winner, count)) = sorted.first() { + if *count >= majority_threshold { + rounds.push(RoundResult { + round: round_num, + vote_counts: sorted.clone(), + eliminated: None, + }); + + // Build final ranking + let mut final_ranking: Vec = sorted.iter() + .enumerate() + .map(|(i, (id, count))| RankedOption { + option_id: *id, + rank: i + 1, + score: *count as f64, + }) + .collect(); + + // Add eliminated options at the end (in reverse elimination order) + for (_i, &opt) in eliminated.iter().rev().enumerate() { + final_ranking.push(RankedOption { + option_id: opt, + rank: final_ranking.len() + 1, + score: 0.0, + }); + } + + return VotingResult { + winner: Some(*winner), + ranking: final_ranking, + details: VotingDetails::RankedChoice { + rounds, + eliminated, + }, + total_ballots: ballots.len(), + }; + } + } + + // Only one option left - it wins + if active_options.len() <= 1 { + let winner = active_options.iter().next().cloned(); + + rounds.push(RoundResult { + round: round_num, + vote_counts: sorted.clone(), + eliminated: None, + }); + + let mut final_ranking: Vec = sorted.iter() + .enumerate() 
+ .map(|(i, (id, count))| RankedOption { + option_id: *id, + rank: i + 1, + score: *count as f64, + }) + .collect(); + + for (_i, &opt) in eliminated.iter().rev().enumerate() { + final_ranking.push(RankedOption { + option_id: opt, + rank: final_ranking.len() + 1, + score: 0.0, + }); + } + + return VotingResult { + winner, + ranking: final_ranking, + details: VotingDetails::RankedChoice { + rounds, + eliminated, + }, + total_ballots: ballots.len(), + }; + } + + // Eliminate lowest-ranked option + if let Some((loser, _)) = sorted.last() { + let loser_id = *loser; + + rounds.push(RoundResult { + round: round_num, + vote_counts: sorted, + eliminated: Some(loser_id), + }); + + active_options.remove(&loser_id); + eliminated.push(loser_id); + } else { + break; + } + } + + // Fallback (shouldn't reach here) + VotingResult { + winner: None, + ranking: vec![], + details: VotingDetails::RankedChoice { + rounds, + eliminated, + }, + total_ballots: ballots.len(), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_first_round_majority() { + let a = Uuid::new_v4(); + let b = Uuid::new_v4(); + let c = Uuid::new_v4(); + let options = vec![a, b, c]; + + // A has clear majority + let ballots = vec![ + RankedBallot { rankings: vec![a, b, c] }, + RankedBallot { rankings: vec![a, b, c] }, + RankedBallot { rankings: vec![a, c, b] }, + RankedBallot { rankings: vec![b, a, c] }, + RankedBallot { rankings: vec![c, b, a] }, + ]; + + let result = calculate(&options, &ballots); + assert_eq!(result.winner, Some(a)); // 3/5 = 60% majority + } + + #[test] + fn test_runoff_needed() { + let a = Uuid::new_v4(); + let b = Uuid::new_v4(); + let c = Uuid::new_v4(); + let options = vec![a, b, c]; + + // No first-round majority, C eliminated, B wins + let ballots = vec![ + RankedBallot { rankings: vec![a, b, c] }, + RankedBallot { rankings: vec![a, b, c] }, + RankedBallot { rankings: vec![b, a, c] }, + RankedBallot { rankings: vec![b, c, a] }, + RankedBallot { rankings: vec![c, b, a] }, // C's vote goes to B + ]; + + let result = calculate(&options, &ballots); + + // Round 1: A=2, B=2, C=1 -> C eliminated + // Round 2: A=2, B=3 -> B wins with majority + assert_eq!(result.winner, Some(b)); + + if let VotingDetails::RankedChoice { rounds, eliminated } = &result.details { + assert_eq!(rounds.len(), 2); + assert_eq!(eliminated, &vec![c]); + } else { + panic!("Wrong voting details type"); + } + } + + #[test] + fn test_spoiler_effect_eliminated() { + // Classic example: spoiler candidate doesn't affect outcome + let a = Uuid::new_v4(); + let b = Uuid::new_v4(); + let spoiler = Uuid::new_v4(); + let options = vec![a, b, spoiler]; + + let ballots = vec![ + RankedBallot { rankings: vec![a, spoiler, b] }, + RankedBallot { rankings: vec![a, spoiler, b] }, + RankedBallot { rankings: vec![spoiler, a, b] }, // Spoiler fans prefer A + RankedBallot { rankings: vec![b, a, spoiler] }, + RankedBallot { rankings: vec![b, a, spoiler] }, + ]; + + let result = calculate(&options, &ballots); + + // Without RCV, spoiler might split A's vote + // With RCV, spoiler eliminated, vote goes to A + // Round 1: A=2, B=2, Spoiler=1 -> Spoiler eliminated + // Round 2: A=3, B=2 -> A wins + assert_eq!(result.winner, Some(a)); + } +} diff --git a/backend/src/voting/schulze.rs b/backend/src/voting/schulze.rs new file mode 100644 index 0000000..84f673d --- /dev/null +++ b/backend/src/voting/schulze.rs @@ -0,0 +1,195 @@ +//! Schulze Method Implementation +//! +//! The Schulze method is a Condorcet-consistent voting system that uses +//! 
pairwise comparisons and the Floyd-Warshall algorithm to find the +//! strongest paths between candidates. + +use std::collections::HashMap; +use uuid::Uuid; + +use super::{RankedOption, VotingDetails, VotingResult}; + +/// A ranked ballot (voter's preference order) +#[derive(Debug, Clone)] +pub struct RankedBallot { + pub rankings: Vec<(Uuid, usize)>, // (option_id, rank) where 1 = first choice +} + +/// Calculate voting results using the Schulze method +pub fn calculate(options: &[Uuid], ballots: &[RankedBallot]) -> VotingResult { + let n = options.len(); + if n == 0 { + return VotingResult { + winner: None, + ranking: vec![], + details: VotingDetails::Schulze { + pairwise_matrix: vec![], + strongest_paths: vec![], + option_ids: vec![], + }, + total_ballots: ballots.len(), + }; + } + + // Create option index mapping + let _option_index: HashMap = options + .iter() + .enumerate() + .map(|(i, &id)| (id, i)) + .collect(); + + // Build pairwise preference matrix + // d[i][j] = number of voters who prefer option i over option j + let mut d: Vec> = vec![vec![0; n]; n]; + + for ballot in ballots { + let ballot_ranks: HashMap = ballot.rankings.iter().cloned().collect(); + + for i in 0..n { + for j in 0..n { + if i == j { + continue; + } + + let opt_i = options[i]; + let opt_j = options[j]; + + let rank_i = ballot_ranks.get(&opt_i).copied(); + let rank_j = ballot_ranks.get(&opt_j).copied(); + + // Lower rank number = higher preference + // Unranked options are considered worse than ranked ones + let i_preferred = match (rank_i, rank_j) { + (Some(ri), Some(rj)) => ri < rj, + (Some(_), None) => true, // Ranked beats unranked + (None, Some(_)) => false, // Unranked loses to ranked + (None, None) => false, // Both unranked = no preference + }; + + if i_preferred { + d[i][j] += 1; + } + } + } + } + + // Calculate strongest paths using Floyd-Warshall variant + // p[i][j] = strength of strongest path from i to j + let mut p: Vec> = vec![vec![0; n]; n]; + + // Initialize with direct comparisons (margin of victory) + for i in 0..n { + for j in 0..n { + if i != j && d[i][j] > d[j][i] { + p[i][j] = d[i][j]; + } + } + } + + // Floyd-Warshall to find strongest paths + for k in 0..n { + for i in 0..n { + if i == k { + continue; + } + for j in 0..n { + if j == i || j == k { + continue; + } + // Strength of path through k is min of the two segments + let path_strength = p[i][k].min(p[k][j]); + if path_strength > p[i][j] { + p[i][j] = path_strength; + } + } + } + } + + // Determine winner and ranking + // Option i beats j if p[i][j] > p[j][i] + let mut wins: Vec<(Uuid, i32)> = options + .iter() + .enumerate() + .map(|(i, &opt_id)| { + let win_count: i32 = (0..n) + .filter(|&j| i != j && p[i][j] > p[j][i]) + .count() as i32; + (opt_id, win_count) + }) + .collect(); + + // Sort by win count (descending) + wins.sort_by(|a, b| b.1.cmp(&a.1)); + + let ranking: Vec = wins + .iter() + .enumerate() + .map(|(rank, (opt_id, win_count))| RankedOption { + option_id: *opt_id, + rank: rank + 1, + score: *win_count as f64, + }) + .collect(); + + let winner = ranking.first().map(|r| r.option_id); + + VotingResult { + winner, + ranking, + details: VotingDetails::Schulze { + pairwise_matrix: d, + strongest_paths: p, + option_ids: options.to_vec(), + }, + total_ballots: ballots.len(), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_simple_majority() { + let a = Uuid::new_v4(); + let b = Uuid::new_v4(); + let c = Uuid::new_v4(); + let options = vec![a, b, c]; + + // 3 voters prefer A > B > C + // 2 voters 
prefer B > C > A + let ballots = vec![ + RankedBallot { rankings: vec![(a, 1), (b, 2), (c, 3)] }, + RankedBallot { rankings: vec![(a, 1), (b, 2), (c, 3)] }, + RankedBallot { rankings: vec![(a, 1), (b, 2), (c, 3)] }, + RankedBallot { rankings: vec![(b, 1), (c, 2), (a, 3)] }, + RankedBallot { rankings: vec![(b, 1), (c, 2), (a, 3)] }, + ]; + + let result = calculate(&options, &ballots); + assert_eq!(result.winner, Some(a)); + assert_eq!(result.total_ballots, 5); + } + + #[test] + fn test_condorcet_cycle() { + // Classic rock-paper-scissors cycle + let a = Uuid::new_v4(); + let b = Uuid::new_v4(); + let c = Uuid::new_v4(); + let options = vec![a, b, c]; + + // A beats B, B beats C, C beats A + let ballots = vec![ + RankedBallot { rankings: vec![(a, 1), (b, 2), (c, 3)] }, + RankedBallot { rankings: vec![(a, 1), (b, 2), (c, 3)] }, + RankedBallot { rankings: vec![(b, 1), (c, 2), (a, 3)] }, + RankedBallot { rankings: vec![(b, 1), (c, 2), (a, 3)] }, + RankedBallot { rankings: vec![(c, 1), (a, 2), (b, 3)] }, + ]; + + let result = calculate(&options, &ballots); + // Schulze should still produce a winner even with a cycle + assert!(result.winner.is_some()); + } +} diff --git a/backend/src/voting/star.rs b/backend/src/voting/star.rs new file mode 100644 index 0000000..bb3c991 --- /dev/null +++ b/backend/src/voting/star.rs @@ -0,0 +1,174 @@ +//! STAR Voting Implementation +//! +//! Score Then Automatic Runoff: voters score each option 0-5, +//! top two scorers advance to automatic runoff based on preferences. + +use std::collections::HashMap; +use uuid::Uuid; + +use super::{RankedOption, VotingDetails, VotingResult}; + +/// A score ballot (voter's scores for each option) +#[derive(Debug, Clone)] +pub struct ScoreBallot { + pub scores: Vec<(Uuid, i32)>, // (option_id, score 0-5) +} + +/// Calculate voting results using STAR voting +pub fn calculate(options: &[Uuid], ballots: &[ScoreBallot]) -> VotingResult { + if options.is_empty() || ballots.is_empty() { + return VotingResult { + winner: None, + ranking: vec![], + details: VotingDetails::Star { + score_totals: vec![], + finalists: (Uuid::nil(), Uuid::nil()), + runoff_votes: (0, 0), + }, + total_ballots: ballots.len(), + }; + } + + // Phase 1: Sum all scores + let mut score_totals: HashMap = HashMap::new(); + for opt in options { + score_totals.insert(*opt, 0); + } + + for ballot in ballots { + for (opt_id, score) in &ballot.scores { + if let Some(total) = score_totals.get_mut(opt_id) { + *total += *score as i64; + } + } + } + + // Sort by total score (descending) + let mut sorted_scores: Vec<(Uuid, i64)> = score_totals.iter() + .map(|(&id, &score)| (id, score)) + .collect(); + sorted_scores.sort_by(|a, b| b.1.cmp(&a.1)); + + if sorted_scores.len() < 2 { + // Only one option - it wins by default + let winner = sorted_scores.first().map(|(id, _)| *id); + return VotingResult { + winner, + ranking: sorted_scores.iter().enumerate().map(|(i, (id, score))| { + RankedOption { + option_id: *id, + rank: i + 1, + score: *score as f64, + } + }).collect(), + details: VotingDetails::Star { + score_totals: sorted_scores, + finalists: (winner.unwrap_or(Uuid::nil()), Uuid::nil()), + runoff_votes: (ballots.len() as i64, 0), + }, + total_ballots: ballots.len(), + }; + } + + // Phase 2: Top two advance to runoff + let finalist_a = sorted_scores[0].0; + let finalist_b = sorted_scores[1].0; + + // Count how many ballots prefer A over B (higher score = preference) + let mut a_preferred: i64 = 0; + let mut b_preferred: i64 = 0; + + for ballot in ballots { + let ballot_scores: 
HashMap = ballot.scores.iter().cloned().collect(); + + let score_a = ballot_scores.get(&finalist_a).copied().unwrap_or(0); + let score_b = ballot_scores.get(&finalist_b).copied().unwrap_or(0); + + if score_a > score_b { + a_preferred += 1; + } else if score_b > score_a { + b_preferred += 1; + } + // Equal scores = no preference, not counted + } + + // Winner is whoever is preferred by more voters in the runoff + let (winner, runoff_votes) = if a_preferred >= b_preferred { + (finalist_a, (a_preferred, b_preferred)) + } else { + (finalist_b, (b_preferred, a_preferred)) + }; + + // Build final ranking + let mut ranking: Vec = sorted_scores.iter().enumerate().map(|(i, (id, score))| { + RankedOption { + option_id: *id, + rank: i + 1, + score: *score as f64, + } + }).collect(); + + // Adjust ranking for runoff result (swap if needed) + if winner == finalist_b { + if ranking.len() >= 2 { + ranking[0].rank = 2; + ranking[1].rank = 1; + ranking.swap(0, 1); + } + } + + VotingResult { + winner: Some(winner), + ranking, + details: VotingDetails::Star { + score_totals: sorted_scores, + finalists: (finalist_a, finalist_b), + runoff_votes, + }, + total_ballots: ballots.len(), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_clear_winner() { + let a = Uuid::new_v4(); + let b = Uuid::new_v4(); + let c = Uuid::new_v4(); + let options = vec![a, b, c]; + + let ballots = vec![ + ScoreBallot { scores: vec![(a, 5), (b, 3), (c, 1)] }, + ScoreBallot { scores: vec![(a, 5), (b, 2), (c, 0)] }, + ScoreBallot { scores: vec![(a, 4), (b, 4), (c, 2)] }, + ]; + + let result = calculate(&options, &ballots); + assert_eq!(result.winner, Some(a)); + } + + #[test] + fn test_runoff_reversal() { + // A has higher total score, but B is preferred in runoff + let a = Uuid::new_v4(); + let b = Uuid::new_v4(); + let options = vec![a, b]; + + // A gets high scores from few, B gets moderate scores from many + let ballots = vec![ + ScoreBallot { scores: vec![(a, 5), (b, 4)] }, // Prefers A + ScoreBallot { scores: vec![(a, 5), (b, 4)] }, // Prefers A + ScoreBallot { scores: vec![(a, 0), (b, 3)] }, // Prefers B + ScoreBallot { scores: vec![(a, 0), (b, 3)] }, // Prefers B + ScoreBallot { scores: vec![(a, 0), (b, 3)] }, // Prefers B + ]; + + let result = calculate(&options, &ballots); + // A total: 10, B total: 17 -> B and A are finalists + // Runoff: 3 prefer B, 2 prefer A -> B wins + assert_eq!(result.winner, Some(b)); + } +} diff --git a/compose/.env.demo.example b/compose/.env.demo.example new file mode 100644 index 0000000..544f7e4 --- /dev/null +++ b/compose/.env.demo.example @@ -0,0 +1,22 @@ +# Demo Environment Configuration +# Copy to .env.demo and optionally customize + +# Database (separate from production) +POSTGRES_USER=likwid_demo +POSTGRES_PASSWORD=demo_secret_change_me +POSTGRES_DB=likwid_demo +DB_PORT=5433 + +# Backend +JWT_SECRET=demo_jwt_secret_not_for_production +BACKEND_PORT=3001 + +# Frontend +FRONTEND_PORT=4322 +API_BASE=http://localhost:3001 + +# Demo mode is always enabled for this deployment +# This enables: +# - Demo accounts (contributor, moderator, observer) with password: demo123 +# - Pre-seeded communities, proposals, and governance data +# - Restricted destructive actions (cannot delete core demo data) diff --git a/compose/.env.production.example b/compose/.env.production.example new file mode 100644 index 0000000..8b62b07 --- /dev/null +++ b/compose/.env.production.example @@ -0,0 +1,18 @@ +# Production Environment Configuration +# Copy to .env.production and fill in values + +# 
Database +POSTGRES_USER=likwid +POSTGRES_PASSWORD=CHANGE_THIS_STRONG_PASSWORD +POSTGRES_DB=likwid_prod +DB_PORT=5432 + +# Backend +JWT_SECRET=CHANGE_THIS_TO_RANDOM_64_CHAR_STRING +BACKEND_PORT=3000 + +# Frontend +FRONTEND_PORT=4321 +API_BASE=https://your-domain.com + +# Note: DEMO_MODE is always false for production diff --git a/compose/demo.yml b/compose/demo.yml new file mode 100644 index 0000000..024c486 --- /dev/null +++ b/compose/demo.yml @@ -0,0 +1,61 @@ +version: "3.9" + +# Demo deployment - includes demo users, seed data, and restricted actions +# Usage: podman-compose -f compose/demo.yml up -d +# Reset: podman-compose -f compose/demo.yml down -v && podman-compose -f compose/demo.yml up -d + +services: + postgres: + image: postgres:16 + container_name: likwid-demo-db + restart: unless-stopped + ports: + - "${DB_PORT:-5433}:5432" + environment: + POSTGRES_USER: ${POSTGRES_USER:-likwid_demo} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-demo_secret_change_me} + POSTGRES_DB: ${POSTGRES_DB:-likwid_demo} + volumes: + - likwid_demo_data:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-likwid_demo}"] + interval: 10s + timeout: 5s + retries: 5 + + backend: + build: + context: ../backend + dockerfile: Dockerfile + container_name: likwid-demo-backend + restart: unless-stopped + ports: + - "${BACKEND_PORT:-3001}:3000" + environment: + DATABASE_URL: postgres://${POSTGRES_USER:-likwid_demo}:${POSTGRES_PASSWORD:-demo_secret_change_me}@postgres:5432/${POSTGRES_DB:-likwid_demo} + JWT_SECRET: ${JWT_SECRET:-demo_jwt_secret_not_for_production} + SERVER_HOST: 0.0.0.0 + SERVER_PORT: 3000 + DEMO_MODE: "true" + RUST_LOG: info + depends_on: + postgres: + condition: service_healthy + + frontend: + build: + context: ../frontend + dockerfile: Dockerfile + args: + API_BASE: ${API_BASE:-http://localhost:3001} + container_name: likwid-demo-frontend + restart: unless-stopped + ports: + - "${FRONTEND_PORT:-4322}:4321" + environment: + API_BASE: ${API_BASE:-http://localhost:3001} + depends_on: + - backend + +volumes: + likwid_demo_data: diff --git a/compose/dev.yml b/compose/dev.yml new file mode 100644 index 0000000..4ce8a56 --- /dev/null +++ b/compose/dev.yml @@ -0,0 +1,17 @@ +version: "3.9" + +services: + postgres: + image: postgres:16 + container_name: likwid-postgres + ports: + - "5432:5432" + environment: + POSTGRES_USER: ${POSTGRES_USER} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_DB: ${POSTGRES_DB} + volumes: + - pgdata:/var/lib/postgresql/data + +volumes: + pgdata: diff --git a/compose/production.yml b/compose/production.yml new file mode 100644 index 0000000..ef066d7 --- /dev/null +++ b/compose/production.yml @@ -0,0 +1,60 @@ +version: "3.9" + +# Production deployment - clean instance without demo data +# Usage: podman-compose -f compose/production.yml up -d + +services: + postgres: + image: postgres:16 + container_name: likwid-prod-db + restart: unless-stopped + ports: + - "${DB_PORT:-5432}:5432" + environment: + POSTGRES_USER: ${POSTGRES_USER:-likwid} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_DB: ${POSTGRES_DB:-likwid_prod} + volumes: + - likwid_prod_data:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-likwid}"] + interval: 10s + timeout: 5s + retries: 5 + + backend: + build: + context: ../backend + dockerfile: Dockerfile + container_name: likwid-prod-backend + restart: unless-stopped + ports: + - "${BACKEND_PORT:-3000}:3000" + environment: + DATABASE_URL: 
postgres://${POSTGRES_USER:-likwid}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB:-likwid_prod} + JWT_SECRET: ${JWT_SECRET} + SERVER_HOST: 0.0.0.0 + SERVER_PORT: 3000 + DEMO_MODE: "false" + RUST_LOG: info + depends_on: + postgres: + condition: service_healthy + + frontend: + build: + context: ../frontend + dockerfile: Dockerfile + args: + API_BASE: ${API_BASE:-http://localhost:3000} + container_name: likwid-prod-frontend + restart: unless-stopped + ports: + - "${FRONTEND_PORT:-4321}:4321" + environment: + API_BASE: ${API_BASE:-http://localhost:3000} + depends_on: + - backend + +volumes: + likwid_prod_data: diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..6804f03 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,35 @@ +# Likwid Documentation + +Welcome to the Likwid documentation. This guide covers everything you need to know to use and administer a Likwid instance. + +## Documentation Structure + +### For Users +- [Getting Started](user/getting-started.md) - First steps with Likwid +- [Communities](user/communities.md) - Creating and participating in communities +- [Proposals & Voting](user/voting.md) - Understanding the decision-making process +- [Delegation](user/delegation.md) - How liquid delegation works +- [Account Settings](user/settings.md) - Managing your account + +### For System Administrators +- [Installation](admin/installation.md) - Deploying Likwid +- [Configuration](admin/configuration.md) - Server and instance settings +- [Database](admin/database.md) - PostgreSQL setup and maintenance +- [Plugins](admin/plugins.md) - Managing plugins and voting methods +- [Security](admin/security.md) - Security best practices +- [Backup & Recovery](admin/backup.md) - Data protection + +### Reference +- [API Reference](reference/api.md) - REST API documentation +- [Voting Methods](reference/voting-methods.md) - Detailed voting algorithm explanations +- [Glossary](reference/glossary.md) - Terms and definitions + +## Quick Links + +- **Demo Instance**: Explore a live demo at `/demo` +- **Source Code**: [Codeberg](https://codeberg.org/likwid/likwid) +- **License**: AGPLv3 + +## Contributing + +See [CONTRIBUTING.md](../CONTRIBUTING.md) for guidelines on contributing to Likwid. diff --git a/docs/admin/backup.md b/docs/admin/backup.md new file mode 100644 index 0000000..951629c --- /dev/null +++ b/docs/admin/backup.md @@ -0,0 +1,126 @@ +# Backup & Recovery + +Protecting your Likwid data. 
+ +## What to Backup + +| Component | Location | Priority | +|-----------|----------|----------| +| PostgreSQL database | Database server | Critical | +| Uploaded files | `/uploads` (if configured) | High | +| Configuration | `.env` files | High | +| SSL certificates | Reverse proxy | Medium | + +## Database Backup + +### Manual Backup + +```bash +# Full backup +pg_dump -h localhost -U likwid -F c likwid_prod > backup_$(date +%Y%m%d).dump + +# SQL format (readable) +pg_dump -h localhost -U likwid likwid_prod > backup_$(date +%Y%m%d).sql +``` + +### Automated Backup Script + +```bash +#!/bin/bash +# /etc/cron.daily/likwid-backup + +BACKUP_DIR="/var/backups/likwid" +DATE=$(date +%Y%m%d_%H%M%S) +RETENTION_DAYS=30 + +# Create backup +pg_dump -h localhost -U likwid -F c likwid_prod > "$BACKUP_DIR/likwid_$DATE.dump" + +# Compress +gzip "$BACKUP_DIR/likwid_$DATE.dump" + +# Remove old backups +find "$BACKUP_DIR" -name "*.dump.gz" -mtime +$RETENTION_DAYS -delete + +# Optional: sync to remote storage +# aws s3 cp "$BACKUP_DIR/likwid_$DATE.dump.gz" s3://bucket/backups/ +``` + +### Containerized Backup + +```bash +# If using podman-compose +podman exec likwid-prod-db pg_dump -U likwid likwid_prod > backup.sql +``` + +## Recovery + +### Full Restore + +```bash +# Drop and recreate database +psql -h localhost -U postgres -c "DROP DATABASE IF EXISTS likwid_prod;" +psql -h localhost -U postgres -c "CREATE DATABASE likwid_prod OWNER likwid;" + +# Restore from dump +pg_restore -h localhost -U likwid -d likwid_prod backup.dump + +# Or from SQL +psql -h localhost -U likwid likwid_prod < backup.sql +``` + +### Point-in-Time Recovery + +For critical installations, configure PostgreSQL WAL archiving: + +```ini +# postgresql.conf +archive_mode = on +archive_command = 'cp %p /var/lib/postgresql/archive/%f' +``` + +## Demo Instance Reset + +The demo instance can be reset to initial state: + +```bash +# Windows +.\scripts\demo-reset.ps1 + +# Linux +./scripts/demo-reset.sh +``` + +This removes all data and re-runs migrations with seed data. + +## Disaster Recovery Plan + +### Preparation +1. Document backup procedures +2. Test restores regularly (monthly) +3. Keep offsite backup copies +4. Document recovery steps + +### Recovery Steps +1. Provision new server if needed +2. Install Likwid dependencies +3. Restore database from backup +4. Restore configuration files +5. Start services +6. Verify functionality +7. Update DNS if server changed + +### Recovery Time Objective (RTO) +Target: 4 hours for full recovery + +### Recovery Point Objective (RPO) +Target: 24 hours of data loss maximum (with daily backups) + +## Testing Backups + +Monthly backup test procedure: +1. Create test database +2. Restore backup to test database +3. Run verification queries +4. Document results +5. Delete test database diff --git a/docs/admin/configuration.md b/docs/admin/configuration.md new file mode 100644 index 0000000..2cab7c6 --- /dev/null +++ b/docs/admin/configuration.md @@ -0,0 +1,99 @@ +# Configuration + +Likwid is configured through environment variables and database settings. 
+ +## Environment Variables + +### Backend + +| Variable | Required | Default | Description | +|----------|----------|---------|-------------| +| `DATABASE_URL` | Yes | - | PostgreSQL connection string | +| `JWT_SECRET` | Yes | - | Secret for signing JWT tokens | +| `SERVER_HOST` | No | `127.0.0.1` | Bind address | +| `SERVER_PORT` | No | `3000` | HTTP port | +| `DEMO_MODE` | No | `false` | Enable demo features | +| `RUST_LOG` | No | `info` | Log level (trace, debug, info, warn, error) | + +### Frontend + +| Variable | Required | Default | Description | +|----------|----------|---------|-------------| +| `API_BASE` | No | `http://localhost:3000` | Backend API URL | +| `PUBLIC_API_BASE` | No | Same as API_BASE | Public-facing API URL | + +## Instance Settings + +Managed via the Admin panel or API: + +### General +- **Instance Name** - Display name for your Likwid instance +- **Instance Description** - Brief description +- **Registration** - Open, invite-only, or closed +- **Email Verification** - Required or optional + +### Features +- **Community Creation** - Who can create communities +- **Public Read Access** - Allow anonymous browsing +- **Federation** - Enable cross-instance communication + +### Plugins +- **Active Voting Methods** - Which methods are available +- **Default Voting Method** - Instance-wide default +- **Active Integrations** - GitLab, Matrix, etc. + +## Community Settings + +Each community can configure: + +```json +{ + "voting_method": "schulze", + "delegation_enabled": true, + "require_read_before_vote": true, + "min_discussion_days": 3, + "quorum_percentage": 25, + "transparency_level": "full" +} +``` + +### Voting Method Options +- `approval` - Approval voting +- `ranked_choice` - Instant runoff +- `schulze` - Condorcet method +- `star` - Score then automatic runoff +- `quadratic` - Voice credit allocation + +### Transparency Levels +- `full` - All votes visible after closing +- `anonymous` - Only totals visible +- `private` - Results only, no breakdown + +## API Configuration + +### Rate Limiting +Configure in backend settings: +- Requests per minute per IP +- Requests per minute per user +- Burst allowance + +### CORS +By default, CORS allows all origins in development. For production: +``` +CORS_ALLOWED_ORIGINS=https://likwid.example.org +``` + +## Logging + +### Log Levels +- `trace` - Very detailed debugging +- `debug` - Debugging information +- `info` - Normal operation +- `warn` - Warning conditions +- `error` - Error conditions + +### Log Format +Logs are output in JSON format for easy parsing: +```json +{"timestamp":"2026-01-27T12:00:00Z","level":"INFO","message":"Server started","port":3000} +``` diff --git a/docs/admin/database.md b/docs/admin/database.md new file mode 100644 index 0000000..76dc754 --- /dev/null +++ b/docs/admin/database.md @@ -0,0 +1,133 @@ +# Database Administration + +Likwid uses PostgreSQL 16+ for data storage. + +## Connection + +```bash +# Connection string format +DATABASE_URL=postgres://user:password@host:port/database + +# Example +DATABASE_URL=postgres://likwid:secret@localhost:5432/likwid_prod +``` + +## Migrations + +Migrations are managed with SQLx. + +### Running Migrations + +```bash +cd backend +export DATABASE_URL="postgres://..." 
+sqlx migrate run +``` + +### Checking Status + +```bash +sqlx migrate info +``` + +### Creating New Migrations + +```bash +sqlx migrate add +``` + +## Key Tables + +| Table | Purpose | +|-------|---------| +| `users` | User accounts | +| `communities` | Organizations | +| `community_members` | Membership relationships | +| `proposals` | Decision items | +| `proposal_options` | Voting options | +| `votes` | Cast votes | +| `delegations` | Delegation relationships | +| `moderation_log` | Audit trail | +| `comments` | Discussion comments | +| `topics` | Categorization for delegation | + +## Backup + +### Full Backup + +```bash +pg_dump -h localhost -U likwid likwid_prod > backup_$(date +%Y%m%d).sql +``` + +### Automated Backups + +Set up a cron job: +```bash +0 3 * * * pg_dump -h localhost -U likwid likwid_prod | gzip > /backups/likwid_$(date +\%Y\%m\%d).sql.gz +``` + +### Restore + +```bash +psql -h localhost -U likwid likwid_prod < backup.sql +``` + +## Maintenance + +### Vacuum + +Run periodically to reclaim space: +```sql +VACUUM ANALYZE; +``` + +### Index Maintenance + +```sql +REINDEX DATABASE likwid_prod; +``` + +### Connection Monitoring + +```sql +SELECT * FROM pg_stat_activity WHERE datname = 'likwid_prod'; +``` + +## Performance + +### Key Indexes + +Likwid creates indexes on: +- User lookups (username, email) +- Community slugs +- Proposal status and dates +- Vote relationships + +### Query Analysis + +```sql +EXPLAIN ANALYZE SELECT * FROM proposals WHERE community_id = '...'; +``` + +## Security + +### User Permissions + +Create a dedicated database user: +```sql +CREATE USER likwid WITH PASSWORD 'strong_password'; +GRANT ALL PRIVILEGES ON DATABASE likwid_prod TO likwid; +``` + +### Connection Limits + +```sql +ALTER USER likwid CONNECTION LIMIT 50; +``` + +### SSL Connections + +For production, require SSL: +``` +DATABASE_URL=postgres://user:pass@host/db?sslmode=require +``` diff --git a/docs/admin/installation.md b/docs/admin/installation.md new file mode 100644 index 0000000..4c79ae0 --- /dev/null +++ b/docs/admin/installation.md @@ -0,0 +1,124 @@ +# Installation Guide + +This guide covers deploying Likwid for production use. + +## Requirements + +- **PostgreSQL 16+** +- **Rust 1.75+** (for building backend) +- **Node.js 20+** (for building frontend) +- **Container runtime** (Podman or Docker) - optional but recommended + +## Quick Start with Containers + +### 1. Clone the Repository + +```bash +git clone https://codeberg.org/likwid/likwid.git +cd likwid +``` + +### 2. Configure Environment + +```bash +cp compose/.env.production.example compose/.env.production +# Edit .env.production with your settings +``` + +Required settings: +- `POSTGRES_PASSWORD` - Strong database password +- `JWT_SECRET` - Random 64+ character string + +### 3. Deploy + +```bash +cd compose +podman-compose --env-file .env.production -f production.yml up -d +``` + +### 4. 
Access + +- Frontend: http://localhost:4321 +- Backend API: http://localhost:3000 + +## Manual Installation + +### Backend + +```bash +cd backend + +# Install dependencies and build +cargo build --release + +# Run migrations +export DATABASE_URL="postgres://user:pass@localhost/likwid" +sqlx migrate run + +# Start server +./target/release/likwid +``` + +### Frontend + +```bash +cd frontend + +# Install dependencies +npm ci + +# Build for production +npm run build + +# Start server +node ./dist/server/entry.mjs +``` + +## Configuration Files + +| File | Purpose | +|------|---------| +| `compose/production.yml` | Production container deployment | +| `compose/demo.yml` | Demo instance deployment | +| `compose/.env.production.example` | Environment template | +| `backend/.env` | Backend configuration | + +## Reverse Proxy + +For production, use a reverse proxy (nginx, Caddy) with: +- HTTPS termination +- WebSocket support (for real-time features) +- Proper headers + +Example nginx config: + +```nginx +server { + listen 443 ssl http2; + server_name likwid.example.org; + + ssl_certificate /path/to/cert.pem; + ssl_certificate_key /path/to/key.pem; + + location / { + proxy_pass http://127.0.0.1:4321; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + } + + location /api { + proxy_pass http://127.0.0.1:3000; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + } +} +``` + +## Next Steps + +- [Configuration](configuration.md) - Detailed settings +- [Database](database.md) - Database management +- [Security](security.md) - Hardening your instance diff --git a/docs/admin/plugins.md b/docs/admin/plugins.md new file mode 100644 index 0000000..c579a7b --- /dev/null +++ b/docs/admin/plugins.md @@ -0,0 +1,95 @@ +# Plugin Management + +Likwid uses a plugin architecture for extensibility. + +## Plugin Types + +### Core Plugins (Cannot Disable) +- `core.auth` - Authentication system +- `core.communities` - Community management +- `core.proposals` - Proposal system + +### Voting Plugins +- `voting.approval` - Approval voting +- `voting.ranked_choice` - Instant runoff +- `voting.schulze` - Condorcet method +- `voting.star` - STAR voting +- `voting.quadratic` - Quadratic voting + +### Feature Plugins +- `feature.delegation` - Liquid delegation +- `feature.deliberation` - Structured discussion + +### Integration Plugins +- `integration.gitlab` - GitLab integration +- `integration.matrix` - Matrix chat integration + +## Managing Plugins + +### Via Admin Panel + +1. Go to **Admin** → **Plugins** +2. View installed plugins +3. Enable/disable as needed +4. 
Configure plugin settings + +### Via API + +```bash +# List plugins +curl -H "Authorization: Bearer $TOKEN" \ + http://localhost:3000/api/plugins/defaults + +# Enable plugin +curl -X POST -H "Authorization: Bearer $TOKEN" \ + http://localhost:3000/api/plugins/instance/voting.quadratic + +# Disable plugin +curl -X DELETE -H "Authorization: Bearer $TOKEN" \ + http://localhost:3000/api/plugins/instance/voting.quadratic +``` + +## Voting Method Configuration + +### Platform Level +Enable/disable voting methods for the entire instance: + +```bash +# List available methods +curl http://localhost:3000/api/voting-methods + +# Enable a method +curl -X PUT -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"is_active": true}' \ + http://localhost:3000/api/voting-methods/quadratic +``` + +### Community Level +Communities can configure which enabled methods they use: + +```bash +# Set community voting methods +curl -X PUT -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"methods": ["schulze", "approval"]}' \ + http://localhost:3000/api/communities/{id}/voting-methods +``` + +## Plugin Security + +### Signed Plugins +For third-party plugins, require signatures: +- Instance setting: `require_signed_plugins: true` +- Validates plugin authenticity + +### Capabilities +Plugins request specific capabilities: +- Database access (read/write) +- Outbound HTTP +- Background jobs +- UI components + +## Developing Plugins + +See the [Plugin Development Guide](../reference/plugin-development.md) for creating custom plugins. diff --git a/docs/admin/security.md b/docs/admin/security.md new file mode 100644 index 0000000..703188c --- /dev/null +++ b/docs/admin/security.md @@ -0,0 +1,101 @@ +# Security Best Practices + +Securing your Likwid instance. + +## Authentication + +### JWT Tokens +- Use a strong, random `JWT_SECRET` (64+ characters) +- Tokens expire after 24 hours by default +- Refresh tokens are not stored server-side + +### Password Policy +- Minimum 8 characters (configurable) +- Bcrypt hashing with cost factor 12 +- No password in logs or error messages + +### Two-Factor Authentication +Enable 2FA support for users: +- TOTP (Time-based One-Time Password) +- Backup codes for recovery + +## Network Security + +### HTTPS +Always use HTTPS in production: +- Obtain certificates (Let's Encrypt recommended) +- Configure reverse proxy for TLS termination +- Enable HSTS headers + +### CORS +Restrict CORS in production: +``` +CORS_ALLOWED_ORIGINS=https://likwid.example.org +``` + +### Rate Limiting +Protect against abuse: +- 100 requests/minute per IP (default) +- 1000 requests/minute per authenticated user +- Configurable per endpoint + +## Database Security + +### Connection +- Use SSL for database connections +- Dedicated database user with minimal privileges +- Strong, unique password + +### Backups +- Regular automated backups +- Encrypted backup storage +- Test restore procedures + +## API Security + +### Input Validation +All inputs are validated: +- Type checking +- Length limits +- Sanitization + +### SQL Injection +- Parameterized queries only (SQLx) +- No raw SQL string concatenation + +### XSS Prevention +- HTML escaping in templates +- Content Security Policy headers +- No inline scripts in production + +## Moderation Audit Trail + +All moderation actions are logged: +- Who performed the action +- What action was taken +- Why (reason required) +- When it happened + +Logs are immutable and tamper-evident. 
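As a rough sketch of what such an entry carries (field names are illustrative, not the exact `moderation_log` schema), a single logged action could serialize to:

```json
{
  "id": "uuid",
  "community_id": "uuid",
  "actor_id": "uuid",
  "action": "comment_removed",
  "target_id": "uuid",
  "reason": "Violates community code of conduct",
  "created_at": "2026-01-27T12:00:00Z"
}
```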
+ +## Updates + +Keep Likwid updated: +- Watch the repository for security announcements +- Apply patches promptly +- Test updates in staging first + +## Incident Response + +If you discover a security issue: +1. Document the incident +2. Assess impact +3. Contain the breach +4. Notify affected users if required +5. Report to Likwid security team + +## Reporting Vulnerabilities + +Report security issues to: security@likwid.org + +We follow responsible disclosure practices. diff --git a/docs/reference/api.md b/docs/reference/api.md new file mode 100644 index 0000000..db15602 --- /dev/null +++ b/docs/reference/api.md @@ -0,0 +1,256 @@ +# API Reference + +Likwid exposes a REST API for all functionality. + +## Base URL + +``` +http://localhost:3000/api +``` + +## Authentication + +Most endpoints require a JWT token: + +```bash +curl -H "Authorization: Bearer YOUR_TOKEN" http://localhost:3000/api/... +``` + +### Login + +```http +POST /api/auth/login +Content-Type: application/json + +{ + "username": "user", + "password": "password" +} +``` + +Response: +```json +{ + "token": "eyJ...", + "user": { + "id": "uuid", + "username": "user", + "display_name": "User Name" + } +} +``` + +### Register + +```http +POST /api/auth/register +Content-Type: application/json + +{ + "username": "newuser", + "email": "user@example.com", + "password": "password", + "display_name": "New User" +} +``` + +## Communities + +### List Communities + +```http +GET /api/communities +``` + +### Get Community + +```http +GET /api/communities/{id} +``` + +### Create Community + +```http +POST /api/communities +Authorization: Bearer TOKEN +Content-Type: application/json + +{ + "name": "My Community", + "slug": "my-community", + "description": "Description here" +} +``` + +### Join Community + +```http +POST /api/communities/{id}/join +Authorization: Bearer TOKEN +``` + +### Leave Community + +```http +POST /api/communities/{id}/leave +Authorization: Bearer TOKEN +``` + +## Proposals + +### List Proposals + +```http +GET /api/communities/{id}/proposals +``` + +### Get Proposal + +```http +GET /api/proposals/{id} +``` + +### Create Proposal + +```http +POST /api/communities/{id}/proposals +Authorization: Bearer TOKEN +Content-Type: application/json + +{ + "title": "Proposal Title", + "description": "Full description", + "voting_method": "approval", + "options": [ + {"label": "Option A", "description": "..."}, + {"label": "Option B", "description": "..."} + ] +} +``` + +### Vote + +```http +POST /api/proposals/{id}/vote +Authorization: Bearer TOKEN +Content-Type: application/json + +{ + "option_ids": ["uuid1", "uuid2"] +} +``` + +### Vote (Ranked) + +```http +POST /api/proposals/{id}/vote/ranked +Authorization: Bearer TOKEN +Content-Type: application/json + +{ + "rankings": [ + {"option_id": "uuid1", "rank": 1}, + {"option_id": "uuid2", "rank": 2} + ] +} +``` + +### Vote (Quadratic) + +```http +POST /api/proposals/{id}/vote/quadratic +Authorization: Bearer TOKEN +Content-Type: application/json + +{ + "allocations": { + "uuid1": 5, + "uuid2": 3 + } +} +``` + +## Delegations + +### List Delegations + +```http +GET /api/delegations +Authorization: Bearer TOKEN +``` + +### Create Delegation + +```http +POST /api/delegations +Authorization: Bearer TOKEN +Content-Type: application/json + +{ + "delegate_id": "user-uuid", + "community_id": "community-uuid", + "scope": "topic", + "topic_id": "topic-uuid" +} +``` + +### Revoke Delegation + +```http +DELETE /api/delegations/{id} +Authorization: Bearer TOKEN +``` + +## Users + +### Get 
Current User + +```http +GET /api/users/me +Authorization: Bearer TOKEN +``` + +### Update Profile + +```http +PUT /api/users/me +Authorization: Bearer TOKEN +Content-Type: application/json + +{ + "display_name": "New Name", + "bio": "Updated bio" +} +``` + +## Demo Endpoints + +### Demo Status + +```http +GET /api/demo/status +``` + +### Reset Demo (Admin) + +```http +POST /api/demo/reset +Authorization: Bearer TOKEN +``` + +## Error Responses + +```json +{ + "error": "Error message", + "code": "ERROR_CODE" +} +``` + +Common status codes: +- `400` - Bad request +- `401` - Unauthorized +- `403` - Forbidden +- `404` - Not found +- `422` - Validation error +- `500` - Server error diff --git a/docs/reference/glossary.md b/docs/reference/glossary.md new file mode 100644 index 0000000..aaac3be --- /dev/null +++ b/docs/reference/glossary.md @@ -0,0 +1,102 @@ +# Glossary + +Key terms and definitions used in Likwid. + +## A + +### Approval Voting +A voting method where voters can approve any number of options. Options are ranked by total approvals. + +## C + +### Community +An organizational unit in Likwid representing a group with shared governance needs. + +### Condorcet Winner +An option that would beat every other option in a head-to-head comparison. The Schulze method always selects a Condorcet winner if one exists. + +## D + +### Delegate +A user who receives delegated voting power from other users. + +### Delegation +The act of entrusting your vote to another user. Can be topic-based or community-wide. + +### Deliberation +The discussion phase of a proposal where members debate and refine ideas before voting. + +## I + +### Instance +A deployment of Likwid. Each instance is independent and can have its own configuration. + +### Instant Runoff +See Ranked Choice Voting. + +## L + +### Liquid Democracy +A form of democracy combining direct and representative democracy. Voters can vote directly or delegate their vote, and can change delegation at any time. + +## M + +### Moderation Log +A transparent, auditable record of all moderation actions taken in a community. + +### Member +A user who has joined a community and can participate in governance. + +## P + +### Plugin +A modular component that extends Likwid's functionality. Voting methods, integrations, and features are implemented as plugins. + +### Proposal +A decision item submitted to a community for deliberation and voting. + +### Pseudonym +A separate identity used for voting to preserve ballot privacy while maintaining accountability. + +## Q + +### Quadratic Voting +A voting method where the cost to cast multiple votes increases quadratically. Used to express intensity of preference. + +### Quorum +The minimum participation required for a vote to be valid. + +## R + +### Ranked Choice Voting +A voting method where voters rank options. Also called Instant Runoff Voting (IRV). + +## S + +### Schulze Method +A Condorcet voting method that uses pairwise comparisons and strongest paths to determine the winner. + +### Scope +The extent of a delegation - can be community-wide or limited to specific topics. + +### STAR Voting +Score Then Automatic Runoff - voters rate options 0-5 stars, top two enter automatic runoff. + +## T + +### Topic +A category or subject area used to organize proposals and delegations. + +### Transitive Delegation +When delegation chains are followed (A delegates to B, B delegates to C, so A's vote goes to C). + +## V + +### Voice Credits +The currency in quadratic voting that voters allocate across options. 
+ +### Voting Identity +A pseudonymous identity separate from civic identity, used to maintain ballot privacy. + +### Voting Method +The algorithm used to aggregate individual votes into a collective decision. diff --git a/docs/reference/voting-methods.md b/docs/reference/voting-methods.md new file mode 100644 index 0000000..6a705de --- /dev/null +++ b/docs/reference/voting-methods.md @@ -0,0 +1,141 @@ +# Voting Methods Reference + +Detailed explanations of the voting methods available in Likwid. + +## Approval Voting + +### How It Works +- Voters select all options they approve of +- Each selection counts as one vote +- Options are ranked by total approvals + +### Best For +- Simple yes/no decisions +- Selecting multiple winners +- Low cognitive load + +### Example +``` +Options: A, B, C, D +Voter 1 approves: A, B +Voter 2 approves: B, C +Voter 3 approves: A, C + +Results: +A: 2 votes +B: 2 votes +C: 2 votes +D: 0 votes +``` + +## Ranked Choice (Instant Runoff) + +### How It Works +1. Voters rank options from most to least preferred +2. If no option has majority, eliminate lowest +3. Redistribute eliminated votes to next preference +4. Repeat until winner has majority + +### Best For +- Single winner elections +- Reducing strategic voting +- Finding consensus candidate + +### Example +``` +Round 1: A=40%, B=35%, C=25% +(C eliminated, votes transfer) +Round 2: A=45%, B=55% +Winner: B +``` + +## Schulze Method + +### How It Works +1. Create pairwise comparison matrix +2. Find strongest paths between all pairs +3. Option X beats Y if strongest path X→Y > Y→X +4. Winner beats all others (Condorcet winner) + +### Best For +- Complex multi-option decisions +- When Condorcet winner exists +- Technical/policy decisions + +### Properties +- Condorcet consistent +- Clone independent +- Reversal symmetric + +## STAR Voting + +### How It Works +1. Voters rate each option 0-5 stars +2. Sum all ratings for each option +3. Top two scorers enter automatic runoff +4. In runoff, option preferred by more voters wins + +### Best For +- Balancing expressiveness and simplicity +- Reducing strategic voting +- When intensity of preference matters + +### Example +``` +Scores: A=4.2 avg, B=3.8 avg, C=3.5 avg +Runoff: A vs B +Voters preferring A: 55% +Voters preferring B: 45% +Winner: A +``` + +## Quadratic Voting + +### How It Works +- Each voter receives fixed voice credits (default: 100) +- Cost to cast N votes for an option = N² +- 1 vote = 1 credit, 2 votes = 4 credits, 3 votes = 9 credits +- Voters allocate credits across options + +### Best For +- Expressing intensity of preference +- Resource allocation decisions +- Preventing tyranny of majority + +### Example +``` +100 credits available +Option A: 5 votes (25 credits) +Option B: 3 votes (9 credits) +Option C: 8 votes (64 credits) +Remaining: 2 credits +``` + +### Strategic Considerations +- Spreading votes is efficient +- Strong preferences cost exponentially more +- Encourages honest preference revelation + +## Method Comparison + +| Method | Complexity | Expressiveness | Strategic Resistance | +|--------|------------|----------------|---------------------| +| Approval | Low | Low | Medium | +| Ranked Choice | Medium | High | Medium | +| Schulze | High | High | High | +| STAR | Medium | High | High | +| Quadratic | Medium | Very High | High | + +## Choosing a Method + +### For Simple Decisions +Use **Approval** - easy to understand, quick to vote. + +### For Elections +Use **Ranked Choice** or **STAR** - finds consensus, reduces spoiler effect. 
+ +### For Technical Decisions +Use **Schulze** - handles complex preference structures. + +### For Budget/Resource Allocation +Use **Quadratic** - captures intensity of preference. diff --git a/docs/user/communities.md b/docs/user/communities.md new file mode 100644 index 0000000..66a2316 --- /dev/null +++ b/docs/user/communities.md @@ -0,0 +1,61 @@ +# Communities + +Communities are the organizational units in Likwid. Each community represents a group with shared governance needs. + +## What is a Community? + +A community in Likwid can represent: +- An open source project +- A civic organization or NGO +- A political movement or party +- A professional association +- A federated network of local chapters + +## Community Structure + +### Members +- **Admin** - Full control over community settings +- **Moderator** - Can moderate content and manage proposals +- **Member** - Can participate, vote, and create proposals +- **Observer** - Read-only access (in some configurations) + +### Settings +Each community can configure: +- Default voting method +- Delegation rules +- Proposal requirements +- Moderation policies + +## Joining a Community + +1. Navigate to the community page +2. Click **Join Community** +3. Wait for approval if required +4. Start participating! + +## Creating a Community + +If enabled on your instance: +1. Go to **Communities** → **Create Community** +2. Provide: + - Name and slug (URL identifier) + - Description + - Initial settings +3. Invite founding members + +## Community Governance + +Each community manages its own: +- **Proposals** - Decisions to be made +- **Delegation networks** - Trust relationships +- **Moderation log** - Transparent action history +- **Voting methods** - How decisions are made + +## Leaving a Community + +You can leave a community at any time: +1. Go to the community page +2. Click **Leave Community** +3. Confirm your choice + +Your past votes remain recorded but anonymized. diff --git a/docs/user/delegation.md b/docs/user/delegation.md new file mode 100644 index 0000000..133f8e1 --- /dev/null +++ b/docs/user/delegation.md @@ -0,0 +1,75 @@ +# Liquid Delegation + +Delegation allows you to entrust your vote to someone you trust, while retaining the ability to vote directly when you choose. + +## How Delegation Works + +1. You choose a delegate for a specific scope +2. When a proposal matches that scope, your delegate votes for you +3. You can always override by voting directly +4. You can revoke delegation at any time + +## Delegation Scopes + +### Community-Wide +Your delegate votes on all proposals in a community. + +### Topic-Based +Your delegate only votes on proposals tagged with specific topics: +- Architecture +- Budget +- Policy +- Security +- etc. + +## Creating a Delegation + +1. Go to **Settings** → **Delegations** +2. Click **Add Delegation** +3. Select: + - The delegate (user you trust) + - The community + - The scope (all topics or specific topics) +4. Confirm + +## Finding Delegates + +Look for users who: +- Have a delegate profile with stated positions +- Are active and informed in the community +- Share your values on relevant topics + +Delegate profiles show: +- Their voting policy +- Number of current delegators +- Topic expertise + +## Delegation Chains + +Delegations can be transitive: +- You delegate to Alice +- Alice delegates to Bob +- Bob's vote counts for all three + +Likwid detects and prevents cycles. 
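The chain-following logic itself is small. The sketch below is not Likwid's actual implementation, but it illustrates the idea: walk the delegation map until a user with no outgoing delegation is reached, and reject any chain that revisits a user.

```rust
use std::collections::{HashMap, HashSet};

/// Follow a delegation chain to its end.
/// Returns the user who ultimately casts the vote, or `None` if the
/// chain loops back on itself or `start` has not delegated at all.
fn resolve_delegate(start: &str, delegations: &HashMap<&str, &str>) -> Option<String> {
    let mut seen = HashSet::new();
    let mut current = start;
    while let Some(&next) = delegations.get(current) {
        if !seen.insert(current) {
            // Already visited this user: the chain is a cycle, reject it.
            return None;
        }
        current = next;
    }
    // `current` has no outgoing delegation, so they vote directly.
    if current == start { None } else { Some(current.to_string()) }
}

fn main() {
    // You -> Alice -> Bob: Bob's vote counts for all three.
    let chain = HashMap::from([("you", "alice"), ("alice", "bob")]);
    assert_eq!(resolve_delegate("you", &chain), Some("bob".to_string()));

    // Alice -> Bob -> Alice: a cycle, which must be rejected.
    let cycle = HashMap::from([("alice", "bob"), ("bob", "alice")]);
    assert_eq!(resolve_delegate("alice", &cycle), None);
}
```

In practice the same check can be applied when a delegation is created, so cycles are rejected up front rather than at vote-resolution time.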
+ +## Transparency + +All delegations are visible: +- See who delegates to whom +- Understand how votes flow +- Accountability is built-in + +## Revoking Delegation + +1. Go to **Settings** → **Delegations** +2. Find the delegation to revoke +3. Click **Revoke** +4. The delegation ends immediately + +## Best Practices + +- **Delegate to active members** who participate regularly +- **Use topic-based delegation** for specialized decisions +- **Review your delegations** periodically +- **Vote directly** on issues you care deeply about diff --git a/docs/user/getting-started.md b/docs/user/getting-started.md new file mode 100644 index 0000000..435fee9 --- /dev/null +++ b/docs/user/getting-started.md @@ -0,0 +1,64 @@ +# Getting Started with Likwid + +Likwid is a modular governance platform for distributed organizations. This guide will help you get started as a participant. + +## Creating an Account + +1. Navigate to your Likwid instance +2. Click **Register** in the top navigation +3. Fill in your details: + - Username (unique identifier) + - Email address + - Display name (shown to others) + - Password +4. Verify your email if required by the instance + +## Exploring Without an Account + +Many Likwid instances allow public browsing: +- View communities and their descriptions +- Read proposals and their discussions +- See voting results and delegation networks +- Explore moderation logs + +To participate (vote, comment, create proposals), you'll need an account. + +## Your First Steps + +### 1. Browse Communities +Visit the **Communities** page to see available organizations. Each community has: +- A description of its purpose +- Active proposals +- Member list +- Governance settings + +### 2. Join a Community +Click on a community to view its details, then click **Join** to become a member. Some communities may require approval. + +### 3. Read Active Proposals +Before voting, read the full proposal text and deliberation history. Likwid encourages informed participation. + +### 4. Participate +- **Comment** on proposals during the discussion phase +- **Vote** when voting opens +- **Delegate** your vote to trusted members on specific topics + +## Understanding the Interface + +### Navigation +- **Communities** - Browse and join organizations +- **Proposals** - View all proposals across communities +- **Dashboard** - Your personalized activity feed (logged in) +- **Settings** - Account and notification preferences + +### Proposal Statuses +- **Draft** - Being written, not yet published +- **Discussion** - Open for comments and deliberation +- **Voting** - Voting is active +- **Closed** - Voting complete, decision made + +## Next Steps + +- Learn about [Communities](communities.md) +- Understand [Voting Methods](voting.md) +- Explore [Delegation](delegation.md) diff --git a/docs/user/settings.md b/docs/user/settings.md new file mode 100644 index 0000000..cf4c666 --- /dev/null +++ b/docs/user/settings.md @@ -0,0 +1,73 @@ +# Account Settings + +Manage your Likwid account and preferences. + +## Profile Settings + +### Display Name +The name shown to other users. Can be different from your username. + +### Bio +Optional description visible on your profile. + +### Avatar +Upload a profile picture (if enabled on the instance). + +## Security + +### Password +Change your password: +1. Go to **Settings** → **Security** +2. Enter current password +3. Enter new password twice +4. Save + +### Two-Factor Authentication +If available: +1. Enable 2FA in security settings +2. 
Scan QR code with authenticator app +3. Enter verification code +4. Save backup codes securely + +## Notifications + +Configure how you receive updates: + +| Notification Type | Options | +|-------------------|---------| +| Proposal updates | Email, In-app, None | +| Vote reminders | Email, In-app, None | +| Delegation activity | Email, In-app, None | +| Mentions | Email, In-app, None | +| Moderation actions | Email, In-app, None | + +## Privacy + +### Voting Identity +Your civic identity (username) and voting identity (pseudonym) are separate. This allows: +- Public participation in discussions +- Private voting + +### Data Export +Request a copy of your data: +1. Go to **Settings** → **Privacy** +2. Click **Export My Data** +3. Download when ready + +## Theme + +Choose your preferred appearance: +- **Light** - Light background, dark text +- **Dark** - Dark background, light text +- **System** - Follow your device settings + +## Danger Zone + +### Delete Account +Permanently delete your account: +1. Go to **Settings** → **Account** +2. Click **Delete Account** +3. Confirm with password +4. Your account is permanently removed + +Note: Past votes remain in the system but are anonymized. diff --git a/docs/user/voting.md b/docs/user/voting.md new file mode 100644 index 0000000..46343f7 --- /dev/null +++ b/docs/user/voting.md @@ -0,0 +1,83 @@ +# Proposals & Voting + +Likwid supports multiple voting methods to match different decision-making needs. + +## Proposal Lifecycle + +### 1. Draft +The author creates a proposal with: +- Title and description +- Voting options +- Selected voting method + +### 2. Discussion Phase +- Members read and discuss the proposal +- Comments are organized for constructive deliberation +- The author may refine the proposal based on feedback + +### 3. Voting Phase +- Voting opens for a defined period +- Members cast votes using the specified method +- Delegated votes are resolved + +### 4. Closed +- Results are calculated and displayed +- The decision is recorded permanently + +## Voting Methods + +### Approval Voting +**Best for:** Simple yes/no decisions or selecting multiple options + +- Select all options you approve of +- Options are ranked by total approvals +- Simple and intuitive + +### Ranked Choice (Instant Runoff) +**Best for:** Electing a single winner from many candidates + +- Rank options from most to least preferred +- Lowest-ranked options are eliminated in rounds +- Votes transfer to next preference + +### Schulze Method +**Best for:** Complex decisions requiring nuanced preferences + +- Rank all options +- Uses pairwise comparisons +- Condorcet-consistent (beats all others head-to-head) + +### STAR Voting (Score Then Automatic Runoff) +**Best for:** Balancing expressiveness with simplicity + +- Rate each option 0-5 stars +- Top two scorers enter automatic runoff +- Prevents strategic voting + +### Quadratic Voting +**Best for:** Expressing intensity of preference + +- Allocate voice credits across options +- Cost increases quadratically (1 vote = 1 credit, 2 votes = 4 credits) +- Prevents tyranny of the majority + +## Casting Your Vote + +1. Open the proposal during the voting phase +2. Follow the voting method instructions +3. Submit your vote +4. 
You can change your vote until voting closes + +## Delegation and Voting + +If you've delegated your vote on a topic: +- Your delegate votes on your behalf +- You can override by voting directly +- Delegations are transparent + +## Viewing Results + +After voting closes: +- See total vote counts +- View method-specific details (rounds, pairwise matrices) +- Results are permanent and auditable diff --git a/frontend/.gitignore b/frontend/.gitignore new file mode 100644 index 0000000..16d54bb --- /dev/null +++ b/frontend/.gitignore @@ -0,0 +1,24 @@ +# build output +dist/ +# generated types +.astro/ + +# dependencies +node_modules/ + +# logs +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* + + +# environment variables +.env +.env.production + +# macOS-specific files +.DS_Store + +# jetbrains setting folder +.idea/ diff --git a/frontend/Dockerfile b/frontend/Dockerfile new file mode 100644 index 0000000..4148376 --- /dev/null +++ b/frontend/Dockerfile @@ -0,0 +1,43 @@ +# Likwid Frontend Dockerfile +FROM node:20-slim as builder + +WORKDIR /app + +# Copy package files +COPY package*.json ./ + +# Install dependencies +RUN npm ci + +# Copy source +COPY . . + +# Build argument for API base URL +ARG API_BASE=http://localhost:3000 + +# Set environment for build +ENV PUBLIC_API_BASE=$API_BASE + +# Build the application +RUN npm run build + +# Runtime stage +FROM node:20-slim + +WORKDIR /app + +# Copy built application +COPY --from=builder /app/dist ./dist +COPY --from=builder /app/node_modules ./node_modules +COPY --from=builder /app/package.json ./ + +# Create non-root user +RUN useradd -r -s /bin/false likwid +USER likwid + +EXPOSE 4321 + +ENV HOST=0.0.0.0 +ENV PORT=4321 + +CMD ["node", "./dist/server/entry.mjs"] diff --git a/frontend/astro.config.mjs b/frontend/astro.config.mjs new file mode 100644 index 0000000..57519a3 --- /dev/null +++ b/frontend/astro.config.mjs @@ -0,0 +1,8 @@ +// @ts-check +import { defineConfig } from 'astro/config'; +import node from '@astrojs/node'; + +// https://astro.build/config +export default defineConfig({ + adapter: node({ mode: 'standalone' }), +}); diff --git a/frontend/package-lock.json b/frontend/package-lock.json new file mode 100644 index 0000000..294388d --- /dev/null +++ b/frontend/package-lock.json @@ -0,0 +1,5193 @@ +{ + "name": "frontend", + "version": "0.0.1", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "frontend", + "version": "0.0.1", + "dependencies": { + "@astrojs/node": "^9.0.0", + "astro": "^5.16.15" + } + }, + "node_modules/@astrojs/compiler": { + "version": "2.13.0", + "resolved": "https://registry.npmjs.org/@astrojs/compiler/-/compiler-2.13.0.tgz", + "integrity": "sha512-mqVORhUJViA28fwHYaWmsXSzLO9osbdZ5ImUfxBarqsYdMlPbqAqGJCxsNzvppp1BEzc1mJNjOVvQqeDN8Vspw==", + "license": "MIT" + }, + "node_modules/@astrojs/internal-helpers": { + "version": "0.7.5", + "resolved": "https://registry.npmjs.org/@astrojs/internal-helpers/-/internal-helpers-0.7.5.tgz", + "integrity": "sha512-vreGnYSSKhAjFJCWAwe/CNhONvoc5lokxtRoZims+0wa3KbHBdPHSSthJsKxPd8d/aic6lWKpRTYGY/hsgK6EA==", + "license": "MIT" + }, + "node_modules/@astrojs/markdown-remark": { + "version": "6.3.10", + "resolved": "https://registry.npmjs.org/@astrojs/markdown-remark/-/markdown-remark-6.3.10.tgz", + "integrity": "sha512-kk4HeYR6AcnzC4QV8iSlOfh+N8TZ3MEStxPyenyCtemqn8IpEATBFMTJcfrNW32dgpt6MY3oCkMM/Tv3/I4G3A==", + "license": "MIT", + "dependencies": { + "@astrojs/internal-helpers": "0.7.5", + "@astrojs/prism": "3.3.0", + "github-slugger": "^2.0.0", + 
"hast-util-from-html": "^2.0.3", + "hast-util-to-text": "^4.0.2", + "import-meta-resolve": "^4.2.0", + "js-yaml": "^4.1.1", + "mdast-util-definitions": "^6.0.0", + "rehype-raw": "^7.0.0", + "rehype-stringify": "^10.0.1", + "remark-gfm": "^4.0.1", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.1.2", + "remark-smartypants": "^3.0.2", + "shiki": "^3.19.0", + "smol-toml": "^1.5.2", + "unified": "^11.0.5", + "unist-util-remove-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "unist-util-visit-parents": "^6.0.2", + "vfile": "^6.0.3" + } + }, + "node_modules/@astrojs/node": { + "version": "9.5.2", + "resolved": "https://registry.npmjs.org/@astrojs/node/-/node-9.5.2.tgz", + "integrity": "sha512-85/x+FRwbNGDip1TzSGMiak31/6LvBhA8auqd9lLoHaM5XElk+uIfIr3KjJqucDojE0PtiLk1lMSwD9gd3YlGg==", + "license": "MIT", + "dependencies": { + "@astrojs/internal-helpers": "0.7.5", + "send": "^1.2.1", + "server-destroy": "^1.0.1" + }, + "peerDependencies": { + "astro": "^5.14.3" + } + }, + "node_modules/@astrojs/prism": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/@astrojs/prism/-/prism-3.3.0.tgz", + "integrity": "sha512-q8VwfU/fDZNoDOf+r7jUnMC2//H2l0TuQ6FkGJL8vD8nw/q5KiL3DS1KKBI3QhI9UQhpJ5dc7AtqfbXWuOgLCQ==", + "license": "MIT", + "dependencies": { + "prismjs": "^1.30.0" + }, + "engines": { + "node": "18.20.8 || ^20.3.0 || >=22.0.0" + } + }, + "node_modules/@astrojs/telemetry": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/@astrojs/telemetry/-/telemetry-3.3.0.tgz", + "integrity": "sha512-UFBgfeldP06qu6khs/yY+q1cDAaArM2/7AEIqQ9Cuvf7B1hNLq0xDrZkct+QoIGyjq56y8IaE2I3CTvG99mlhQ==", + "license": "MIT", + "dependencies": { + "ci-info": "^4.2.0", + "debug": "^4.4.0", + "dlv": "^1.1.3", + "dset": "^3.1.4", + "is-docker": "^3.0.0", + "is-wsl": "^3.1.0", + "which-pm-runs": "^1.1.0" + }, + "engines": { + "node": "18.20.8 || ^20.3.0 || >=22.0.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.6.tgz", + "integrity": "sha512-TeR9zWR18BvbfPmGbLampPMW+uW1NZnJlRuuHso8i87QZNq2JRF9i6RgxRqtEq+wQGsS19NNTWr2duhnE49mfQ==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.6" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/types": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.6.tgz", + "integrity": "sha512-0ZrskXVEHSWIqZM/sQZ4EV3jZJXRkio/WCxaqKZP1g//CEWEPSfeZFcms4XeKBCHU0ZKnIkdJeU/kF+eRp5lBg==", + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@capsizecss/unpack": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/@capsizecss/unpack/-/unpack-4.0.0.tgz", + "integrity": "sha512-VERIM64vtTP1C4mxQ5thVT9fK0apjPFobqybMtA1UdUujWka24ERHbRHFGmpbbhp73MhV+KSsHQH9C6uOTdEQA==", + "license": "MIT", + "dependencies": { + "fontkitten": "^1.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@emnapi/runtime": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.8.1.tgz", + "integrity": "sha512-mehfKSMWjjNol8659Z8KxEMrdSJDDot5SXMq00dM8BN4o+CLNXQ0xH2V7EchNHV4RmbZLmmPdEaXZc5H2FXmDg==", + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz", + "integrity": "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.12.tgz", + "integrity": "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.12.tgz", + "integrity": "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.12.tgz", + "integrity": "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.12.tgz", + "integrity": "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.12.tgz", + "integrity": "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.12.tgz", + "integrity": "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.12", + "resolved": 
"https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.12.tgz", + "integrity": "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.12.tgz", + "integrity": "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.12.tgz", + "integrity": "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.12.tgz", + "integrity": "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.12.tgz", + "integrity": "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.12.tgz", + "integrity": "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==", + "cpu": [ + "mips64el" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.12.tgz", + "integrity": "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.12.tgz", + "integrity": "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.12.tgz", + "integrity": "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==", + "cpu": [ + "s390x" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, 
+ "node_modules/@esbuild/linux-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.12.tgz", + "integrity": "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.12.tgz", + "integrity": "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.12.tgz", + "integrity": "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.12.tgz", + "integrity": "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.12.tgz", + "integrity": "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.12.tgz", + "integrity": "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.12.tgz", + "integrity": "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.12.tgz", + "integrity": "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.12.tgz", + "integrity": "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==", + "cpu": [ + "ia32" + ], + "license": "MIT", + 
"optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.12.tgz", + "integrity": "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@img/colour": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@img/colour/-/colour-1.0.0.tgz", + "integrity": "sha512-A5P/LfWGFSl6nsckYtjw9da+19jB8hkJ6ACTGcDfEJ0aE+l2n2El7dsVM7UVHZQ9s2lmYMWlrS21YLy2IR1LUw==", + "license": "MIT", + "optional": true, + "engines": { + "node": ">=18" + } + }, + "node_modules/@img/sharp-darwin-arm64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.5.tgz", + "integrity": "sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-arm64": "1.2.4" + } + }, + "node_modules/@img/sharp-darwin-x64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.5.tgz", + "integrity": "sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-x64": "1.2.4" + } + }, + "node_modules/@img/sharp-libvips-darwin-arm64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.2.4.tgz", + "integrity": "sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-darwin-x64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.2.4.tgz", + "integrity": "sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.2.4.tgz", + "integrity": "sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A==", + "cpu": [ + "arm" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm64": { + "version": "1.2.4", + "resolved": 
"https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.2.4.tgz", + "integrity": "sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-ppc64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-ppc64/-/sharp-libvips-linux-ppc64-1.2.4.tgz", + "integrity": "sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA==", + "cpu": [ + "ppc64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-riscv64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-riscv64/-/sharp-libvips-linux-riscv64-1.2.4.tgz", + "integrity": "sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA==", + "cpu": [ + "riscv64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-s390x": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.2.4.tgz", + "integrity": "sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ==", + "cpu": [ + "s390x" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-x64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.2.4.tgz", + "integrity": "sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-arm64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.2.4.tgz", + "integrity": "sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-x64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.2.4.tgz", + "integrity": "sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-linux-arm": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.5.tgz", + "integrity": "sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw==", + "cpu": [ + "arm" + ], + 
"license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-arm64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.5.tgz", + "integrity": "sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm64": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-ppc64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-ppc64/-/sharp-linux-ppc64-0.34.5.tgz", + "integrity": "sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA==", + "cpu": [ + "ppc64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-ppc64": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-riscv64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-riscv64/-/sharp-linux-riscv64-0.34.5.tgz", + "integrity": "sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw==", + "cpu": [ + "riscv64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-riscv64": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-s390x": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.34.5.tgz", + "integrity": "sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg==", + "cpu": [ + "s390x" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-s390x": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-x64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.5.tgz", + "integrity": "sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-x64": "1.2.4" + } + }, + "node_modules/@img/sharp-linuxmusl-arm64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.5.tgz", + "integrity": "sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg==", + "cpu": [ + "arm64" + ], + "license": 
"Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-arm64": "1.2.4" + } + }, + "node_modules/@img/sharp-linuxmusl-x64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.5.tgz", + "integrity": "sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-x64": "1.2.4" + } + }, + "node_modules/@img/sharp-wasm32": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.34.5.tgz", + "integrity": "sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw==", + "cpu": [ + "wasm32" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT", + "optional": true, + "dependencies": { + "@emnapi/runtime": "^1.7.0" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-arm64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-arm64/-/sharp-win32-arm64-0.34.5.tgz", + "integrity": "sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-ia32": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.34.5.tgz", + "integrity": "sha512-FV9m/7NmeCmSHDD5j4+4pNI8Cp3aW+JvLoXcTUo0IqyjSfAZJ8dIUmijx1qaJsIiU+Hosw6xM5KijAWRJCSgNg==", + "cpu": [ + "ia32" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-x64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.5.tgz", + "integrity": "sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "license": "MIT" + }, + "node_modules/@oslojs/encoding": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@oslojs/encoding/-/encoding-1.1.0.tgz", + "integrity": 
"sha512-70wQhgYmndg4GCPxPPxPGevRKqTIJ2Nh4OkiMWmDAVYsTQ+Ta7Sq+rPevXyXGdzr30/qZBnyOalCszoMxlyldQ==", + "license": "MIT" + }, + "node_modules/@rollup/pluginutils": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.3.0.tgz", + "integrity": "sha512-5EdhGZtnu3V88ces7s53hhfK5KSASnJZv8Lulpc04cWO3REESroJXg73DFsOmgbU2BhwV0E20bu2IDZb3VKW4Q==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-walker": "^2.0.2", + "picomatch": "^4.0.2" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "node_modules/@rollup/pluginutils/node_modules/estree-walker": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.56.0.tgz", + "integrity": "sha512-LNKIPA5k8PF1+jAFomGe3qN3bbIgJe/IlpDBwuVjrDKrJhVWywgnJvflMt/zkbVNLFtF1+94SljYQS6e99klnw==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.56.0.tgz", + "integrity": "sha512-lfbVUbelYqXlYiU/HApNMJzT1E87UPGvzveGg2h0ktUNlOCxKlWuJ9jtfvs1sKHdwU4fzY7Pl8sAl49/XaEk6Q==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.56.0.tgz", + "integrity": "sha512-EgxD1ocWfhoD6xSOeEEwyE7tDvwTgZc8Bss7wCWe+uc7wO8G34HHCUH+Q6cHqJubxIAnQzAsyUsClt0yFLu06w==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.56.0.tgz", + "integrity": "sha512-1vXe1vcMOssb/hOF8iv52A7feWW2xnu+c8BV4t1F//m9QVLTfNVpEdja5ia762j/UEJe2Z1jAmEqZAK42tVW3g==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.56.0.tgz", + "integrity": "sha512-bof7fbIlvqsyv/DtaXSck4VYQ9lPtoWNFCB/JY4snlFuJREXfZnm+Ej6yaCHfQvofJDXLDMTVxWscVSuQvVWUQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.56.0.tgz", + "integrity": "sha512-KNa6lYHloW+7lTEkYGa37fpvPq+NKG/EHKM8+G/g9WDU7ls4sMqbVRV78J6LdNuVaeeK5WB9/9VAFbKxcbXKYg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.56.0.tgz", + "integrity": 
"sha512-E8jKK87uOvLrrLN28jnAAAChNq5LeCd2mGgZF+fGF5D507WlG/Noct3lP/QzQ6MrqJ5BCKNwI9ipADB6jyiq2A==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.56.0.tgz", + "integrity": "sha512-jQosa5FMYF5Z6prEpTCCmzCXz6eKr/tCBssSmQGEeozA9tkRUty/5Vx06ibaOP9RCrW1Pvb8yp3gvZhHwTDsJw==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.56.0.tgz", + "integrity": "sha512-uQVoKkrC1KGEV6udrdVahASIsaF8h7iLG0U0W+Xn14ucFwi6uS539PsAr24IEF9/FoDtzMeeJXJIBo5RkbNWvQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.56.0.tgz", + "integrity": "sha512-vLZ1yJKLxhQLFKTs42RwTwa6zkGln+bnXc8ueFGMYmBTLfNu58sl5/eXyxRa2RarTkJbXl8TKPgfS6V5ijNqEA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.56.0.tgz", + "integrity": "sha512-FWfHOCub564kSE3xJQLLIC/hbKqHSVxy8vY75/YHHzWvbJL7aYJkdgwD/xGfUlL5UV2SB7otapLrcCj2xnF1dg==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.56.0.tgz", + "integrity": "sha512-z1EkujxIh7nbrKL1lmIpqFTc/sr0u8Uk0zK/qIEFldbt6EDKWFk/pxFq3gYj4Bjn3aa9eEhYRlL3H8ZbPT1xvA==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.56.0.tgz", + "integrity": "sha512-iNFTluqgdoQC7AIE8Q34R3AuPrJGJirj5wMUErxj22deOcY7XwZRaqYmB6ZKFHoVGqRcRd0mqO+845jAibKCkw==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.56.0.tgz", + "integrity": "sha512-MtMeFVlD2LIKjp2sE2xM2slq3Zxf9zwVuw0jemsxvh1QOpHSsSzfNOTH9uYW9i1MXFxUSMmLpeVeUzoNOKBaWg==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.56.0.tgz", + "integrity": "sha512-in+v6wiHdzzVhYKXIk5U74dEZHdKN9KH0Q4ANHOTvyXPG41bajYRsy7a8TPKbYPl34hU7PP7hMVHRvv/5aCSew==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.56.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.56.0.tgz", + "integrity": "sha512-yni2raKHB8m9NQpI9fPVwN754mn6dHQSbDTwxdr9SE0ks38DTjLMMBjrwvB5+mXrX+C0npX0CVeCUcvvvD8CNQ==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.56.0.tgz", + "integrity": "sha512-zhLLJx9nQPu7wezbxt2ut+CI4YlXi68ndEve16tPc/iwoylWS9B3FxpLS2PkmfYgDQtosah07Mj9E0khc3Y+vQ==", + "cpu": [ + "s390x" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.56.0.tgz", + "integrity": "sha512-MVC6UDp16ZSH7x4rtuJPAEoE1RwS8N4oK9DLHy3FTEdFoUTCFVzMfJl/BVJ330C+hx8FfprA5Wqx4FhZXkj2Kw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.56.0.tgz", + "integrity": "sha512-ZhGH1eA4Qv0lxaV00azCIS1ChedK0V32952Md3FtnxSqZTBTd6tgil4nZT5cU8B+SIw3PFYkvyR4FKo2oyZIHA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.56.0.tgz", + "integrity": "sha512-O16XcmyDeFI9879pEcmtWvD/2nyxR9mF7Gs44lf1vGGx8Vg2DRNx11aVXBEqOQhWb92WN4z7fW/q4+2NYzCbBA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.56.0.tgz", + "integrity": "sha512-LhN/Reh+7F3RCgQIRbgw8ZMwUwyqJM+8pXNT6IIJAqm2IdKkzpCh/V9EdgOMBKuebIrzswqy4ATlrDgiOwbRcQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.56.0.tgz", + "integrity": "sha512-kbFsOObXp3LBULg1d3JIUQMa9Kv4UitDmpS+k0tinPBz3watcUiV2/LUDMMucA6pZO3WGE27P7DsfaN54l9ing==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.56.0.tgz", + "integrity": "sha512-vSSgny54D6P4vf2izbtFm/TcWYedw7f8eBrOiGGecyHyQB9q4Kqentjaj8hToe+995nob/Wv48pDqL5a62EWtg==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.56.0.tgz", + "integrity": "sha512-FeCnkPCTHQJFbiGG49KjV5YGW/8b9rrXAM2Mz2kiIoktq2qsJxRD5giEMEOD2lPdgs72upzefaUvS+nc8E3UzQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.56.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.56.0.tgz", + "integrity": "sha512-H8AE9Ur/t0+1VXujj90w0HrSOuv0Nq9r1vSZF2t5km20NTfosQsGGUXDaKdQZzwuLts7IyL1fYT4hM95TI9c4g==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@shikijs/core": { + "version": "3.21.0", + "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-3.21.0.tgz", + "integrity": "sha512-AXSQu/2n1UIQekY8euBJlvFYZIw0PHY63jUzGbrOma4wPxzznJXTXkri+QcHeBNaFxiiOljKxxJkVSoB3PjbyA==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.21.0", + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4", + "hast-util-to-html": "^9.0.5" + } + }, + "node_modules/@shikijs/engine-javascript": { + "version": "3.21.0", + "resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-3.21.0.tgz", + "integrity": "sha512-ATwv86xlbmfD9n9gKRiwuPpWgPENAWCLwYCGz9ugTJlsO2kOzhOkvoyV/UD+tJ0uT7YRyD530x6ugNSffmvIiQ==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.21.0", + "@shikijs/vscode-textmate": "^10.0.2", + "oniguruma-to-es": "^4.3.4" + } + }, + "node_modules/@shikijs/engine-oniguruma": { + "version": "3.21.0", + "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-3.21.0.tgz", + "integrity": "sha512-OYknTCct6qiwpQDqDdf3iedRdzj6hFlOPv5hMvI+hkWfCKs5mlJ4TXziBG9nyabLwGulrUjHiCq3xCspSzErYQ==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.21.0", + "@shikijs/vscode-textmate": "^10.0.2" + } + }, + "node_modules/@shikijs/langs": { + "version": "3.21.0", + "resolved": "https://registry.npmjs.org/@shikijs/langs/-/langs-3.21.0.tgz", + "integrity": "sha512-g6mn5m+Y6GBJ4wxmBYqalK9Sp0CFkUqfNzUy2pJglUginz6ZpWbaWjDB4fbQ/8SHzFjYbtU6Ddlp1pc+PPNDVA==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.21.0" + } + }, + "node_modules/@shikijs/themes": { + "version": "3.21.0", + "resolved": "https://registry.npmjs.org/@shikijs/themes/-/themes-3.21.0.tgz", + "integrity": "sha512-BAE4cr9EDiZyYzwIHEk7JTBJ9CzlPuM4PchfcA5ao1dWXb25nv6hYsoDiBq2aZK9E3dlt3WB78uI96UESD+8Mw==", + "license": "MIT", + "dependencies": { + "@shikijs/types": "3.21.0" + } + }, + "node_modules/@shikijs/types": { + "version": "3.21.0", + "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-3.21.0.tgz", + "integrity": "sha512-zGrWOxZ0/+0ovPY7PvBU2gIS9tmhSUUt30jAcNV0Bq0gb2S98gwfjIs1vxlmH5zM7/4YxLamT6ChlqqAJmPPjA==", + "license": "MIT", + "dependencies": { + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4" + } + }, + "node_modules/@shikijs/vscode-textmate": { + "version": "10.0.2", + "resolved": "https://registry.npmjs.org/@shikijs/vscode-textmate/-/vscode-textmate-10.0.2.tgz", + "integrity": "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==", + "license": "MIT" + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "license": "MIT", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "license": "MIT" + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": 
"https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==", + "license": "MIT" + }, + "node_modules/@types/nlcst": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/nlcst/-/nlcst-2.0.3.tgz", + "integrity": "sha512-vSYNSDe6Ix3q+6Z7ri9lyWqgGhJTmzRjZRqyq15N0Z/1/UnVsno9G/N40NBijoYx2seFDIl0+B2mgAb9mezUCA==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "license": "ISC" + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "license": "ISC", + "dependencies": { + "string-width": "^4.1.0" + } + }, + "node_modules/ansi-align/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-align/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/ansi-align/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-align/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": 
"https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/anymatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "license": "Python-2.0" + }, + "node_modules/aria-query": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz", + "integrity": "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==", + "license": "Apache-2.0", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/array-iterate": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/array-iterate/-/array-iterate-2.0.1.tgz", + "integrity": "sha512-I1jXZMjAgCMmxT4qxXfPXa6SthSoE8h6gkSI9BGGNv8mP8G/v0blc+qFnZu6K42vTOiuME596QaLO0TP3Lk0xg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/astro": { + "version": "5.16.15", + "resolved": "https://registry.npmjs.org/astro/-/astro-5.16.15.tgz", + "integrity": "sha512-+X1Z0NTi2pa5a0Te6h77Dgc44fYj63j1yx6+39Nvg05lExajxSq7b1Uj/gtY45zoum8fD0+h0nak+DnHighs3A==", + "license": "MIT", + "peer": true, + "dependencies": { + "@astrojs/compiler": "^2.13.0", + "@astrojs/internal-helpers": "0.7.5", + "@astrojs/markdown-remark": "6.3.10", + "@astrojs/telemetry": "3.3.0", + "@capsizecss/unpack": "^4.0.0", + "@oslojs/encoding": "^1.1.0", + "@rollup/pluginutils": "^5.3.0", + "acorn": "^8.15.0", + "aria-query": "^5.3.2", + "axobject-query": "^4.1.0", + "boxen": "8.0.1", + "ci-info": "^4.3.1", + "clsx": "^2.1.1", + "common-ancestor-path": "^1.0.1", + "cookie": "^1.1.1", + "cssesc": "^3.0.0", + "debug": 
"^4.4.3", + "deterministic-object-hash": "^2.0.2", + "devalue": "^5.6.2", + "diff": "^8.0.3", + "dlv": "^1.1.3", + "dset": "^3.1.4", + "es-module-lexer": "^1.7.0", + "esbuild": "^0.25.0", + "estree-walker": "^3.0.3", + "flattie": "^1.1.1", + "fontace": "~0.4.0", + "github-slugger": "^2.0.0", + "html-escaper": "3.0.3", + "http-cache-semantics": "^4.2.0", + "import-meta-resolve": "^4.2.0", + "js-yaml": "^4.1.1", + "magic-string": "^0.30.21", + "magicast": "^0.5.1", + "mrmime": "^2.0.1", + "neotraverse": "^0.6.18", + "p-limit": "^6.2.0", + "p-queue": "^8.1.1", + "package-manager-detector": "^1.6.0", + "piccolore": "^0.1.3", + "picomatch": "^4.0.3", + "prompts": "^2.4.2", + "rehype": "^13.0.2", + "semver": "^7.7.3", + "shiki": "^3.21.0", + "smol-toml": "^1.6.0", + "svgo": "^4.0.0", + "tinyexec": "^1.0.2", + "tinyglobby": "^0.2.15", + "tsconfck": "^3.1.6", + "ultrahtml": "^1.6.0", + "unifont": "~0.7.3", + "unist-util-visit": "^5.0.0", + "unstorage": "^1.17.4", + "vfile": "^6.0.3", + "vite": "^6.4.1", + "vitefu": "^1.1.1", + "xxhash-wasm": "^1.1.0", + "yargs-parser": "^21.1.1", + "yocto-spinner": "^0.2.3", + "zod": "^3.25.76", + "zod-to-json-schema": "^3.25.1", + "zod-to-ts": "^1.2.0" + }, + "bin": { + "astro": "astro.js" + }, + "engines": { + "node": "18.20.8 || ^20.3.0 || >=22.0.0", + "npm": ">=9.6.5", + "pnpm": ">=7.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/astrodotbuild" + }, + "optionalDependencies": { + "sharp": "^0.34.0" + } + }, + "node_modules/axobject-query": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz", + "integrity": "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==", + "license": "Apache-2.0", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/base-64": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/base-64/-/base-64-1.0.0.tgz", + "integrity": "sha512-kwDPIFCGx0NZHog36dj+tHiwP4QMzsZ3AgMViUBKI0+V5n4U0ufTCUMhnQ04diaRI8EX/QcPfql7zlhZ7j4zgg==", + "license": "MIT" + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==", + "license": "ISC" + }, + "node_modules/boxen": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-8.0.1.tgz", + "integrity": "sha512-F3PH5k5juxom4xktynS7MoFY+NUWH5LC4CnH11YB8NPew+HLpmBLCybSAEyb2F+4pRXhuhWqFesoQd6DAyc2hw==", + "license": "MIT", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^8.0.0", + "chalk": "^5.3.0", + "cli-boxes": "^3.0.0", + "string-width": "^7.2.0", + "type-fest": "^4.21.0", + "widest-line": "^5.0.0", + "wrap-ansi": "^9.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/camelcase": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-8.0.0.tgz", + "integrity": "sha512-8WB3Jcas3swSvjIeA2yvCJ+Miyz5l1ZmB6HFb9R1317dt9LCQoswg/BGrmAmkWVEszSrrg4RwmO46qIm2OEnSA==", + "license": "MIT", + "engines": 
{ + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chalk": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.6.2.tgz", + "integrity": "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==", + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chokidar": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-5.0.0.tgz", + "integrity": "sha512-TQMmc3w+5AxjpL8iIiwebF73dRDF4fBIieAqGn9RGCWaEVwQ6Fb2cGe31Yns0RRIzii5goJ1Y7xbMwo1TxMplw==", + "license": "MIT", + "dependencies": { + "readdirp": "^5.0.0" + }, + "engines": { + "node": ">= 20.19.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/ci-info": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.3.1.tgz", + "integrity": "sha512-Wdy2Igu8OcBpI2pZePZ5oWjPC38tmDVx5WKUXKwlLYkA0ozo85sLsLvkBbBn/sZaSCMFOGZJ14fvW9t5/d7kdA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-boxes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", + "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + 
"resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-11.1.0.tgz", + "integrity": "sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ==", + "license": "MIT", + "engines": { + "node": ">=16" + } + }, + "node_modules/common-ancestor-path": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/common-ancestor-path/-/common-ancestor-path-1.0.1.tgz", + "integrity": "sha512-L3sHRo1pXXEqX8VU28kfgUY+YGsk09hPqZiZmLacNib6XNTCM8ubYeT7ryXQw8asB1sKgcU5lkB7ONug08aB8w==", + "license": "ISC" + }, + "node_modules/cookie": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz", + "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/cookie-es": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/cookie-es/-/cookie-es-1.2.2.tgz", + "integrity": "sha512-+W7VmiVINB+ywl1HGXJXmrqkOhpKrIiVZV6tQuV54ZyQC7MMuBt81Vc336GMLoHBq5hV/F9eXgt5Mnx0Rha5Fg==", + "license": "MIT" + }, + "node_modules/crossws": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/crossws/-/crossws-0.3.5.tgz", + "integrity": "sha512-ojKiDvcmByhwa8YYqbQI/hg7MEU0NC03+pSdEq4ZUnZR9xXpwk7E43SMNGkn+JxJGPFtNvQ48+vV2p+P1ml5PA==", + "license": "MIT", + "dependencies": { + "uncrypto": "^0.1.3" + } + }, + "node_modules/css-select": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.2.2.tgz", + "integrity": "sha512-TizTzUddG/xYLA3NXodFM0fSbNizXjOKhqiQQwvhlspadZokn1KDy0NZFS0wuEubIYAV5/c1/lAr0TaaFXEXzw==", + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.1.0", + "domhandler": "^5.0.2", + "domutils": "^3.0.1", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/css-tree": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-3.1.0.tgz", + "integrity": "sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w==", + "license": "MIT", + "dependencies": { + "mdn-data": "2.12.2", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + "node_modules/css-what": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.2.2.tgz", + "integrity": "sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA==", + "license": "BSD-2-Clause", + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csso": { + "version": "5.0.5", + 
"resolved": "https://registry.npmjs.org/csso/-/csso-5.0.5.tgz", + "integrity": "sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ==", + "license": "MIT", + "dependencies": { + "css-tree": "~2.2.0" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", + "npm": ">=7.0.0" + } + }, + "node_modules/csso/node_modules/css-tree": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.2.1.tgz", + "integrity": "sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA==", + "license": "MIT", + "dependencies": { + "mdn-data": "2.0.28", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", + "npm": ">=7.0.0" + } + }, + "node_modules/csso/node_modules/mdn-data": { + "version": "2.0.28", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.28.tgz", + "integrity": "sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g==", + "license": "CC0-1.0" + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.3.0.tgz", + "integrity": "sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q==", + "license": "MIT", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/defu": { + "version": "6.1.4", + "resolved": "https://registry.npmjs.org/defu/-/defu-6.1.4.tgz", + "integrity": "sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==", + "license": "MIT" + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/destr": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/destr/-/destr-2.0.5.tgz", + "integrity": "sha512-ugFTXCtDZunbzasqBxrK93Ik/DRYsO6S/fedkWEMKqt04xZ4csmnmwGDBAb07QWNaGMAmnTIemsYZCksjATwsA==", + "license": "MIT" + }, + "node_modules/detect-libc": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", + "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", + "license": "Apache-2.0", + "optional": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/deterministic-object-hash": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/deterministic-object-hash/-/deterministic-object-hash-2.0.2.tgz", + "integrity": "sha512-KxektNH63SrbfUyDiwXqRb1rLwKt33AmMv+5Nhsw1kqZ13SJBRTgZHtGbE+hH3a1mVW1cz+4pqSWVPAtLVXTzQ==", + "license": "MIT", + "dependencies": { + "base-64": "^1.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/devalue": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/devalue/-/devalue-5.6.2.tgz", + "integrity": "sha512-nPRkjWzzDQlsejL1WVifk5rvcFi/y1onBRxjaFMjZeR9mFpqu2gmAZ9xUB9/IEanEP/vBtGeGganC/GO1fmufg==", + "license": "MIT" + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/diff": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/diff/-/diff-8.0.3.tgz", + "integrity": "sha512-qejHi7bcSD4hQAZE0tNAawRK1ZtafHDmMTMkrrIGgSLl7hTnQHmKCeB45xAcbfTqK2zowkM3j3bHt/4b/ARbYQ==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "license": "MIT" + }, + "node_modules/dom-serializer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", + "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", + "license": "MIT", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.2", + "entities": "^4.2.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/dom-serializer/node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "license": "BSD-2-Clause" + }, + "node_modules/domhandler": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", + "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "license": "BSD-2-Clause", + "dependencies": { + "domelementtype": "^2.3.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/domutils": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.2.2.tgz", + "integrity": "sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==", + "license": "BSD-2-Clause", + "dependencies": { + "dom-serializer": "^2.0.0", + 
"domelementtype": "^2.3.0", + "domhandler": "^5.0.3" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/dset": { + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/dset/-/dset-3.1.4.tgz", + "integrity": "sha512-2QF/g9/zTaPDc3BjNcVTGoBbXBgYfMTTceLaYcFJ/W9kggFUkhxD/hMEeuLKbugyef9SqAx8cpgwlIP/jinUTA==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" + }, + "node_modules/emoji-regex": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", + "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==", + "license": "MIT" + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/entities": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "license": "MIT" + }, + "node_modules/esbuild": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.12.tgz", + "integrity": "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==", + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.12", + "@esbuild/android-arm": "0.25.12", + "@esbuild/android-arm64": "0.25.12", + "@esbuild/android-x64": "0.25.12", + "@esbuild/darwin-arm64": "0.25.12", + "@esbuild/darwin-x64": "0.25.12", + "@esbuild/freebsd-arm64": "0.25.12", + "@esbuild/freebsd-x64": "0.25.12", + "@esbuild/linux-arm": "0.25.12", + "@esbuild/linux-arm64": "0.25.12", + "@esbuild/linux-ia32": "0.25.12", + "@esbuild/linux-loong64": "0.25.12", + "@esbuild/linux-mips64el": "0.25.12", + "@esbuild/linux-ppc64": "0.25.12", + "@esbuild/linux-riscv64": "0.25.12", + "@esbuild/linux-s390x": "0.25.12", + "@esbuild/linux-x64": "0.25.12", + "@esbuild/netbsd-arm64": "0.25.12", + "@esbuild/netbsd-x64": "0.25.12", + "@esbuild/openbsd-arm64": "0.25.12", + "@esbuild/openbsd-x64": "0.25.12", + "@esbuild/openharmony-arm64": "0.25.12", + "@esbuild/sunos-x64": "0.25.12", + "@esbuild/win32-arm64": "0.25.12", + "@esbuild/win32-ia32": "0.25.12", + "@esbuild/win32-x64": "0.25.12" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", 
+ "license": "MIT" + }, + "node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/eventemitter3": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.4.tgz", + "integrity": "sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==", + "license": "MIT" + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT" + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/flattie": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/flattie/-/flattie-1.1.1.tgz", + "integrity": "sha512-9UbaD6XdAL97+k/n+N7JwX46K/M6Zc6KcFYskrYL8wbBV/Uyk0CTAMY0VT+qiK5PM7AIc9aTWYtq65U7T+aCNQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/fontace": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/fontace/-/fontace-0.4.0.tgz", + "integrity": "sha512-moThBCItUe2bjZip5PF/iZClpKHGLwMvR79Kp8XpGRBrvoRSnySN4VcILdv3/MJzbhvUA5WeiUXF5o538m5fvg==", + "license": "MIT", + "dependencies": { + "fontkitten": "^1.0.0" + } + }, + "node_modules/fontkitten": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/fontkitten/-/fontkitten-1.0.2.tgz", + "integrity": "sha512-piJxbLnkD9Xcyi7dWJRnqszEURixe7CrF/efBfbffe2DPyabmuIuqraruY8cXTs19QoM8VJzx47BDRVNXETM7Q==", + "license": "MIT", + "dependencies": { + "tiny-inflate": "^1.0.3" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/fresh": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-2.0.0.tgz", + "integrity": "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "license": "MIT", + 
"optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-east-asian-width": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.4.0.tgz", + "integrity": "sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/github-slugger": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-2.0.0.tgz", + "integrity": "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==", + "license": "ISC" + }, + "node_modules/h3": { + "version": "1.15.5", + "resolved": "https://registry.npmjs.org/h3/-/h3-1.15.5.tgz", + "integrity": "sha512-xEyq3rSl+dhGX2Lm0+eFQIAzlDN6Fs0EcC4f7BNUmzaRX/PTzeuM+Tr2lHB8FoXggsQIeXLj8EDVgs5ywxyxmg==", + "license": "MIT", + "dependencies": { + "cookie-es": "^1.2.2", + "crossws": "^0.3.5", + "defu": "^6.1.4", + "destr": "^2.0.5", + "iron-webcrypto": "^1.2.1", + "node-mock-http": "^1.0.4", + "radix3": "^1.1.2", + "ufo": "^1.6.3", + "uncrypto": "^0.1.3" + } + }, + "node_modules/hast-util-from-html": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/hast-util-from-html/-/hast-util-from-html-2.0.3.tgz", + "integrity": "sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "devlop": "^1.1.0", + "hast-util-from-parse5": "^8.0.0", + "parse5": "^7.0.0", + "vfile": "^6.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-from-parse5": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.3.tgz", + "integrity": "sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "hastscript": "^9.0.0", + "property-information": "^7.0.0", + "vfile": "^6.0.0", + "vfile-location": "^5.0.0", + "web-namespaces": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-is-element": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-is-element/-/hast-util-is-element-3.0.0.tgz", + "integrity": "sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", + "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-raw": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.1.0.tgz", + "integrity": 
"sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "@ungap/structured-clone": "^1.0.0", + "hast-util-from-parse5": "^8.0.0", + "hast-util-to-parse5": "^8.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "parse5": "^7.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-html": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/hast-util-to-html/-/hast-util-to-html-9.0.5.tgz", + "integrity": "sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-whitespace": "^3.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "stringify-entities": "^4.0.0", + "zwitch": "^2.0.4" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-parse5": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.1.tgz", + "integrity": "sha512-MlWT6Pjt4CG9lFCjiz4BH7l9wmrMkfkJYCxFwKQic8+RTZgWPuWxwAfjJElsXkex7DJjfSJsQIt931ilUgmwdA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-text": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/hast-util-to-text/-/hast-util-to-text-4.0.2.tgz", + "integrity": "sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "hast-util-is-element": "^3.0.0", + "unist-util-find-after": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-9.0.1.tgz", + "integrity": "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^4.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" + } + }, + "node_modules/html-escaper": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-3.0.3.tgz", + "integrity": "sha512-RuMffC89BOWQoY0WKGpIhn5gX3iI54O6nRA0yC124NYVtzjmFWBIiFd8M0x+ZdX0P9R4lADg1mgP8C7PxGOWuQ==", + "license": "MIT" + }, + "node_modules/html-void-elements": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", + "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/http-cache-semantics": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.2.0.tgz", + "integrity": "sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==", + "license": "BSD-2-Clause" + }, + "node_modules/http-errors": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz", + "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", + "license": "MIT", + "dependencies": { + "depd": "~2.0.0", + "inherits": "~2.0.4", + "setprototypeof": "~1.2.0", + "statuses": "~2.0.2", + "toidentifier": "~1.0.1" + }, + "engines": { + "node": ">= 0.8" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/import-meta-resolve": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/import-meta-resolve/-/import-meta-resolve-4.2.0.tgz", + "integrity": "sha512-Iqv2fzaTQN28s/FwZAoFq0ZSs/7hMAHJVX+w8PZl3cY19Pxk6jFFalxQoIfW2826i/fDLXv8IiEZRIT0lDuWcg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/iron-webcrypto": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/iron-webcrypto/-/iron-webcrypto-1.2.1.tgz", + "integrity": "sha512-feOM6FaSr6rEABp/eDfVseKyTMDt+KGpeB35SkVn9Tyn0CqvVsY3EwI0v5i8nMHyJnzCIQf7nsy3p41TPkJZhg==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/brc-dd" + } + }, + "node_modules/is-docker": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz", + "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==", + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-inside-container": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz", + "integrity": 
"sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==", + "license": "MIT", + "dependencies": { + "is-docker": "^3.0.0" + }, + "bin": { + "is-inside-container": "cli.js" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-wsl": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.0.tgz", + "integrity": "sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==", + "license": "MIT", + "dependencies": { + "is-inside-container": "^1.0.0" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/lru-cache": { + "version": "11.2.4", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", + "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", + "license": "BlueOak-1.0.0", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/magicast": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.5.1.tgz", + "integrity": "sha512-xrHS24IxaLrvuo613F719wvOIv9xPHFWQHuvGUBmPnCA/3MQxKI3b+r7n1jAoDHmsbC5bRhTZYR77invLAxVnw==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "source-map-js": "^1.2.1" + } + }, + "node_modules/markdown-table": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz", + "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" 
+ } + }, + "node_modules/mdast-util-definitions": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-definitions/-/mdast-util-definitions-6.0.0.tgz", + "integrity": "sha512-scTllyX6pnYNZH/AIp/0ePz6s4cZtARxImwoPJ7kS42n+MnVsI4XbnG6d4ibehRIldYMWM2LD7ImQblVhUejVQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz", + "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz", + "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz", + "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz", + "integrity": 
"sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.1", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz", + "integrity": "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz", + "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", + 
"license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdn-data": { + "version": "2.12.2", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.12.2.tgz", + "integrity": "sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA==", + "license": "CC0-1.0" + }, + "node_modules/micromark": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", + "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz", + "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm": { + "version": 
"3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "license": "MIT", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz", + "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", + "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", + "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", + "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + 
"micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", + "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", + "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", + "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", + "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", + "integrity": 
"sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz", + "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", + "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", + "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", + "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", + "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", + "integrity": 
"sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz", + "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-types": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz", + "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/mime-db": { + "version": "1.54.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", + "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz", + "integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", + "license": "MIT", + "dependencies": { + "mime-db": "^1.54.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/mrmime": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.1.tgz", + "integrity": "sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": 
"sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/neotraverse": { + "version": "0.6.18", + "resolved": "https://registry.npmjs.org/neotraverse/-/neotraverse-0.6.18.tgz", + "integrity": "sha512-Z4SmBUweYa09+o6pG+eASabEpP6QkQ70yHj351pQoEXIs8uHbaU2DWVmzBANKgflPa47A50PtB2+NgRpQvr7vA==", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/nlcst-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/nlcst-to-string/-/nlcst-to-string-4.0.0.tgz", + "integrity": "sha512-YKLBCcUYKAg0FNlOBT6aI91qFmSiFKiluk655WzPF+DDMA02qIyy8uiRqI8QXtcFpEvll12LpL5MXqEmAZ+dcA==", + "license": "MIT", + "dependencies": { + "@types/nlcst": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/node-fetch-native": { + "version": "1.6.7", + "resolved": "https://registry.npmjs.org/node-fetch-native/-/node-fetch-native-1.6.7.tgz", + "integrity": "sha512-g9yhqoedzIUm0nTnTqAQvueMPVOuIY16bqgAJJC8XOOubYFNwz6IER9qs0Gq2Xd0+CecCKFjtdDTMA4u4xG06Q==", + "license": "MIT" + }, + "node_modules/node-mock-http": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/node-mock-http/-/node-mock-http-1.0.4.tgz", + "integrity": "sha512-8DY+kFsDkNXy1sJglUfuODx1/opAGJGyrTuFqEoN90oRc2Vk0ZbD4K2qmKXBBEhZQzdKHIVfEJpDU8Ak2NJEvQ==", + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/ofetch": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/ofetch/-/ofetch-1.5.1.tgz", + "integrity": "sha512-2W4oUZlVaqAPAil6FUg/difl6YhqhUR7x2eZY4bQCko22UXg3hptq9KLQdqFClV+Wu85UX7hNtdGTngi/1BxcA==", + "license": "MIT", + "dependencies": { + "destr": "^2.0.5", + "node-fetch-native": "^1.6.7", + "ufo": "^1.6.1" + } + }, + "node_modules/ohash": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/ohash/-/ohash-2.0.11.tgz", + "integrity": "sha512-RdR9FQrFwNBNXAr4GixM8YaRZRJ5PUWbKYbE5eOsrwAjJW0q2REGcf79oYPsLyskQCZG1PLN+S/K1V00joZAoQ==", + "license": "MIT" + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/oniguruma-parser": { + "version": "0.12.1", + "resolved": "https://registry.npmjs.org/oniguruma-parser/-/oniguruma-parser-0.12.1.tgz", + "integrity": 
"sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==", + "license": "MIT" + }, + "node_modules/oniguruma-to-es": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-4.3.4.tgz", + "integrity": "sha512-3VhUGN3w2eYxnTzHn+ikMI+fp/96KoRSVK9/kMTcFqj1NRDh2IhQCKvYxDnWePKRXY/AqH+Fuiyb7VHSzBjHfA==", + "license": "MIT", + "dependencies": { + "oniguruma-parser": "^0.12.1", + "regex": "^6.0.1", + "regex-recursion": "^6.0.2" + } + }, + "node_modules/p-limit": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-6.2.0.tgz", + "integrity": "sha512-kuUqqHNUqoIWp/c467RI4X6mmyuojY5jGutNU0wVTmEOOfcuwLqyMVoAi9MKi2Ak+5i9+nhmrK4ufZE8069kHA==", + "license": "MIT", + "dependencies": { + "yocto-queue": "^1.1.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-queue": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-8.1.1.tgz", + "integrity": "sha512-aNZ+VfjobsWryoiPnEApGGmf5WmNsCo9xu8dfaYamG5qaLP7ClhLN6NgsFe6SwJ2UbLEBK5dv9x8Mn5+RVhMWQ==", + "license": "MIT", + "dependencies": { + "eventemitter3": "^5.0.1", + "p-timeout": "^6.1.2" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-timeout": { + "version": "6.1.4", + "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-6.1.4.tgz", + "integrity": "sha512-MyIV3ZA/PmyBN/ud8vV9XzwTrNtR4jFrObymZYnZqMmW0zA8Z17vnT0rBgFE/TlohB+YCHqXMgZzb3Csp49vqg==", + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/package-manager-detector": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/package-manager-detector/-/package-manager-detector-1.6.0.tgz", + "integrity": "sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA==", + "license": "MIT" + }, + "node_modules/parse-latin": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/parse-latin/-/parse-latin-7.0.0.tgz", + "integrity": "sha512-mhHgobPPua5kZ98EF4HWiH167JWBfl4pvAIXXdbaVohtK7a6YBOy56kvhCqduqyo/f3yrHFWmqmiMg/BkBkYYQ==", + "license": "MIT", + "dependencies": { + "@types/nlcst": "^2.0.0", + "@types/unist": "^3.0.0", + "nlcst-to-string": "^4.0.0", + "unist-util-modify-children": "^4.0.0", + "unist-util-visit-children": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse5": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", + "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", + "license": "MIT", + "dependencies": { + "entities": "^6.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/piccolore": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/piccolore/-/piccolore-0.1.3.tgz", + "integrity": "sha512-o8bTeDWjE086iwKrROaDf31K0qC/BENdm15/uH9usSC/uZjJOKb2YGiVHfLY4GhwsERiPI1jmwI2XrA7ACOxVw==", + "license": "ISC" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": 
"ISC" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/prismjs": { + "version": "1.30.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.30.0.tgz", + "integrity": "sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "license": "MIT", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/property-information": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", + "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/radix3": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/radix3/-/radix3-1.1.2.tgz", + "integrity": "sha512-b484I/7b8rDEdSDKckSSBA8knMpcdsXudlE/LNL639wFoHKwLbEkQFZHWEYwDC0wa0FKUcCY+GAF73Z7wxNVFA==", + "license": "MIT" + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/readdirp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-5.0.0.tgz", + "integrity": "sha512-9u/XQ1pvrQtYyMpZe7DXKv2p5CNvyVwzUB6uhLAnQwHMSgKMBR62lc7AHljaeteeHXn11XTAaLLUVZYVZyuRBQ==", + "license": "MIT", + "engines": { + "node": ">= 20.19.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/regex/-/regex-6.1.0.tgz", + "integrity": "sha512-6VwtthbV4o/7+OaAF9I5L5V3llLEsoPyq9P1JVXkedTP33c7MfCG0/5NOPcSJn0TzXcG9YUrR0gQSWioew3LDg==", + "license": "MIT", + "dependencies": { + "regex-utilities": "^2.3.0" + } + }, + "node_modules/regex-recursion": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/regex-recursion/-/regex-recursion-6.0.2.tgz", + "integrity": 
"sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg==", + "license": "MIT", + "dependencies": { + "regex-utilities": "^2.3.0" + } + }, + "node_modules/regex-utilities": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/regex-utilities/-/regex-utilities-2.3.0.tgz", + "integrity": "sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==", + "license": "MIT" + }, + "node_modules/rehype": { + "version": "13.0.2", + "resolved": "https://registry.npmjs.org/rehype/-/rehype-13.0.2.tgz", + "integrity": "sha512-j31mdaRFrwFRUIlxGeuPXXKWQxet52RBQRvCmzl5eCefn/KGbomK5GMHNMsOJf55fgo3qw5tST5neDuarDYR2A==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "rehype-parse": "^9.0.0", + "rehype-stringify": "^10.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-parse": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/rehype-parse/-/rehype-parse-9.0.1.tgz", + "integrity": "sha512-ksCzCD0Fgfh7trPDxr2rSylbwq9iYDkSn8TCDmEJ49ljEUBxDVCzCHv7QNzZOfODanX4+bWQ4WZqLCRWYLfhag==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-from-html": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-raw": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-7.0.0.tgz", + "integrity": "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-raw": "^9.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-stringify": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/rehype-stringify/-/rehype-stringify-10.0.1.tgz", + "integrity": "sha512-k9ecfXHmIPuFVI61B9DeLPN0qFHfawM6RsuX48hoqlaKSF61RskNjSm1lI8PhBEM0MRdLxVVm4WmTqJQccH9mA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-to-html": "^9.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-gfm": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", + "integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.2", + "resolved": 
"https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz", + "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-smartypants": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/remark-smartypants/-/remark-smartypants-3.0.2.tgz", + "integrity": "sha512-ILTWeOriIluwEvPjv67v7Blgrcx+LZOkAUVtKI3putuhlZm84FnqDORNXPPm+HY3NdZOMhyDwZ1E+eZB/Df5dA==", + "license": "MIT", + "dependencies": { + "retext": "^9.0.0", + "retext-smartypants": "^6.0.0", + "unified": "^11.0.4", + "unist-util-visit": "^5.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/retext": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/retext/-/retext-9.0.0.tgz", + "integrity": "sha512-sbMDcpHCNjvlheSgMfEcVrZko3cDzdbe1x/e7G66dFp0Ff7Mldvi2uv6JkJQzdRcvLYE8CA8Oe8siQx8ZOgTcA==", + "license": "MIT", + "dependencies": { + "@types/nlcst": "^2.0.0", + "retext-latin": "^4.0.0", + "retext-stringify": "^4.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/retext-latin": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/retext-latin/-/retext-latin-4.0.0.tgz", + "integrity": "sha512-hv9woG7Fy0M9IlRQloq/N6atV82NxLGveq+3H2WOi79dtIYWN8OaxogDm77f8YnVXJL2VD3bbqowu5E3EMhBYA==", + "license": "MIT", + "dependencies": { + "@types/nlcst": "^2.0.0", + "parse-latin": "^7.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/retext-smartypants": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/retext-smartypants/-/retext-smartypants-6.2.0.tgz", + "integrity": "sha512-kk0jOU7+zGv//kfjXEBjdIryL1Acl4i9XNkHxtM7Tm5lFiCog576fjNC9hjoR7LTKQ0DsPWy09JummSsH1uqfQ==", + "license": "MIT", + "dependencies": { + "@types/nlcst": "^2.0.0", + "nlcst-to-string": "^4.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/retext-stringify": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/retext-stringify/-/retext-stringify-4.0.0.tgz", + "integrity": "sha512-rtfN/0o8kL1e+78+uxPTqu1Klt0yPzKuQ2BfWwwfgIUSayyzxpM1PJzkKt4V8803uB9qSy32MvI7Xep9khTpiA==", + "license": "MIT", + "dependencies": { + "@types/nlcst": "^2.0.0", + "nlcst-to-string": "^4.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rollup": { + "version": "4.56.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.56.0.tgz", + "integrity": 
"sha512-9FwVqlgUHzbXtDg9RCMgodF3Ua4Na6Gau+Sdt9vyCN4RhHfVKX2DCHy3BjMLTDd47ITDhYAnTwGulWTblJSDLg==", + "license": "MIT", + "peer": true, + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.56.0", + "@rollup/rollup-android-arm64": "4.56.0", + "@rollup/rollup-darwin-arm64": "4.56.0", + "@rollup/rollup-darwin-x64": "4.56.0", + "@rollup/rollup-freebsd-arm64": "4.56.0", + "@rollup/rollup-freebsd-x64": "4.56.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.56.0", + "@rollup/rollup-linux-arm-musleabihf": "4.56.0", + "@rollup/rollup-linux-arm64-gnu": "4.56.0", + "@rollup/rollup-linux-arm64-musl": "4.56.0", + "@rollup/rollup-linux-loong64-gnu": "4.56.0", + "@rollup/rollup-linux-loong64-musl": "4.56.0", + "@rollup/rollup-linux-ppc64-gnu": "4.56.0", + "@rollup/rollup-linux-ppc64-musl": "4.56.0", + "@rollup/rollup-linux-riscv64-gnu": "4.56.0", + "@rollup/rollup-linux-riscv64-musl": "4.56.0", + "@rollup/rollup-linux-s390x-gnu": "4.56.0", + "@rollup/rollup-linux-x64-gnu": "4.56.0", + "@rollup/rollup-linux-x64-musl": "4.56.0", + "@rollup/rollup-openbsd-x64": "4.56.0", + "@rollup/rollup-openharmony-arm64": "4.56.0", + "@rollup/rollup-win32-arm64-msvc": "4.56.0", + "@rollup/rollup-win32-ia32-msvc": "4.56.0", + "@rollup/rollup-win32-x64-gnu": "4.56.0", + "@rollup/rollup-win32-x64-msvc": "4.56.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/sax": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.4.4.tgz", + "integrity": "sha512-1n3r/tGXO6b6VXMdFT54SHzT9ytu9yr7TaELowdYpMqY/Ao7EnlQGmAQ1+RatX7Tkkdm6hONI2owqNx2aZj5Sw==", + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=11.0.0" + } + }, + "node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/send": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/send/-/send-1.2.1.tgz", + "integrity": "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==", + "license": "MIT", + "dependencies": { + "debug": "^4.4.3", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "etag": "^1.8.1", + "fresh": "^2.0.0", + "http-errors": "^2.0.1", + "mime-types": "^3.0.2", + "ms": "^2.1.3", + "on-finished": "^2.4.1", + "range-parser": "^1.2.1", + "statuses": "^2.0.2" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/server-destroy": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/server-destroy/-/server-destroy-1.0.1.tgz", + "integrity": "sha512-rb+9B5YBIEzYcD6x2VKidaa+cqYBJQKnU4oe4E3ANwRRN56yk/ua1YCJT1n21NTS8w6CcOclAKNP3PhdCXKYtQ==", + "license": "ISC" + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" + }, + "node_modules/sharp": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.34.5.tgz", + "integrity": 
"sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg==", + "hasInstallScript": true, + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "@img/colour": "^1.0.0", + "detect-libc": "^2.1.2", + "semver": "^7.7.3" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-darwin-arm64": "0.34.5", + "@img/sharp-darwin-x64": "0.34.5", + "@img/sharp-libvips-darwin-arm64": "1.2.4", + "@img/sharp-libvips-darwin-x64": "1.2.4", + "@img/sharp-libvips-linux-arm": "1.2.4", + "@img/sharp-libvips-linux-arm64": "1.2.4", + "@img/sharp-libvips-linux-ppc64": "1.2.4", + "@img/sharp-libvips-linux-riscv64": "1.2.4", + "@img/sharp-libvips-linux-s390x": "1.2.4", + "@img/sharp-libvips-linux-x64": "1.2.4", + "@img/sharp-libvips-linuxmusl-arm64": "1.2.4", + "@img/sharp-libvips-linuxmusl-x64": "1.2.4", + "@img/sharp-linux-arm": "0.34.5", + "@img/sharp-linux-arm64": "0.34.5", + "@img/sharp-linux-ppc64": "0.34.5", + "@img/sharp-linux-riscv64": "0.34.5", + "@img/sharp-linux-s390x": "0.34.5", + "@img/sharp-linux-x64": "0.34.5", + "@img/sharp-linuxmusl-arm64": "0.34.5", + "@img/sharp-linuxmusl-x64": "0.34.5", + "@img/sharp-wasm32": "0.34.5", + "@img/sharp-win32-arm64": "0.34.5", + "@img/sharp-win32-ia32": "0.34.5", + "@img/sharp-win32-x64": "0.34.5" + } + }, + "node_modules/shiki": { + "version": "3.21.0", + "resolved": "https://registry.npmjs.org/shiki/-/shiki-3.21.0.tgz", + "integrity": "sha512-N65B/3bqL/TI2crrXr+4UivctrAGEjmsib5rPMMPpFp1xAx/w03v8WZ9RDDFYteXoEgY7qZ4HGgl5KBIu1153w==", + "license": "MIT", + "dependencies": { + "@shikijs/core": "3.21.0", + "@shikijs/engine-javascript": "3.21.0", + "@shikijs/engine-oniguruma": "3.21.0", + "@shikijs/langs": "3.21.0", + "@shikijs/themes": "3.21.0", + "@shikijs/types": "3.21.0", + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "license": "MIT" + }, + "node_modules/smol-toml": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/smol-toml/-/smol-toml-1.6.0.tgz", + "integrity": "sha512-4zemZi0HvTnYwLfrpk/CF9LOd9Lt87kAt50GnqhMpyF9U3poDAP2+iukq2bZsO/ufegbYehBkqINbsWxj4l4cw==", + "license": "BSD-3-Clause", + "engines": { + "node": ">= 18" + }, + "funding": { + "url": "https://github.com/sponsors/cyyynthia" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/statuses": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", + "integrity": 
"sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "license": "MIT", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/svgo": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/svgo/-/svgo-4.0.0.tgz", + "integrity": "sha512-VvrHQ+9uniE+Mvx3+C9IEe/lWasXCU0nXMY2kZeLrHNICuRiC8uMPyM14UEaMOFA5mhyQqEkB02VoQ16n3DLaw==", + "license": "MIT", + "dependencies": { + "commander": "^11.1.0", + "css-select": "^5.1.0", + "css-tree": "^3.0.1", + "css-what": "^6.1.0", + "csso": "^5.0.5", + "picocolors": "^1.1.1", + "sax": "^1.4.1" + }, + "bin": { + "svgo": "bin/svgo.js" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/svgo" + } + }, + "node_modules/tiny-inflate": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-inflate/-/tiny-inflate-1.0.3.tgz", + "integrity": "sha512-pkY1fj1cKHb2seWDy0B16HeWyczlJA9/WW3u3c4z/NiWDsO3DOU5D7nhTLE9CF0yXv/QZFY7sEJmj24dK+Rrqw==", + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz", + "integrity": "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==", + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", + "engines": { + "node": ">=0.6" + } + }, + 
"node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/tsconfck": { + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/tsconfck/-/tsconfck-3.1.6.tgz", + "integrity": "sha512-ks6Vjr/jEw0P1gmOVwutM3B7fWxoWBL2KRDb1JfqGVawBmO5UsvmWOQFGHBPl5yxYz4eERr19E6L7NMv+Fej4w==", + "license": "MIT", + "bin": { + "tsconfck": "bin/tsconfck.js" + }, + "engines": { + "node": "^18 || >=20" + }, + "peerDependencies": { + "typescript": "^5.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD", + "optional": true + }, + "node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "license": "Apache-2.0", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/ufo": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.6.3.tgz", + "integrity": "sha512-yDJTmhydvl5lJzBmy/hyOAA0d+aqCBuwl818haVdYCRrWV84o7YyeVm4QlVHStqNrrJSTb6jKuFAVqAFsr+K3Q==", + "license": "MIT" + }, + "node_modules/ultrahtml": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/ultrahtml/-/ultrahtml-1.6.0.tgz", + "integrity": "sha512-R9fBn90VTJrqqLDwyMph+HGne8eqY1iPfYhPzZrvKpIfwkWZbcYlfpsb8B9dTvBfpy1/hqAD7Wi8EKfP9e8zdw==", + "license": "MIT" + }, + "node_modules/uncrypto": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/uncrypto/-/uncrypto-0.1.3.tgz", + "integrity": "sha512-Ql87qFHB3s/De2ClA9e0gsnS6zXG27SkTiSJwjCc9MebbfapQfuPzumMIUMi38ezPZVNFcHI9sUIepeQfw8J8Q==", + "license": "MIT" + }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + 
} + }, + "node_modules/unifont": { + "version": "0.7.3", + "resolved": "https://registry.npmjs.org/unifont/-/unifont-0.7.3.tgz", + "integrity": "sha512-b0GtQzKCyuSHGsfj5vyN8st7muZ6VCI4XD4vFlr7Uy1rlWVYxC3npnfk8MyreHxJYrz1ooLDqDzFe9XqQTlAhA==", + "license": "MIT", + "dependencies": { + "css-tree": "^3.1.0", + "ofetch": "^1.5.1", + "ohash": "^2.0.11" + } + }, + "node_modules/unist-util-find-after": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-find-after/-/unist-util-find-after-5.0.0.tgz", + "integrity": "sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz", + "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-modify-children": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-modify-children/-/unist-util-modify-children-4.0.0.tgz", + "integrity": "sha512-+tdN5fGNddvsQdIzUF3Xx82CU9sMM+fA0dLgR9vOmT0oPT2jH+P1nd5lSqfCfXAw+93NhcXNY2qqvTUtE4cQkw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "array-iterate": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-remove-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-5.0.0.tgz", + "integrity": "sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.1.0.tgz", + "integrity": "sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": 
"^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-children": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unist-util-visit-children/-/unist-util-visit-children-3.0.0.tgz", + "integrity": "sha512-RgmdTfSBOg04sdPcpTSD1jzoNBjt9a80/ZCzp5cI9n1qPzLZWF9YdvWGN2zmTumP1HWhXKdUWexjy/Wy/lJ7tA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz", + "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unstorage": { + "version": "1.17.4", + "resolved": "https://registry.npmjs.org/unstorage/-/unstorage-1.17.4.tgz", + "integrity": "sha512-fHK0yNg38tBiJKp/Vgsq4j0JEsCmgqH58HAn707S7zGkArbZsVr/CwINoi+nh3h98BRCwKvx1K3Xg9u3VV83sw==", + "license": "MIT", + "dependencies": { + "anymatch": "^3.1.3", + "chokidar": "^5.0.0", + "destr": "^2.0.5", + "h3": "^1.15.5", + "lru-cache": "^11.2.0", + "node-fetch-native": "^1.6.7", + "ofetch": "^1.5.1", + "ufo": "^1.6.3" + }, + "peerDependencies": { + "@azure/app-configuration": "^1.8.0", + "@azure/cosmos": "^4.2.0", + "@azure/data-tables": "^13.3.0", + "@azure/identity": "^4.6.0", + "@azure/keyvault-secrets": "^4.9.0", + "@azure/storage-blob": "^12.26.0", + "@capacitor/preferences": "^6 || ^7 || ^8", + "@deno/kv": ">=0.9.0", + "@netlify/blobs": "^6.5.0 || ^7.0.0 || ^8.1.0 || ^9.0.0 || ^10.0.0", + "@planetscale/database": "^1.19.0", + "@upstash/redis": "^1.34.3", + "@vercel/blob": ">=0.27.1", + "@vercel/functions": "^2.2.12 || ^3.0.0", + "@vercel/kv": "^1 || ^2 || ^3", + "aws4fetch": "^1.0.20", + "db0": ">=0.2.1", + "idb-keyval": "^6.2.1", + "ioredis": "^5.4.2", + "uploadthing": "^7.4.4" + }, + "peerDependenciesMeta": { + "@azure/app-configuration": { + "optional": true + }, + "@azure/cosmos": { + "optional": true + }, + "@azure/data-tables": { + "optional": true + }, + "@azure/identity": { + "optional": true + }, + "@azure/keyvault-secrets": { + "optional": true + }, + "@azure/storage-blob": { + "optional": true + }, + "@capacitor/preferences": { + "optional": true + }, + "@deno/kv": { + "optional": true + }, + "@netlify/blobs": { + "optional": true + }, + "@planetscale/database": { + "optional": true + }, + "@upstash/redis": { + "optional": true + }, + "@vercel/blob": { + "optional": true + }, + "@vercel/functions": { + "optional": true + }, + "@vercel/kv": { + "optional": true + }, + "aws4fetch": { + "optional": true + }, + "db0": { + "optional": true + }, + "idb-keyval": { + "optional": true + }, + "ioredis": { + "optional": true + }, + "uploadthing": { + "optional": true + } + } + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + 
}, + "node_modules/vfile-location": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.3.tgz", + "integrity": "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz", + "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vite": { + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.4.1.tgz", + "integrity": "sha512-+Oxm7q9hDoLMyJOYfUYBuHQo+dkAloi33apOPP56pzj+vsdJDzr+j1NISE5pyaAuKL4A3UD34qd0lx5+kfKp2g==", + "license": "MIT", + "peer": true, + "dependencies": { + "esbuild": "^0.25.0", + "fdir": "^6.4.4", + "picomatch": "^4.0.2", + "postcss": "^8.5.3", + "rollup": "^4.34.9", + "tinyglobby": "^0.2.13" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "jiti": ">=1.21.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vitefu": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/vitefu/-/vitefu-1.1.1.tgz", + "integrity": "sha512-B/Fegf3i8zh0yFbpzZ21amWzHmuNlLlmJT6n7bu5e+pCHUKQIfXSYokrqOBGEMMe9UG2sostKQF9mml/vYaWJQ==", + "license": "MIT", + "workspaces": [ + "tests/deps/*", + "tests/projects/*", + "tests/projects/workspace/packages/*" + ], + "peerDependencies": { + "vite": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0-beta.0" + }, + "peerDependenciesMeta": { + "vite": { + "optional": true + } + } + }, + "node_modules/web-namespaces": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", + "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/which-pm-runs": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/which-pm-runs/-/which-pm-runs-1.1.0.tgz", + "integrity": "sha512-n1brCuqClxfFfq/Rb0ICg9giSZqCS+pLtccdag6C2HyufBrh3fBOiy9nb6ggRMvWOVH5GrdJskj5iGTZNxd7SA==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + 
"node_modules/widest-line": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-5.0.0.tgz", + "integrity": "sha512-c9bZp7b5YtRj2wOe6dlj32MK+Bx/M/d+9VB2SHM1OtsUHR0aV0tdP6DWh/iMt0kWi1t5g1Iudu6hQRNd1A4PVA==", + "license": "MIT", + "dependencies": { + "string-width": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/wrap-ansi": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", + "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.2.1", + "string-width": "^7.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/xxhash-wasm": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/xxhash-wasm/-/xxhash-wasm-1.1.0.tgz", + "integrity": "sha512-147y/6YNh+tlp6nd/2pWq38i9h6mz/EuQ6njIrmW8D1BS5nCqs0P6DG+m6zTGnNz5I+uhZ0SHxBs9BsPrwcKDA==", + "license": "MIT" + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.2.2.tgz", + "integrity": "sha512-4LCcse/U2MHZ63HAJVE+v71o7yOdIe4cZ70Wpf8D/IyjDKYQLV5GD46B+hSTjJsvV5PztjvHoU580EftxjDZFQ==", + "license": "MIT", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yocto-spinner": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/yocto-spinner/-/yocto-spinner-0.2.3.tgz", + "integrity": "sha512-sqBChb33loEnkoXte1bLg45bEBsOP9N1kzQh5JZNKj/0rik4zAPTNSAVPj3uQAdc6slYJ0Ksc403G2XgxsJQFQ==", + "license": "MIT", + "dependencies": { + "yoctocolors": "^2.1.1" + }, + "engines": { + "node": ">=18.19" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yoctocolors": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/yoctocolors/-/yoctocolors-2.1.2.tgz", + "integrity": "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zod": { + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "license": "MIT", + "peer": true, + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zod-to-json-schema": { + "version": "3.25.1", + "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.25.1.tgz", + "integrity": "sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA==", + "license": "ISC", + "peerDependencies": { + "zod": "^3.25 || ^4" + } + }, + "node_modules/zod-to-ts": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/zod-to-ts/-/zod-to-ts-1.2.0.tgz", + "integrity": 
"sha512-x30XE43V+InwGpvTySRNz9kB7qFU8DlyEy7BsSTCHPH1R0QasMmHWZDCzYm6bVXtj/9NNJAZF3jW8rzFvH5OFA==", + "peerDependencies": { + "typescript": "^4.9.4 || ^5.0.2", + "zod": "^3" + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/frontend/package.json b/frontend/package.json new file mode 100644 index 0000000..cb0b37c --- /dev/null +++ b/frontend/package.json @@ -0,0 +1,15 @@ +{ + "name": "frontend", + "type": "module", + "version": "0.0.1", + "scripts": { + "dev": "astro dev", + "build": "astro build", + "preview": "astro preview", + "astro": "astro" + }, + "dependencies": { + "astro": "^5.16.15", + "@astrojs/node": "^9.0.0" + } +} \ No newline at end of file diff --git a/frontend/public/favicon.ico b/frontend/public/favicon.ico new file mode 100644 index 0000000..7f48a94 Binary files /dev/null and b/frontend/public/favicon.ico differ diff --git a/frontend/public/favicon.svg b/frontend/public/favicon.svg new file mode 100644 index 0000000..f157bd1 --- /dev/null +++ b/frontend/public/favicon.svg @@ -0,0 +1,9 @@ + + + + diff --git a/frontend/src/components/AdminNav.astro b/frontend/src/components/AdminNav.astro new file mode 100644 index 0000000..9634294 --- /dev/null +++ b/frontend/src/components/AdminNav.astro @@ -0,0 +1,136 @@ +--- +interface Props { + currentPage; +} + +const { currentPage } = Astro.props; + +const navItems = [ + { href: '/admin/settings', label: 'Instance Settings', icon: '⚙️' }, + { href: '/admin/users', label: 'Users', icon: '👥' }, + { href: '/admin/communities', label: 'Communities', icon: '🏘️' }, + { href: '/admin/approvals', label: 'Approvals', icon: '✅' }, + { href: '/admin/invitations', label: 'Invitations', icon: '📨' }, + { href: '/admin/roles', label: 'Roles & Permissions', icon: '🔐' }, + { href: '/admin/plugins', label: 'Plugins', icon: '🧩' }, + { href: '/admin/voting', label: 'Voting Methods', icon: '🗳️' }, +]; +--- + + + + diff --git a/frontend/src/components/ThemeSwitcher.astro b/frontend/src/components/ThemeSwitcher.astro new file mode 100644 index 0000000..15c9040 --- /dev/null +++ b/frontend/src/components/ThemeSwitcher.astro @@ -0,0 +1,70 @@ +--- + import { getAllThemes } from '../lib/themes'; + + const themes = getAllThemes(); +--- + +
+ + +
+ + + + diff --git a/frontend/src/components/icons/VotingIcons.astro b/frontend/src/components/icons/VotingIcons.astro new file mode 100644 index 0000000..1a5d470 --- /dev/null +++ b/frontend/src/components/icons/VotingIcons.astro @@ -0,0 +1,249 @@ +--- +/** + * Voting Method Icons - SVG Symbol Library + * + * Usage: once in layout, then use + */ +--- + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ² + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/frontend/src/components/moderation/LedgerViewer.astro b/frontend/src/components/moderation/LedgerViewer.astro new file mode 100644 index 0000000..5514796 --- /dev/null +++ b/frontend/src/components/moderation/LedgerViewer.astro @@ -0,0 +1,798 @@ +--- +export interface Props { + communityId?; + compact?; +} + +const { communityId, compact = false } = Astro.props; +import { API_BASE } from '../../lib/api'; +--- + +
+ +
+
+
+ +
+
+

Moderation Ledger

+

Immutable record of all moderation decisions

+
+
+
+ + +
+
+ + +
+
+ +
+ Chain integrity not yet verified +
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+
+ Loading ledger entries... +
+
+ + +
+ + Page 1 + +
+ + + +
+ + + + diff --git a/frontend/src/components/voting/DelegationGraph.astro b/frontend/src/components/voting/DelegationGraph.astro new file mode 100644 index 0000000..1482ad5 --- /dev/null +++ b/frontend/src/components/voting/DelegationGraph.astro @@ -0,0 +1,674 @@ +--- +/** + * DelegationGraph - Visual representation of delegation chains + * Shows who delegates to whom with interactive exploration + */ +interface Props { + userId?; + communityId?; + compact?; +} + +const { userId, communityId, compact = false } = Astro.props; +--- + +
+
+
+ +

Delegation Network

+
+
+ + +
+
+ +
+
+
+ Loading delegation data... +
+ + + +
+ +
+
+
+ You delegate to +
+
+
+ Delegates to you +
+
+
+ Transitive chain +
+
+
+ + + + diff --git a/frontend/src/components/voting/VotingMethodCard.astro b/frontend/src/components/voting/VotingMethodCard.astro new file mode 100644 index 0000000..840e7cf --- /dev/null +++ b/frontend/src/components/voting/VotingMethodCard.astro @@ -0,0 +1,297 @@ +--- +/** + * VotingMethodCard - Visual explainer card for a voting method + * Shows icon, name, description, and interactive example + */ +interface Props { + method: 'approval' | 'ranked_choice' | 'schulze' | 'star' | 'quadratic'; + compact?; + selected?; + interactive?; +} + +const { method, compact = false, selected = false, interactive = false } = Astro.props; + +const methodData = { + approval: { + name: 'Approval Voting', + icon: 'icon-approval', + color: '#22c55e', + shortDesc: 'Select all options you approve', + fullDesc: 'Vote for as many options as you like. The option with the most approvals wins. Simple and reduces strategic voting.', + complexity: 1, + pros: ['Simple to understand', 'No spoiler effect', 'Encourages honest voting'], + cons: ['No preference intensity', 'May produce ties'], + }, + ranked_choice: { + name: 'Ranked Choice', + icon: 'icon-ranked-choice', + color: '#3b82f6', + shortDesc: 'Rank options in order of preference', + fullDesc: 'Rank candidates from first to last choice. If no majority, lowest candidate eliminated and votes redistributed until majority.', + complexity: 2, + pros: ['Eliminates spoiler effect', 'Majority winner', 'Expresses preferences'], + cons: ['More complex counting', 'Non-monotonic edge cases'], + }, + schulze: { + name: 'Schulze Method', + icon: 'icon-schulze', + color: '#8b5cf6', + shortDesc: 'Pairwise comparison tournament', + fullDesc: 'Condorcet-consistent method using strongest paths. Compares every pair of options and finds the winner who beats all others.', + complexity: 3, + pros: ['Condorcet winner guaranteed', 'Handles cycles', 'Clone-proof'], + cons: ['Complex to explain', 'Requires all rankings'], + }, + star: { + name: 'STAR Voting', + icon: 'icon-star', + color: '#f59e0b', + shortDesc: 'Score options 0-5, top two face runoff', + fullDesc: 'Score Then Automatic Runoff: Rate each option 0-5 stars. Top two scoring options go to automatic runoff based on preferences.', + complexity: 2, + pros: ['Express intensity', 'Automatic runoff', 'Reduces strategy'], + cons: ['Two-phase complexity', 'Score interpretation varies'], + }, + quadratic: { + name: 'Quadratic Voting', + icon: 'icon-quadratic', + color: '#ec4899', + shortDesc: 'Spend credits where cost = votes²', + fullDesc: 'Allocate credits to options. Cost grows quadratically: 1 vote = 1 credit, 2 votes = 4 credits, 3 = 9. Express intensity of preference.', + complexity: 3, + pros: ['Intensity expression', 'Prevents vote buying', 'Fair allocation'], + cons: ['Budget strategy', 'Math complexity'], + }, +}; + +const data = methodData[method]; +const complexityBars = Array(3).fill(0).map((_, i) => i < data.complexity); +--- + +
+
+
+ +
+
+

{data.name}

+
+ Complexity: +
+ {complexityBars.map((filled) => ( +
+ ))} +
+
+
+ {selected && ( +
+ +
+ )} +
+ + {!compact && ( + <> +

{data.fullDesc}

+ +
+
+ Advantages +
    + {data.pros.map(pro => ( +
  • + + {pro} +
  • + ))} +
+
+
+ Considerations +
    + {data.cons.map(con => ( +
  • + + {con} +
  • + ))} +
+
+
+ + )} + + {compact && ( +

{data.shortDesc}

+ )} +
+ + diff --git a/frontend/src/components/voting/VotingResultsChart.astro b/frontend/src/components/voting/VotingResultsChart.astro new file mode 100644 index 0000000..c2216c5 --- /dev/null +++ b/frontend/src/components/voting/VotingResultsChart.astro @@ -0,0 +1,647 @@ +--- +/** + * VotingResultsChart - Interactive visualization for voting results + * Supports all voting methods with method-specific visualizations + */ +interface Props { + proposalId; +} + +const { proposalId } = Astro.props; +--- + +
+
+
+
+ Loading results... +
+ + +
+
+ + + + diff --git a/frontend/src/layouts/Layout.astro b/frontend/src/layouts/Layout.astro new file mode 100644 index 0000000..3e19084 --- /dev/null +++ b/frontend/src/layouts/Layout.astro @@ -0,0 +1,467 @@ +--- +interface Props { + title; +} + +import { DEFAULT_THEME, themes as themeRegistry } from '../lib/themes'; +import { API_BASE as apiBase } from '../lib/api'; +import VotingIcons from '../components/icons/VotingIcons.astro'; + +const { title } = Astro.props; + + const themes = Object.fromEntries( + Object.entries(themeRegistry).map(([id, t]) => [id, { isDark: t.isDark, colors: t.colors }]), + ); + + const defaultTheme = DEFAULT_THEME; +--- + + + + + + + + + + + + {title} | Likwid + + + + +
+
+ +
+
+ +
+
+

© 2026 Likwid - Modular governance platform

+
+
+ + + + + + + diff --git a/frontend/src/layouts/PublicLayout.astro b/frontend/src/layouts/PublicLayout.astro new file mode 100644 index 0000000..63495f1 --- /dev/null +++ b/frontend/src/layouts/PublicLayout.astro @@ -0,0 +1,422 @@ +--- +interface Props { + title; + description?; +} + +import { DEFAULT_THEME, themes as themeRegistry } from '../lib/themes'; + +const { title, description = "Likwid is a modular governance engine for distributed organizations, open source communities, and civic movements." } = Astro.props; + +const themes = Object.fromEntries( + Object.entries(themeRegistry).map(([id, t]) => [id, { isDark: t.isDark, colors: t.colors }]), +); + +const defaultTheme = DEFAULT_THEME; +--- + + + + + + + + + + + + {title} | Likwid + + + +
+
+ +
+
+ +
+ +
+ + + + + + diff --git a/frontend/src/lib/api.ts b/frontend/src/lib/api.ts new file mode 100644 index 0000000..2a3d048 --- /dev/null +++ b/frontend/src/lib/api.ts @@ -0,0 +1,37 @@ +export const API_BASE = 'http://localhost:3000'; + +export interface HealthResponse { + status: string; + version: string; +} + +export interface Community { + id: string; + name: string; + slug: string; + description: string | null; + created_at: string; +} + +export interface User { + id: string; + username: string; + email: string; + display_name: string | null; + created_at: string; +} + +export async function getHealth(): Promise { + const res = await fetch(`${API_BASE}/health`); + return res.json(); +} + +export async function getCommunities(): Promise { + const res = await fetch(`${API_BASE}/api/communities`); + return res.json(); +} + +export async function getUsers(): Promise { + const res = await fetch(`${API_BASE}/api/users`); + return res.json(); +} diff --git a/frontend/src/lib/themes.ts b/frontend/src/lib/themes.ts new file mode 100644 index 0000000..d8fbcfb --- /dev/null +++ b/frontend/src/lib/themes.ts @@ -0,0 +1,287 @@ +export interface Theme { + id: string; + name: string; + description: string; + author: string; + isDark: boolean; + colors: { + bg: string; + bgAlt: string; + surface: string; + surfaceHover: string; + border: string; + borderHover: string; + text: string; + textMuted: string; + textInverse: string; + primary: string; + primaryHover: string; + primaryMuted: string; + secondary: string; + secondaryHover: string; + info: string; + infoHover: string; + infoMuted: string; + neutral: string; + neutralHover: string; + neutralMuted: string; + success: string; + successMuted: string; + successHover: string; + warning: string; + warningMuted: string; + error: string; + errorMuted: string; + errorHover: string; + link: string; + linkVisited: string; + overlay: string; + fieldBg: string; + onPrimary: string; + }; +} + +export const themes: Record = { + neutral: { + id: 'neutral', + name: 'Neutral Dark', + description: 'Default dark theme with neutral colors', + author: 'Likwid', + isDark: true, + colors: { + bg: '#0f0f0f', + bgAlt: '#141414', + surface: '#1a1a1a', + surfaceHover: '#222222', + border: '#2a2a2a', + borderHover: '#3a3a3a', + text: '#e0e0e0', + textMuted: '#888888', + textInverse: '#0f0f0f', + primary: '#6366f1', + primaryHover: '#4f46e5', + primaryMuted: 'rgba(99, 102, 241, 0.15)', + secondary: '#8b5cf6', + secondaryHover: '#7c3aed', + info: '#3b82f6', + infoHover: '#2563eb', + infoMuted: 'rgba(59, 130, 246, 0.15)', + neutral: '#6b7280', + neutralHover: '#4b5563', + neutralMuted: 'rgba(107, 114, 128, 0.18)', + success: '#22c55e', + successMuted: 'rgba(34, 197, 94, 0.15)', + successHover: '#16a34a', + warning: '#f59e0b', + warningMuted: 'rgba(245, 158, 11, 0.15)', + error: '#ef4444', + errorMuted: 'rgba(239, 68, 68, 0.15)', + errorHover: '#dc2626', + link: '#6366f1', + linkVisited: '#8b5cf6', + overlay: 'rgba(0, 0, 0, 0.8)', + fieldBg: 'rgba(0, 0, 0, 0.25)', + onPrimary: '#ffffff', + }, + }, + + 'breeze-light': { + id: 'breeze-light', + name: 'Breeze Light', + description: 'Light theme inspired by the Breeze palette', + author: 'Community', + isDark: false, + colors: { + bg: '#eff0f1', + bgAlt: '#e3e5e7', + surface: '#fcfcfc', + surfaceHover: '#f4f5f6', + border: '#bdc3c7', + borderHover: '#95a5a6', + text: '#232629', + textMuted: '#707d8a', + textInverse: '#ffffff', + primary: '#3daee9', + primaryHover: '#2980b9', + primaryMuted: 'rgba(61, 174, 233, 0.15)', + secondary: 
'#1d99f3', + secondaryHover: '#1a8fe8', + info: '#3daee9', + infoHover: '#2980b9', + infoMuted: 'rgba(61, 174, 233, 0.15)', + neutral: '#707d8a', + neutralHover: '#4f5b66', + neutralMuted: 'rgba(112, 125, 138, 0.2)', + success: '#27ae60', + successMuted: 'rgba(39, 174, 96, 0.15)', + successHover: '#1f8a4d', + warning: '#f67400', + warningMuted: 'rgba(246, 116, 0, 0.15)', + error: '#da4453', + errorMuted: 'rgba(218, 68, 83, 0.15)', + errorHover: '#b93a46', + link: '#2980b9', + linkVisited: '#9b59b6', + overlay: 'rgba(0, 0, 0, 0.55)', + fieldBg: '#f7f7f7', + onPrimary: '#ffffff', + }, + }, + + 'breeze-dark': { + id: 'breeze-dark', + name: 'Breeze Dark', + description: 'Dark theme inspired by the Breeze palette', + author: 'Community', + isDark: true, + colors: { + bg: '#1b1e20', + bgAlt: '#121314', + surface: '#232629', + surfaceHover: '#2d3135', + border: '#3d4349', + borderHover: '#4d5459', + text: '#fcfcfc', + textMuted: '#a1a9b1', + textInverse: '#232629', + primary: '#3daee9', + primaryHover: '#2980b9', + primaryMuted: 'rgba(61, 174, 233, 0.2)', + secondary: '#1d99f3', + secondaryHover: '#1a8fe8', + info: '#3daee9', + infoHover: '#2980b9', + infoMuted: 'rgba(61, 174, 233, 0.2)', + neutral: '#a1a9b1', + neutralHover: '#6b7280', + neutralMuted: 'rgba(161, 169, 177, 0.2)', + success: '#27ae60', + successMuted: 'rgba(39, 174, 96, 0.2)', + successHover: '#1f8a4d', + warning: '#f67400', + warningMuted: 'rgba(246, 116, 0, 0.2)', + error: '#da4453', + errorMuted: 'rgba(218, 68, 83, 0.2)', + errorHover: '#b93a46', + link: '#1d99f3', + linkVisited: '#9b59b6', + overlay: 'rgba(0, 0, 0, 0.8)', + fieldBg: 'rgba(0, 0, 0, 0.25)', + onPrimary: '#ffffff', + }, + }, + + opensuse: { + id: 'opensuse', + name: 'openSUSE', + description: 'Theme based on openSUSE branding guidelines', + author: 'openSUSE Community', + isDark: true, + colors: { + bg: '#173f4f', + bgAlt: '#1e4d5f', + surface: '#21596e', + surfaceHover: '#266a82', + border: '#2d7a94', + borderHover: '#3a8da7', + text: '#f5f5f5', + textMuted: '#a8c8d4', + textInverse: '#173f4f', + primary: '#73ba25', + primaryHover: '#6aad20', + primaryMuted: 'rgba(115, 186, 37, 0.2)', + secondary: '#35b9ab', + secondaryHover: '#2da89b', + info: '#35b9ab', + infoHover: '#2da89b', + infoMuted: 'rgba(53, 185, 171, 0.2)', + neutral: '#a8c8d4', + neutralHover: '#7aa9b9', + neutralMuted: 'rgba(168, 200, 212, 0.2)', + success: '#73ba25', + successMuted: 'rgba(115, 186, 37, 0.2)', + successHover: '#5f9d1f', + warning: '#f7a70d', + warningMuted: 'rgba(247, 167, 13, 0.2)', + error: '#e74c3c', + errorMuted: 'rgba(231, 76, 60, 0.2)', + errorHover: '#c0392b', + link: '#73ba25', + linkVisited: '#35b9ab', + overlay: 'rgba(0, 0, 0, 0.75)', + fieldBg: 'rgba(0, 0, 0, 0.18)', + onPrimary: '#ffffff', + }, + }, +}; + +export const DEFAULT_THEME = 'neutral'; + +export function getTheme(id: string): Theme { + return themes[id] || themes[DEFAULT_THEME]; +} + +export function getAllThemes(): Theme[] { + return Object.values(themes); +} + +export function applyTheme(theme: Theme): void { + const root = document.documentElement; + const { colors } = theme; + + root.style.setProperty('--color-bg', colors.bg); + root.style.setProperty('--color-bg-alt', colors.bgAlt); + root.style.setProperty('--color-surface', colors.surface); + root.style.setProperty('--color-surface-hover', colors.surfaceHover); + root.style.setProperty('--color-border', colors.border); + root.style.setProperty('--color-border-hover', colors.borderHover); + root.style.setProperty('--color-text', colors.text); + 
root.style.setProperty('--color-text-muted', colors.textMuted); + root.style.setProperty('--color-text-inverse', colors.textInverse); + root.style.setProperty('--color-primary', colors.primary); + root.style.setProperty('--color-primary-hover', colors.primaryHover); + root.style.setProperty('--color-primary-muted', colors.primaryMuted); + root.style.setProperty('--color-secondary', colors.secondary); + root.style.setProperty('--color-secondary-hover', colors.secondaryHover); + root.style.setProperty('--color-info', colors.info); + root.style.setProperty('--color-info-hover', colors.infoHover); + root.style.setProperty('--color-info-muted', colors.infoMuted); + root.style.setProperty('--color-neutral', colors.neutral); + root.style.setProperty('--color-neutral-hover', colors.neutralHover); + root.style.setProperty('--color-neutral-muted', colors.neutralMuted); + root.style.setProperty('--color-success', colors.success); + root.style.setProperty('--color-success-muted', colors.successMuted); + root.style.setProperty('--color-success-hover', colors.successHover); + root.style.setProperty('--color-warning', colors.warning); + root.style.setProperty('--color-warning-muted', colors.warningMuted); + root.style.setProperty('--color-error', colors.error); + root.style.setProperty('--color-error-muted', colors.errorMuted); + root.style.setProperty('--color-error-hover', colors.errorHover); + root.style.setProperty('--color-link', colors.link); + root.style.setProperty('--color-link-visited', colors.linkVisited); + root.style.setProperty('--color-overlay', colors.overlay); + root.style.setProperty('--color-field-bg', colors.fieldBg); + root.style.setProperty('--color-on-primary', colors.onPrimary); + + root.setAttribute('data-theme', theme.id); + root.setAttribute('data-theme-mode', theme.isDark ? 'dark' : 'light'); +} + +export function loadSavedTheme(): string { + if (typeof localStorage !== 'undefined') { + return localStorage.getItem('likwid-theme') || DEFAULT_THEME; + } + return DEFAULT_THEME; +} + +export function saveTheme(themeId: string): void { + if (typeof localStorage !== 'undefined') { + localStorage.setItem('likwid-theme', themeId); + } +} + +export function initTheme(): void { + const savedThemeId = loadSavedTheme(); + const theme = getTheme(savedThemeId); + applyTheme(theme); +} diff --git a/frontend/src/pages/about.astro b/frontend/src/pages/about.astro new file mode 100644 index 0000000..c298a0f --- /dev/null +++ b/frontend/src/pages/about.astro @@ -0,0 +1,462 @@ +--- +import PublicLayout from '../layouts/PublicLayout.astro'; +--- + + +
+ + +
+

What Problem Does Likwid Solve?

+

+ Most organizations—whether open source projects, political movements, or member associations—struggle + with collective decision-making. They often rely on informal processes, ad-hoc polls, or tools designed + for other purposes (forums, chat platforms, issue trackers). +

+

+ These approaches fail in predictable ways: +

+
    +
  • Lack of structure — Discussions drift without resolution. Decisions are made implicitly or by whoever speaks loudest.
  • +
  • No audit trail — When moderation happens, there's no record of why. Shadow banning and hidden decisions erode trust.
  • +
  • Limited voting methods — Simple majority voting fails for complex decisions with multiple options or competing priorities.
  • +
  • Participation barriers — Members who can't attend synchronous meetings or follow high-volume discussions are effectively excluded.
  • +
+

+ Likwid addresses these problems by providing governance as infrastructure: a system that structures + deliberation, enforces transparency, and supports sophisticated decision-making methods. +

+
+ +
+

What Likwid Is

+
+
+

A Governance Engine

+

Likwid provides the core infrastructure for collective decision-making: proposals, deliberation, voting, and implementation tracking.

+
+
+

Modular by Design

+

Every component is a plugin. Communities choose their voting methods, delegation rules, moderation policies, and integrations.

+
+
+

Transparent by Default

+

All moderation actions are logged with reasons. There is no shadow banning. Audit trails are cryptographically verifiable.

+
+
+

Self-Hostable

+

Organizations control their own data. Likwid runs on your infrastructure with no vendor lock-in.

+
+
+
+ +
+

What Likwid Is Not

+
+
+

Not a Social Network

+

Likwid is not designed for casual conversation, content sharing, or social interactions. It's governance infrastructure.

+
+
+

Not a Simple Poll Tool

+

While Likwid supports voting, it's designed for complex decisions with deliberation phases, multiple options, and sophisticated tallying methods.

+
+
+

Not a Forum

+

Discussions in Likwid are structured and goal-oriented. They're part of a decision process, not open-ended conversation.

+
+
+

Not a CRM or Membership System

+

Likwid handles governance, not member management, donations, or communications. It integrates with those systems.

+
+
+
+ +
+

Who Is Likwid For?

+
+
+

Open Source & FLOSS Communities

+

+ Projects that need to make technical and organizational decisions collectively. + Maintainers, contributors, and users participating in governance alongside code contribution. + Integration with development workflows (GitLab, Forgejo, Gitea) and documentation systems. +

+
+
+

Associations & NGOs

+

+ Member-driven organizations requiring formal decision processes. + Annual general meetings, board decisions, and policy changes with proper record-keeping. + Transparent moderation for community standards enforcement. +

+
+
+

Political Movements & Parties

+

+ Grassroots organizations, civic lists, and political parties. + Delegate assemblies, policy development, and candidate selection. + Liquid democracy features for modern participatory politics. +

+
+
+

Federated & Distributed Organizations

+

+ Networks of chapters, working groups, or autonomous units. + Coordination across geographic or functional boundaries. + Federation features for inter-community governance. +

+
+
+
+ +
+

Core Principles

+
+
+

Transparency Over Convenience

+

Every moderation action is logged with a reason. Audit trails are public. There are no hidden decisions or shadow bans.

+
+
+

Privacy Where It Matters

+

Civic identities (who you are in discussions) and voting identities (how you voted) are separated. Participate openly while voting privately.

+
+
+

Process Over Features

+

Governance is a process, not a feature list. Likwid structures deliberation with clear phases: inform, discuss, decide.

+
+
+

Flexibility Over Prescription

+

Organizations differ. Likwid's plugin architecture lets communities configure governance to match their needs and culture.

+
+
+
+ +
+

Technical Foundation

+

+ Likwid is free and open source software (AGPLv3), built with modern, auditable technology: +

+
+
+ Backend + Rust (Axum framework) +
+
+ Frontend + Astro + TypeScript +
+
+ Database + PostgreSQL +
+
+ Containers + Podman / Docker +
+
+ License + AGPLv3 +
+
+

+ The choice of Rust provides memory safety and performance. PostgreSQL ensures data integrity + for critical governance records. The AGPLv3 license guarantees that improvements to Likwid + remain available to the community. +

+
+ +
+

Learn More

+

+ Explore Likwid's capabilities, understand our vision, or see the system in action. +

+ +
+
+
+ + diff --git a/frontend/src/pages/admin/approvals.astro b/frontend/src/pages/admin/approvals.astro new file mode 100644 index 0000000..9028731 --- /dev/null +++ b/frontend/src/pages/admin/approvals.astro @@ -0,0 +1,367 @@ +--- +export const prerender = false; +import Layout from '../../layouts/Layout.astro'; +import AdminNav from '../../components/AdminNav.astro'; +import { API_BASE as apiBase } from '../../lib/api'; +--- + + +
+ + +
+
+

Pending Approvals

+

Review and manage pending registration and community requests

+
+ +
+ + +
+ +
+
+

Loading pending registrations...

+
+
+ +
+
+

Loading pending communities...

+
+
+
+
+
+ + + + diff --git a/frontend/src/pages/admin/invitations.astro b/frontend/src/pages/admin/invitations.astro new file mode 100644 index 0000000..3564862 --- /dev/null +++ b/frontend/src/pages/admin/invitations.astro @@ -0,0 +1,384 @@ +--- +export const prerender = false; +import Layout from '../../layouts/Layout.astro'; +import AdminNav from '../../components/AdminNav.astro'; +import { API_BASE as apiBase } from '../../lib/api'; +--- + + +
+ + +
+
+

Invitation Management

+

Create and manage invitation codes for user registration

+
+ +
+

Create Invitation

+
+
+
+ + +
+
+ + +
+
+ + +
+
+ +
+
+ +
+
+

Active Invitations

+ +
+
+

Loading invitations...

+
+
+
+
+
+ + + + diff --git a/frontend/src/pages/admin/plugins.astro b/frontend/src/pages/admin/plugins.astro new file mode 100644 index 0000000..75a58bd --- /dev/null +++ b/frontend/src/pages/admin/plugins.astro @@ -0,0 +1,353 @@ +--- +export const prerender = false; +import Layout from '../../layouts/Layout.astro'; +import { API_BASE } from '../../lib/api'; +--- + + +
+ + +
+ + +
+
+

Core Plugins

+

Essential plugins that cannot be disabled

+
+
+ +
+

Voting Plugins

+

Advanced voting methods for proposals

+
+
+ +
+

Governance Features

+

Delegation, deliberation, and moderation

+
+
+ +
+

Integrations

+

Connect with external services

+
+
+
+
+
+
+ + + + diff --git a/frontend/src/pages/admin/roles.astro b/frontend/src/pages/admin/roles.astro new file mode 100644 index 0000000..9408809 --- /dev/null +++ b/frontend/src/pages/admin/roles.astro @@ -0,0 +1,277 @@ +--- +export const prerender = false; +import Layout from '../../layouts/Layout.astro'; +import { API_BASE } from '../../lib/api'; +--- + + +
+ + +
+ + +
+

Platform Roles

+
+

Loading roles...

+
+
+ +
+

Available Permissions

+
+

Loading permissions...

+
+
+
+
+
+ + + + diff --git a/frontend/src/pages/admin/settings.astro b/frontend/src/pages/admin/settings.astro new file mode 100644 index 0000000..b3228f8 --- /dev/null +++ b/frontend/src/pages/admin/settings.astro @@ -0,0 +1,382 @@ +--- +import Layout from '../../layouts/Layout.astro'; +--- + + +
+ + +
+
+

Instance Settings

+

Configure global platform settings

+
+ +
Loading settings...
+ + + +
+
+
+ + + + diff --git a/frontend/src/pages/admin/voting.astro b/frontend/src/pages/admin/voting.astro new file mode 100644 index 0000000..00bb055 --- /dev/null +++ b/frontend/src/pages/admin/voting.astro @@ -0,0 +1,689 @@ +--- +export const prerender = false; +import Layout from '../../layouts/Layout.astro'; +import AdminNav from '../../components/AdminNav.astro'; +import { API_BASE } from '../../lib/api'; +--- + + +
+ + +
+ + + +
+

+ + Understanding Voting Methods +

+
+
+
+ +
+

Approval

+

Vote yes/no on each option. Simple and prevents spoiler effects.

+
+ + + +
+
+
+
+ +
+

Ranked Choice

+

Rank preferences. The option with the fewest first-choice votes is eliminated each round until one has a majority.

+
+ + + +
+
+
+
+ +
+

Schulze

+

Pairwise comparison tournament. Selects the Condorcet winner whenever one exists.

+
+ + + +
+
+
+
+ +
+

STAR

+

Score 0-5 stars, top two face automatic runoff.

+
+ + + +
+
+
+
+ +
+

Quadratic

+

Spend credits where cost = votes². Express intensity.

+
+ + + +
+
+
+
+ + +
+
+

+ + Platform Configuration +

+

Enable or disable voting methods for all communities

+
+
+
+
+ Loading voting methods... +
+
+
+ + +
+
+

+ + Default Phase Settings +

+

Configure the default deliberation workflow for proposals

+
+
+
+
+
+ +
+ Inform + Read materials +
+
+
+
+ +
+ Discuss + Deliberate +
+
+
+
+ +
+ Vote + Decide +
+
+
+
+ +
+ Results + Outcome +
+
+
+ + + +
+
+
+
+
+
+ + + + diff --git a/frontend/src/pages/communities.astro b/frontend/src/pages/communities.astro new file mode 100644 index 0000000..99dbf66 --- /dev/null +++ b/frontend/src/pages/communities.astro @@ -0,0 +1,224 @@ +--- +import Layout from '../layouts/Layout.astro'; +--- + + +
+
+
+

Communities

+

Browse organizations and communities

+
+ +
+ + + +
+

Loading communities...

+
+ + +
+
+ + diff --git a/frontend/src/pages/communities/[slug]/index.astro b/frontend/src/pages/communities/[slug]/index.astro new file mode 100644 index 0000000..ceba4d5 --- /dev/null +++ b/frontend/src/pages/communities/[slug]/index.astro @@ -0,0 +1,671 @@ +--- +export const prerender = false; +import Layout from '../../../layouts/Layout.astro'; +import { API_BASE as apiBase } from '../../../lib/api'; +const { slug } = Astro.params; +--- + + +
+
+

Loading community...

+
+
+
+ + + + diff --git a/frontend/src/pages/communities/[slug]/plugins.astro b/frontend/src/pages/communities/[slug]/plugins.astro new file mode 100644 index 0000000..164705f --- /dev/null +++ b/frontend/src/pages/communities/[slug]/plugins.astro @@ -0,0 +1,674 @@ +--- +export const prerender = false; +import Layout from '../../../layouts/Layout.astro'; +import { API_BASE as apiBase } from '../../../lib/api'; +const { slug } = Astro.params; +--- + + +
+ + +
+

Loading...

+
+
+
+ + + + diff --git a/frontend/src/pages/communities/[slug]/proposals/index.astro b/frontend/src/pages/communities/[slug]/proposals/index.astro new file mode 100644 index 0000000..c2732ce --- /dev/null +++ b/frontend/src/pages/communities/[slug]/proposals/index.astro @@ -0,0 +1,236 @@ +--- +export const prerender = false; +import Layout from '../../../../layouts/Layout.astro'; +import { API_BASE as apiBase } from '../../../../lib/api'; +const { slug } = Astro.params; +--- + + +
+
+
+

Proposals

+

Proposals and decisions

+
+ +
+ +
+ + +
+ +
+

Loading proposals...

+
+
+
+ + + + diff --git a/frontend/src/pages/communities/[slug]/proposals/new.astro b/frontend/src/pages/communities/[slug]/proposals/new.astro new file mode 100644 index 0000000..834e138 --- /dev/null +++ b/frontend/src/pages/communities/[slug]/proposals/new.astro @@ -0,0 +1,262 @@ +--- +export const prerender = false; +import Layout from '../../../../layouts/Layout.astro'; +import { API_BASE as apiBase } from '../../../../lib/api'; +const { slug } = Astro.params; +--- + + +
+
+

Create Proposal

+
+
+ + +
+
+ + +
+
+ + +

Select all options you approve of

+
+
+ +
+
+ +
+
+ +
+
+ +
+
+ +
+
+
+
+ + + + diff --git a/frontend/src/pages/communities/[slug]/settings.astro b/frontend/src/pages/communities/[slug]/settings.astro new file mode 100644 index 0000000..6e01fc0 --- /dev/null +++ b/frontend/src/pages/communities/[slug]/settings.astro @@ -0,0 +1,336 @@ +--- +export const prerender = false; +import Layout from '../../../layouts/Layout.astro'; + +const { slug } = Astro.params; +--- + + +
+
+ ← Back to Community +

Community Settings

+

Configure settings for this community

+
+ +
Loading settings...
+ + + + +
+
+ + + + diff --git a/frontend/src/pages/communities/[slug]/voting-config.astro b/frontend/src/pages/communities/[slug]/voting-config.astro new file mode 100644 index 0000000..b11826e --- /dev/null +++ b/frontend/src/pages/communities/[slug]/voting-config.astro @@ -0,0 +1,306 @@ +--- +export const prerender = false; +import Layout from '../../../layouts/Layout.astro'; +import { API_BASE } from '../../../lib/api'; +const { slug } = Astro.params; +--- + + +
+ + +
+

Loading voting methods...

+
+
+
+ + + + diff --git a/frontend/src/pages/communities/new.astro b/frontend/src/pages/communities/new.astro new file mode 100644 index 0000000..dcd2eee --- /dev/null +++ b/frontend/src/pages/communities/new.astro @@ -0,0 +1,162 @@ +--- +import Layout from '../../layouts/Layout.astro'; +import { API_BASE as apiBase } from '../../lib/api'; +--- + + +
+
+

Create Community

+
+
+ + +
+
+ + + Lowercase letters, numbers, and hyphens only +
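As a rough illustration of the slug rule stated in the hint above, a client-side check could look like the TypeScript sketch below; the pattern and function name are illustrative assumptions, not Likwid's actual validation code.

```typescript
// Hypothetical slug check mirroring the hint: lowercase letters, numbers, and hyphens only,
// with no leading, trailing, or doubled hyphens.
const SLUG_PATTERN = /^[a-z0-9]+(?:-[a-z0-9]+)*$/;

function isValidSlug(slug: string): boolean {
  return SLUG_PATTERN.test(slug);
}

isValidSlug('regional-makers'); // true
isValidSlug('Regional Makers'); // false (uppercase and space)
```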
+
+ + +
+
+ +
+
+
+
+ + + + diff --git a/frontend/src/pages/dashboard.astro b/frontend/src/pages/dashboard.astro new file mode 100644 index 0000000..0021534 --- /dev/null +++ b/frontend/src/pages/dashboard.astro @@ -0,0 +1,349 @@ +--- +import Layout from '../layouts/Layout.astro'; +import { API_BASE as apiBase } from '../lib/api'; +--- + + +
+
+

Loading...

+
+
+
+ + + + diff --git a/frontend/src/pages/delegations.astro b/frontend/src/pages/delegations.astro new file mode 100644 index 0000000..86d149f --- /dev/null +++ b/frontend/src/pages/delegations.astro @@ -0,0 +1,545 @@ +--- +import Layout from '../layouts/Layout.astro'; +--- + + +
+ + +
+

Please log in to manage delegations.

+
+ + + + + +
+
+ + + + diff --git a/frontend/src/pages/demo.astro b/frontend/src/pages/demo.astro new file mode 100644 index 0000000..6b3a326 --- /dev/null +++ b/frontend/src/pages/demo.astro @@ -0,0 +1,872 @@ +--- +import PublicLayout from '../layouts/PublicLayout.astro'; +import { API_BASE } from '../lib/api'; +--- + + +
+ + +
+
+

Why a Demo Instance?

+

+ Governance only makes sense when it has context. Decisions need history. + Delegation needs visible relationships. Moderation needs traceable events. + An empty system communicates nothing. +

+

+ Our demo instance includes pre-seeded communities with: +

+
    +
  • Past, ongoing, and scheduled decisions
  • +
  • Active delegation networks
  • +
  • Visible moderation history
  • +
  • Multiple voting methods in use
  • +
  • Real governance narratives, not random data
  • +
+
+
+ +
+

Demo Communities

+

+ Explore different types of organizations and see how they use Likwid for governance. +

+ +
+
+
+

Aurora Framework

+ Open Source Project +
+

+ A fictional open source project demonstrating technical decision-making. + See RFC processes, feature prioritization, and maintainer elections using + Schulze voting and liquid delegation. +

+
+ Loading... +
+ Explore Aurora → +
+ +
+
+

Civic Commons Network

+ Political Movement +
+

+ A grassroots civic organization showing policy development and delegate assemblies. + Observe quadratic voting for priority setting and transparent moderation + for community standards. +

+
+ Loading... +
+ Explore Civic Commons → +
+ +
+
+

Regional Makers Collective

+ Federation +
+

+ A network of local chapters demonstrating federated governance. + See cross-community coordination, shared resources decisions, + and chapter autonomy in practice. +

+
+ Loading... +
+ Explore Makers → +
+
+
+ +
+

How to Explore

+ +
+
+
👁️
+

Browse Freely

+

+ Navigate communities, read proposals, view voting results, and explore + delegation networks without any account. Full read access is public. +

+ Browse Communities → +
+ +
+
🧪
+

Demo Accounts

+

+ Click a demo account below to log in instantly and experience participation: + vote on proposals, create delegations, and see the member experience firsthand. +

+
+ + + +
+

All demo accounts use password: demo123

+
+
+ +
+

Demo Reset Notice

+

+ The demo instance resets periodically to maintain a known state. + Any changes you make will be reverted. This ensures every visitor + experiences meaningful governance data, not accumulated noise. +

+
+
+ +
+

Suggested Exploration Path

+ +
+
+
1
+
+

Browse a Community

+

Start with Aurora Framework. See the community overview, active members, and governance structure.

+
+
+
+
2
+
+

Read a Completed Decision

+

Find a closed proposal. See the deliberation history, voting results, and how the outcome was reached.

+
+
+
+
3
+
+

Explore Delegations

+

View the delegation network. See who trusts whom on which topics and how votes flow.

+
+
+
+
4
+
+

Check the Moderation Log

+

See transparent moderation in action. Every action has a reason, a rule reference, and accountability.

+
+
+
+
5
+
+

Cast a Vote

+

Log in with a demo account and vote on an active proposal. Experience different voting methods.

+
+
+
+
+ +
+

Ready to Explore?

+

+ The demo is live and waiting. No registration required to browse. + Use demo accounts to participate. +

+ +
+ +
+

What's Next?

+ +
+
+
+ + + + diff --git a/frontend/src/pages/docs.astro b/frontend/src/pages/docs.astro new file mode 100644 index 0000000..669999b --- /dev/null +++ b/frontend/src/pages/docs.astro @@ -0,0 +1,354 @@ +--- +import PublicLayout from '../layouts/PublicLayout.astro'; +--- + + +
+ + +
+ + + + + + + + + + + +
+ +
+
+

Documentation In Progress

+

+ Likwid is under active development and documentation is being written alongside the codebase. + Some sections may be incomplete or subject to change. For the latest information, see the + Codeberg repository. +

+
+
+ +
+

Contributing to Documentation

+

+ Found an error? Want to add a section? Documentation contributions are welcome. +

+ +
+
+
+ + diff --git a/frontend/src/pages/features.astro b/frontend/src/pages/features.astro new file mode 100644 index 0000000..57875e8 --- /dev/null +++ b/frontend/src/pages/features.astro @@ -0,0 +1,680 @@ +--- +import PublicLayout from '../layouts/PublicLayout.astro'; +--- + + +
+ + + +
+
+

Advanced Voting Methods

+

+ Different decisions require different methods. Likwid supports multiple voting systems, + each implemented as a plugin that communities can enable based on their needs. +

+
+ +
+
+
+

Schulze Method

+ Condorcet-consistent +
+

+ Finds the option that would beat every other option in a head-to-head comparison. + Ideal for elections and decisions where finding a true consensus winner matters. +

+
+ Best for: Elections, selecting from many candidates, finding compromise options +
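A minimal TypeScript sketch of the idea behind a Schulze count, assuming ballots are complete rankings over the same option ids; this illustrates the strongest-path computation only and is not Likwid's actual tallying code.

```typescript
// Schulze sketch: ballots are rankings (most preferred first). d[a][b] counts voters who
// prefer option a to option b; p[a][b] is the strength of the strongest path from a to b.
function schulzeWinners(options: string[], ballots: string[][]): string[] {
  const idx = new Map<string, number>();
  options.forEach((o, i) => idx.set(o, i));
  const n = options.length;
  const d = Array.from({ length: n }, () => new Array<number>(n).fill(0));
  for (const ballot of ballots) {
    for (let i = 0; i < ballot.length; i++) {
      for (let j = i + 1; j < ballot.length; j++) {
        d[idx.get(ballot[i])!][idx.get(ballot[j])!] += 1; // ballot[i] preferred to ballot[j]
      }
    }
  }
  // Start with direct pairwise victories only.
  const p = Array.from({ length: n }, (_, a) =>
    Array.from({ length: n }, (_, b) => (a !== b && d[a][b] > d[b][a] ? d[a][b] : 0)),
  );
  // Floyd-Warshall style relaxation: a path is only as strong as its weakest link.
  for (let k = 0; k < n; k++)
    for (let a = 0; a < n; a++)
      for (let b = 0; b < n; b++)
        if (a !== k && b !== k && a !== b)
          p[a][b] = Math.max(p[a][b], Math.min(p[a][k], p[k][b]));
  // Winners beat or tie every other option on strongest paths.
  return options.filter((_, a) => options.every((_, b) => a === b || p[a][b] >= p[b][a]));
}
```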
+
+ +
+
+

STAR Voting

+ Score + Runoff +
+

+ Score Then Automatic Runoff. Voters rate each option 0-5, then the top two + advance to an instant runoff. Balances preference intensity with broad support. +

+
+ Best for: Decisions where both passion and consensus matter +
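To make the two-step mechanism concrete, here is a small TypeScript sketch that assumes each ballot maps option ids to 0-5 scores; the names and the tie handling are illustrative only.

```typescript
// STAR sketch: the two highest-scoring options enter an automatic runoff decided by
// which finalist more voters scored higher on their ballots.
function starWinner(options: string[], ballots: Record<string, number>[]): string | null {
  if (options.length === 0) return null;
  const totals = options
    .map((option) => ({ option, score: ballots.reduce((sum, b) => sum + (b[option] ?? 0), 0) }))
    .sort((x, y) => y.score - x.score);
  const [first, second] = totals;
  if (second === undefined) return first.option;
  let preferFirst = 0;
  let preferSecond = 0;
  for (const b of ballots) {
    const a = b[first.option] ?? 0;
    const c = b[second.option] ?? 0;
    if (a > c) preferFirst += 1;
    else if (c > a) preferSecond += 1;
  }
  return preferSecond > preferFirst ? second.option : first.option; // ties need a policy
}
```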
+
+ +
+
+

Quadratic Voting

+ Intensity-weighted +
+

+ Voters spend "voice credits" on options they care about. The cost increases + quadratically, so expressing strong preference costs more. Reveals preference intensity. +

+
+ Best for: Budget allocation, prioritization, decisions where some care more than others +
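The quadratic cost rule itself is easy to state in code; the sketch below assumes a per-voter credit budget and is purely illustrative, not Likwid's implementation.

```typescript
// Quadratic cost sketch: casting v votes on one option costs v * v credits,
// so concentrating influence gets expensive quickly (1 -> 1, 2 -> 4, 3 -> 9).
const creditCost = (votes: number): number => votes * votes;

// Check a ballot (option id -> votes) against a per-voter credit budget, e.g. 100 credits.
function withinBudget(ballot: Record<string, number>, budget: number): boolean {
  const spent = Object.values(ballot).reduce((sum, v) => sum + creditCost(v), 0);
  return spent <= budget;
}

withinBudget({ parks: 3, transit: 5 }, 100); // 9 + 25 = 34 credits, so true
```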
+
+ +
+
+

Ranked Choice

+ Instant Runoff +
+

+ Voters rank options in order of preference. If no option has a majority, + the option with the fewest first-choice votes is eliminated and its votes transfer. This continues until a winner emerges. +

+
+ Best for: Single-winner elections with multiple candidates +
+
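A compact instant-runoff sketch, with simplified tie-breaking and exhausted-ballot handling; it is not the production tally code.

```rust
use std::collections::HashSet;

/// Minimal instant-runoff sketch: each ballot ranks option indices from most to least preferred.
fn irv_winner(ballots: &[Vec<usize>], num_options: usize) -> Option<usize> {
    let mut eliminated: HashSet<usize> = HashSet::new();
    loop {
        // Count each ballot for its highest-ranked option still in the race.
        let mut counts = vec![0usize; num_options];
        let mut active = 0;
        for ballot in ballots {
            if let Some(&choice) = ballot.iter().find(|&&c| !eliminated.contains(&c)) {
                counts[choice] += 1;
                active += 1;
            }
        }
        let remaining: Vec<usize> =
            (0..num_options).filter(|i| !eliminated.contains(i)).collect();
        // A majority of still-active ballots wins outright.
        if let Some(&w) = remaining.iter().find(|&&i| counts[i] * 2 > active) {
            return Some(w);
        }
        if remaining.len() <= 1 {
            return remaining.first().copied();
        }
        // Otherwise eliminate the option with the fewest votes and let its ballots transfer.
        let loser = *remaining.iter().min_by_key(|&&i| counts[i])?;
        eliminated.insert(loser);
    }
}

fn main() {
    // Hypothetical ballots over options 0, 1, 2.
    let ballots: Vec<Vec<usize>> = vec![
        vec![0, 1, 2], vec![0, 2, 1], vec![1, 2, 0],
        vec![2, 1, 0], vec![2, 1, 0],
    ];
    println!("winner: {:?}", irv_winner(&ballots, 3));
}
```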
+ +
+
+

Approval Voting

+ Simple multi-select +
+

+ Voters approve or disapprove each option. The option with the most approvals wins. Simple, relatively resistant to strategic voting, and good for quick decisions.

+
+ Best for: Simple decisions, scheduling, selecting multiple winners +
+
+
+
+ + +
+
+

Liquid Delegation

+

+ Not everyone can participate in every decision. Liquid delegation lets members + delegate their voice while retaining control. +

+
+ +
+
+

Topic-Based Delegation

+

+ Delegate on specific topics (technical decisions, policy, budget) to people + you trust in those areas. Keep direct control over topics you care about personally. +

+
+
+

Transitive Chains

+

+ Your delegate can further delegate to their trusted experts, creating chains + of trust. Cycle detection ensures no infinite loops. +

+
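One way a delegation chain could be resolved at tally time, sketched with a flat map of user IDs. The real engine adds topic scoping, expiry, and direct-vote overrides, so treat this as illustration only.

```rust
use std::collections::{HashMap, HashSet};

/// Follows a chain of delegations from `member` until it reaches someone with
/// no outgoing delegation, refusing to loop if a cycle is encountered.
fn resolve_delegate(member: u64, delegations: &HashMap<u64, u64>) -> Option<u64> {
    let mut seen = HashSet::new();
    let mut current = member;
    while let Some(&next) = delegations.get(&current) {
        if !seen.insert(current) {
            // Cycle detected: the delegation is treated as unresolved.
            return None;
        }
        current = next;
    }
    Some(current)
}

fn main() {
    // Hypothetical delegations: 1 -> 2 -> 3, and 4 -> 5 -> 4 (a cycle).
    let delegations: HashMap<u64, u64> =
        [(1, 2), (2, 3), (4, 5), (5, 4)].into_iter().collect();
    println!("1 resolves to {:?}", resolve_delegate(1, &delegations)); // Some(3)
    println!("4 resolves to {:?}", resolve_delegate(4, &delegations)); // None (cycle)
}
```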
+
+

Always Revocable

+

+ Override any delegation by voting directly. Revoke delegation at any time. + Your voice remains under your control. +

+
+
+

Transparent Networks

+

+ See delegation relationships (with privacy controls). Understand who influences + decisions and how trust flows through the community. +

+
+
+
+ + +
+
+

Structured Deliberation

+

+ Good decisions require good discussion. Likwid structures deliberation to ensure + informed, inclusive, and productive conversations. +

+
+ +
+
+
📖
+

Inform

+

+ Proposals enter an inform phase. Members read the proposal, supporting documents, + and context before discussion begins. Optional read-time requirements ensure engagement. +

+
+
+
+
💬
+

Discuss

+

+ Structured discussion with facilitator tools. Comments can be organized, + constructive input surfaced, and concerns identified. Small group breakouts for complex topics. +

+
+
+
+
+

Decide

+

+ Voting opens after adequate deliberation. Clear deadlines, reminder notifications, + and results calculated using the community's chosen method. +

+
+
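The phase progression is naturally modeled as a small state machine; the enum and transition rule below are an illustrative sketch, not the backend's actual model.

```rust
/// Sketch of the Inform -> Discuss -> Decide progression as a state machine.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Phase {
    Inform,
    Discuss,
    Decide,
    Closed,
}

impl Phase {
    /// Advances to the next phase once the current phase's minimum duration has elapsed.
    fn advance(self, elapsed_hours: u32, min_hours: u32) -> Phase {
        if elapsed_hours < min_hours {
            return self; // minimum duration not yet reached
        }
        match self {
            Phase::Inform => Phase::Discuss,
            Phase::Discuss => Phase::Decide,
            Phase::Decide => Phase::Closed,
            Phase::Closed => Phase::Closed,
        }
    }
}

fn main() {
    let mut phase = Phase::Inform;
    // (elapsed hours, configured minimum hours) for three hypothetical checks.
    for (elapsed, min) in [(12, 24), (30, 24), (72, 48)] {
        phase = phase.advance(elapsed, min);
        println!("-> {phase:?}");
    }
}
```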
+ +
+
+

Facilitator Tools

+

Designated facilitators can organize discussions, highlight key points, and manage conversation flow.

+
+
+

Configurable Phases

+

Set minimum and maximum durations for each phase. Extend deadlines when needed.

+
+
+

Notification System

+

Members receive timely notifications about proposals, phase transitions, and voting deadlines.

+
+
+

Amendment Process

+

Proposals can be amended during deliberation based on feedback, with clear change tracking.

+
+
+
+ + +
+
+

Transparent Moderation

+

+ Every community needs moderation. Likwid makes moderation visible, accountable, + and auditable—building trust through transparency. +

+
+ +
+
+

Public Moderation Ledger

+

+ Every moderation action is logged with who took it, what rule was violated, + what evidence supported the decision, and what content was affected. + No shadow banning. No hidden removals. +

+
+
+

Cryptographic Audit Trail

+

+ The moderation ledger is cryptographically chained. Each entry references + the previous one, making retroactive alteration detectable. Integrity you can verify. +

+
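Conceptually, each ledger entry carries a hash of the one before it. The sketch below shows the chaining idea using the sha2 crate; the struct fields and hashing scheme are assumptions for illustration, not the actual schema.

```rust
// Sketch of a hash-chained ledger entry. Assumes the `sha2` crate is available;
// field names are illustrative, not the real database schema.
use sha2::{Digest, Sha256};

#[allow(dead_code)]
struct LedgerEntry {
    actor: String,
    rule: String,
    action: String,
    prev_hash: String, // hex digest of the previous entry
    hash: String,      // hex digest over this entry's contents plus prev_hash
}

fn append_entry(prev_hash: &str, actor: &str, rule: &str, action: &str) -> LedgerEntry {
    let mut hasher = Sha256::new();
    hasher.update(prev_hash.as_bytes());
    hasher.update(actor.as_bytes());
    hasher.update(rule.as_bytes());
    hasher.update(action.as_bytes());
    let hash = format!("{:x}", hasher.finalize());
    LedgerEntry {
        actor: actor.into(),
        rule: rule.into(),
        action: action.into(),
        prev_hash: prev_hash.into(),
        hash,
    }
}

fn main() {
    let genesis = append_entry("", "mod_alice", "rule-3", "hide comment 42");
    let second = append_entry(&genesis.hash, "mod_bob", "rule-1", "lock proposal 7");
    // Altering the first entry would change its hash and break the link stored in the second.
    println!("entry 1: {}", genesis.hash);
    println!("entry 2: {} (links to {})", second.hash, second.prev_hash);
}
```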
+
+

Rule-Based Actions

+

+ Communities define their moderation rules. Actions reference specific rules, + creating consistency and enabling appeals based on documented standards. +

+
+
+

Separate Identities

+

+ Civic identity (who you are in discussions) and voting identity (how you voted) + are separated. Participate openly while maintaining ballot secrecy. +

+
+
+
+ + +
+
+

Plugin Architecture

+

+ Likwid is modular by design. Every major component is a plugin that can be + enabled, disabled, or customized per community. +

+
+ +
+
+

Voting Plugins

+
  • Approval Voting
  • Ranked Choice
  • Schulze Method
  • STAR Voting
  • Quadratic Voting
+
+
+

Feature Plugins

+
  • Liquid Delegation
  • Structured Deliberation
  • Moderation Ledger
  • Federation
  • Conflict Resolution
+
+
+

Integration Plugins

+
  • GitLab Integration
  • Matrix Chat
  • Webhook Notifications
  • OAuth Providers
  • Custom Workflows
+
+
+ +
+

+ Communities enable only the plugins they need. Platform administrators control + which plugins are available. Third-party plugins extend functionality further. +

+
+
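To make the idea concrete, a voting-method plugin boundary could look roughly like the trait below. The real system runs plugins in a WASM runtime, so this trait and its names are illustrative assumptions only; approval voting is used as the simplest possible implementation.

```rust
/// A cast ballot, in whatever shape the method expects (ranks, scores, approvals...).
type Ballot = Vec<u32>;

trait VotingMethod {
    /// Machine-readable identifier used when a community enables the plugin.
    fn id(&self) -> &'static str;
    /// Tallies ballots over `num_options` options and returns the winning option index.
    fn tally(&self, ballots: &[Ballot], num_options: usize) -> Option<usize>;
}

/// Approval voting as a minimal implementation of the interface.
struct Approval;

impl VotingMethod for Approval {
    fn id(&self) -> &'static str {
        "approval"
    }
    fn tally(&self, ballots: &[Ballot], num_options: usize) -> Option<usize> {
        let mut totals = vec![0u32; num_options];
        for ballot in ballots {
            for (i, &approved) in ballot.iter().enumerate().take(num_options) {
                totals[i] += approved.min(1);
            }
        }
        (0..num_options).max_by_key(|&i| totals[i])
    }
}

fn main() {
    let method: Box<dyn VotingMethod> = Box::new(Approval);
    let ballots: Vec<Ballot> = vec![vec![1, 0, 1], vec![1, 1, 0], vec![0, 1, 1]];
    println!("{} winner: {:?}", method.id(), method.tally(&ballots, 3));
}
```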
+ + +
+
+

Federation

+

+ Connect communities across instances while maintaining autonomy. + Share decisions, coordinate actions, and build networks of trust. +

+
+ +
+
+

Cross-Instance Coordination

+

Communities on different Likwid instances can federate, sharing proposals and decisions.

+
+
+

Trust Levels

+

Configure trust levels for federated instances. Control what information is shared and how.

+
+
+

Autonomy Preserved

+

Each community maintains control over its own governance. Federation is cooperative, not hierarchical.

+
+
+

Network Effects

+

Federated communities can coordinate across geographic or functional boundaries.

+
+
+
+ + +
+

See It In Action

+

+ The best way to understand Likwid is to experience it. Explore our demo instance + with pre-populated communities and governance history. +

+ +
+
+
+ + diff --git a/frontend/src/pages/index.astro b/frontend/src/pages/index.astro new file mode 100644 index 0000000..c114e61 --- /dev/null +++ b/frontend/src/pages/index.astro @@ -0,0 +1,565 @@ +--- +import PublicLayout from '../layouts/PublicLayout.astro'; +--- + + + +
+
+

Governance infrastructure for distributed organizations

+

+ Likwid is a modular governance engine designed for communities that need + structured decision-making, transparent moderation, and flexible delegation systems. +

+

+ Not a social network. Not a simple polling tool. + A complete governance layer for complex, distributed organizations. +

+ +
+
+ + +
+
+

What is Likwid?

+
+
+
+ + + + + +
+

Modular Architecture

+

Every component is a plugin. Voting methods, delegation systems, integrations, and workflows can be added, removed, or customized per community.

+
+
+
+ + + + +
+

Process-Oriented

+

Governance is a process, not a feature. Likwid structures deliberation, voting, and implementation as distinct phases with clear transitions.

+
+
+
+ + + + + + + +
+

Auditable by Design

+

All moderation actions are logged publicly. No shadow banning. No hidden decisions. Every action has a reason, and every reason is visible.

+
+
+
+
+ + +
+
+

Built for organizations that take governance seriously

+
+
+

Open Source Projects

+

Maintainers, contributors, and communities making technical and organizational decisions together.

+
+
+

Associations & NGOs

+

Member-driven organizations requiring transparent voting, delegation, and accountability.

+
+
+

Political Movements

+

Parties, civic lists, and grassroots movements needing structured collective decision-making.

+
+
+

Federated Networks

+

Distributed organizations coordinating across chapters, regions, or working groups.

+
+
+
+
+ + +
+
+

Core Capabilities

+
+
+

Advanced Voting Methods

+

Schulze, STAR, Quadratic, Ranked Choice, and Approval voting. Each method is a plugin that can be enabled per community.

+
+
+

Liquid Delegation

+

Topic-based, time-limited, and revocable delegation. Delegate your voice on specific topics to trusted members.

+
+
+

Structured Deliberation

+

Inform → Discuss → Decide workflow. Ensures members understand proposals before voting begins.

+
+
+

Transparent Moderation

+

Every moderation action is logged with reasons. Cryptographic audit trail ensures accountability.

+
+
+

Privacy-Preserving Voting

+

Separate civic and voting identities. Participate publicly while voting privately.

+
+
+

Federation Ready

+

Connect communities across instances. Share decisions while maintaining autonomy.

+
+
+
+
+ + +
+
+

How Governance Works in Likwid

+
+
+
1
+

Proposal Creation

+

A member drafts a proposal with clear options. The proposal enters an inform phase where members can read and understand it.

+
+
+
+
2
+

Deliberation

+

Structured discussion with facilitator tools. Comments are organized to surface constructive input and identify concerns.

+
+
+
+
3
+

Voting

+

Members vote using the community's chosen method. Delegations are resolved. Results are calculated transparently.

+
+
+
+
4
+

Implementation

+

Decisions are recorded and tracked. Integration plugins can trigger external actions or workflows.

+
+
+
+
+ + +
+
+

Technical Foundation

+

Likwid is free and open source software, built with modern, auditable technology.

+
+
+ Backend + Rust (Axum) +
+
+ Frontend + Astro + TypeScript +
+
+ Database + PostgreSQL +
+
+ License + AGPLv3 +
+
+

+ Self-hostable. No vendor lock-in. Your governance data stays under your control. +

+
+
+ + +
+
+

Experience Governance in Action

+

The best way to understand Likwid is to see it working. Explore our demo instance with pre-populated communities, ongoing decisions, and real governance history.

+ +
+
+
+ + diff --git a/frontend/src/pages/login.astro b/frontend/src/pages/login.astro new file mode 100644 index 0000000..fd52219 --- /dev/null +++ b/frontend/src/pages/login.astro @@ -0,0 +1,141 @@ +--- +import Layout from '../layouts/Layout.astro'; +import { API_BASE as apiBase } from '../lib/api'; +--- + + +
+
+

Login

+
+
+ + +
+
+ + +
+
+ +
+

+ Don't have an account? Register +

+
+
+
+ + + + diff --git a/frontend/src/pages/manifesto.astro b/frontend/src/pages/manifesto.astro new file mode 100644 index 0000000..e8c5370 --- /dev/null +++ b/frontend/src/pages/manifesto.astro @@ -0,0 +1,452 @@ +--- +import PublicLayout from '../layouts/PublicLayout.astro'; +--- + + +
+ + +
+

The Problem with Digital Governance

+

+ We have built extraordinary tools for communication, collaboration, and coordination. + Yet when it comes to collective decision-making—the core function of any democratic organization—we + still rely on methods designed for physical assemblies or, worse, tools designed for entirely different purposes. +

+

+ Open source projects make critical decisions in issue trackers. Political movements coordinate through + chat platforms. Associations run elections via email. These tools were not designed for governance, + and it shows. +

+
+

+ "The medium shapes the message. When we govern through tools designed for other purposes, + we inherit their assumptions, their limitations, and their biases." +

+
+

+ The result is predictable: decisions happen informally, participation is uneven, + moderation is opaque, and trust erodes over time. Organizations that should be models + of collective action become dominated by those with the most time, the loudest voices, + or the most technical access. +

+
+ +
+

Governance as Infrastructure

+

+ Likwid treats governance not as a feature to be bolted onto other systems, but as + infrastructure—a foundational layer that organizations build upon. +

+

+ Just as we don't expect organizations to build their own databases or web servers, + they shouldn't need to build their own governance systems. They need infrastructure + that is: +

+
  • Reliable — Governance decisions are high-stakes. The system must be consistent, auditable, and resistant to manipulation.
  • Flexible — Organizations have different cultures, scales, and needs. The infrastructure must adapt, not prescribe.
  • Transparent — Power exercised in secret corrodes trust. Every action that affects members must be visible and accountable.
  • Accessible — Participation cannot be limited to those who can attend synchronous meetings or follow high-volume discussions.
+
+ +
+

The Principles of Democracy Design

+ +
+

1. Information Must Be Understandable, Not Just Available

+

+ Transparency is necessary but not sufficient. Dumping raw data on members and calling + it "transparency" is a form of obfuscation. Information must be presented in ways that + enable understanding and informed participation. +

+

+ This means: layered complexity (quick overview to deep dive), interactive visualizations + where appropriate, clear explanations of process and context, and accessible language + alongside technical precision. +

+
+ +
+

2. Deliberation Must Be Structured

+

+ Free-form discussion favors those with time and rhetorical skill. Unstructured debates + drift without resolution. Effective deliberation requires structure: clear phases, + defined roles, and mechanisms to surface constructive input. +

+

+ Likwid implements the Inform → Discuss → Decide workflow: +

+
  • Inform: Members read and understand the proposal before discussion begins.
  • Discuss: Structured deliberation with facilitator tools to manage conversation.
  • Decide: Voting with appropriate methods for the decision at hand.
+
+ +
+

3. Voting Methods Must Match Decision Types

+

+ Simple majority voting fails for complex decisions. When there are multiple options, + competing priorities, or minority concerns that deserve protection, more sophisticated + methods are required. +

+

+ Different decisions call for different methods: +

+
  • Schulze method for finding the option that would beat all others head-to-head.
  • STAR voting for balancing intensity of preference with broad support.
  • Quadratic voting for decisions where some members care more than others.
  • Approval voting for simple multi-option choices.
  • Ranked choice for instant runoff scenarios.
+

+ Communities should choose methods that fit their culture and decision types, + not be locked into a single approach. +

+
+ +
+

4. Delegation Must Be Flexible and Revocable

+

+ Not everyone can participate in every decision. Liquid delegation allows members + to delegate their voice on specific topics to trusted representatives—while retaining + the ability to vote directly or revoke delegation at any time. +

+

+ This creates a spectrum between direct democracy (everyone votes on everything) and + representative democracy (elected delegates decide). Members choose their level of + engagement per topic, creating natural expertise networks while maintaining individual sovereignty. +

+
+ +
+

5. Moderation Must Be Visible and Accountable

+

+ Every community needs moderation. The question is whether it happens transparently + or in the shadows. Shadow banning, hidden removals, and opaque decisions erode trust + and create paranoia. +

+

+ In Likwid, every moderation action is logged with: +

+
  • Who took the action
  • What rule or policy was violated
  • What evidence supported the decision
  • What the affected content was (preserved for appeal)
+

+ The moderation ledger is cryptographically chained, making retroactive alteration detectable. + Trust is built through accountability, not through the appearance of an unmoderated space. +

+
+ +
+

6. Privacy and Transparency Must Coexist

+

+ Some information should be public (moderation decisions, vote tallies, policy rationales). + Some should be private (individual vote choices, personal data, draft deliberations). +

+

+ Likwid separates civic identity (who you are in public discussions) from + voting identity (how you voted). This allows open participation in + deliberation while protecting ballot secrecy. Members can engage freely without fear + that their votes will be used against them. +

+
+
+ +
+

The Political Stakes

+

+ This is not merely a technical project. The design of governance tools is inherently political. + The systems we build encode assumptions about power, participation, and legitimacy. +

+

+ Platforms that prioritize engagement over deliberation produce polarization. + Systems that obscure moderation produce conspiracy theories. + Tools that favor the already-powerful entrench existing hierarchies. +

+

+ Likwid is built on the conviction that better tools can enable better governance—not + by removing human judgment, but by creating structures that encourage informed, inclusive, + and accountable decision-making. +

+

+ We believe that: +

+
  • Organizations should control their own governance infrastructure.
  • Participation should not require technical expertise or unlimited time.
  • Power should be visible and accountable, not hidden behind algorithms or administrative access.
  • Minority voices deserve protection from simple majoritarianism.
  • Governance software should be free, auditable, and community-controlled.
+
+ +
+

An Invitation

+

+ Likwid is not finished. It is not perfect. It is an ongoing experiment in building + governance infrastructure for the organizations that need it most. +

+

+ We invite: +

+
  • Organizations to try Likwid, provide feedback, and help us understand what governance infrastructure needs to do.
  • Developers to contribute code, build plugins, and extend the platform's capabilities.
  • Researchers to study collective decision-making and help us understand what works.
  • Critics to challenge our assumptions and help us identify blind spots.
+

+ Democracy is not a product. It is a practice. Likwid exists to make that practice easier, + more inclusive, and more accountable. +

+
+ +
+ Explore the Demo + Learn More About Likwid +
+
+
+ + diff --git a/frontend/src/pages/notifications.astro b/frontend/src/pages/notifications.astro new file mode 100644 index 0000000..93f2b5a --- /dev/null +++ b/frontend/src/pages/notifications.astro @@ -0,0 +1,216 @@ +--- +import Layout from '../layouts/Layout.astro'; +import { API_BASE as apiBase } from '../lib/api'; +--- + + +
+
+

Notifications

+ +
+ +
+

Loading notifications...

+
+
+
+ + + + diff --git a/frontend/src/pages/proposals.astro b/frontend/src/pages/proposals.astro new file mode 100644 index 0000000..9ef33e3 --- /dev/null +++ b/frontend/src/pages/proposals.astro @@ -0,0 +1,246 @@ +--- +import Layout from '../layouts/Layout.astro'; +import { API_BASE as apiBase } from '../lib/api'; +--- + + +
+
+
+

All Proposals

+

Browse proposals across all communities

+
+
+ +
+ + + +
+ +
+

Loading proposals...

+
+
+
+ + + + diff --git a/frontend/src/pages/proposals/[id].astro b/frontend/src/pages/proposals/[id].astro new file mode 100644 index 0000000..3b4aadd --- /dev/null +++ b/frontend/src/pages/proposals/[id].astro @@ -0,0 +1,1214 @@ +--- +export const prerender = false; +import Layout from '../../layouts/Layout.astro'; +import { API_BASE as apiBase } from '../../lib/api'; +const { id } = Astro.params; +--- + + +
+
+

Loading proposal...

+
+
+
+ + + + diff --git a/frontend/src/pages/register.astro b/frontend/src/pages/register.astro new file mode 100644 index 0000000..bd85763 --- /dev/null +++ b/frontend/src/pages/register.astro @@ -0,0 +1,151 @@ +--- +import Layout from '../layouts/Layout.astro'; +import { API_BASE as apiBase } from '../lib/api'; +--- + + +
+
+

Register

+
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ +
+

+ Already have an account? Login +

+
+
+
+ + + + diff --git a/frontend/src/pages/settings.astro b/frontend/src/pages/settings.astro new file mode 100644 index 0000000..71d4728 --- /dev/null +++ b/frontend/src/pages/settings.astro @@ -0,0 +1,326 @@ +--- +import Layout from '../layouts/Layout.astro'; +import { API_BASE as apiBase } from '../lib/api'; +--- + + +
+

Settings

+ +
+

Loading...

+
+
+
+ + + + diff --git a/frontend/src/pages/setup.astro b/frontend/src/pages/setup.astro new file mode 100644 index 0000000..9167f35 --- /dev/null +++ b/frontend/src/pages/setup.astro @@ -0,0 +1,393 @@ +--- +import Layout from '../layouts/Layout.astro'; + +// Check if setup is needed +let setupRequired = true; +let instanceName = null; + +try { + const res = await fetch('http://127.0.0.1:3000/api/settings/setup/status'); + if (res.ok) { + const data = await res.json(); + setupRequired = data.setup_required; + instanceName = data.instance_name; + } +} catch (e) { + // Backend not available, assume setup needed +} + +// Redirect if setup already complete +if (!setupRequired) { + return Astro.redirect('/'); +} +--- + + +
+
+
+

Welcome to Likwid

+

Let's configure your governance platform

+
+ +
+ +
+

Instance Identity

+ +
+ + + This name will appear in the header and emails +
+
+ + +
+

Platform Mode

+

How should communities be created on this platform?

+ +
+ + + + + + + +
+ + +
+ + +
+

Admin Account

+

+ You need to be logged in as an admin to complete setup. + The first user registered becomes the admin. +

+ +
+

Checking authentication...

+
+
+ +
+ +
+
+
+
+
+ + + + diff --git a/frontend/src/pages/users/[username].astro b/frontend/src/pages/users/[username].astro new file mode 100644 index 0000000..1d8d922 --- /dev/null +++ b/frontend/src/pages/users/[username].astro @@ -0,0 +1,282 @@ +--- +export const prerender = false; +import Layout from '../../layouts/Layout.astro'; +import { API_BASE as apiBase } from '../../lib/api'; +const { username } = Astro.params; +--- + + +
+
+

Loading profile...

+
+
+
+ + + + diff --git a/frontend/tsconfig.json b/frontend/tsconfig.json new file mode 100644 index 0000000..8bf91d3 --- /dev/null +++ b/frontend/tsconfig.json @@ -0,0 +1,5 @@ +{ + "extends": "astro/tsconfigs/strict", + "include": [".astro/types.d.ts", "**/*"], + "exclude": ["dist"] +} diff --git a/scripts/.dev/state.json b/scripts/.dev/state.json new file mode 100644 index 0000000..9853620 --- /dev/null +++ b/scripts/.dev/state.json @@ -0,0 +1,5 @@ +{ + "frontendPid": 7820, + "backendPid": 1620, + "startedAt": "2026-01-27T15:04:02.0905073+01:00" +} diff --git a/scripts/demo-reset.ps1 b/scripts/demo-reset.ps1 new file mode 100644 index 0000000..2a5ccee --- /dev/null +++ b/scripts/demo-reset.ps1 @@ -0,0 +1,59 @@ +# Demo Reset Script (Windows PowerShell) +# Resets the demo instance to a clean state with fresh seed data + +param( + [switch]$Force +) + +$ErrorActionPreference = "Stop" + +Write-Host "=== Likwid Demo Reset ===" -ForegroundColor Cyan + +if (-not $Force) { + $confirm = Read-Host "This will DELETE all demo data and reset to initial state. Continue? (y/N)" + if ($confirm -ne "y" -and $confirm -ne "Y") { + Write-Host "Aborted." -ForegroundColor Yellow + exit 0 + } +} + +Set-Location "$PSScriptRoot\.." + +Write-Host "`n[1/4] Stopping demo containers..." -ForegroundColor Yellow +podman-compose -f compose/demo.yml down + +Write-Host "`n[2/4] Removing demo database volume..." -ForegroundColor Yellow +podman volume rm likwid_demo_data -f 2>$null + +Write-Host "`n[3/4] Starting fresh demo instance..." -ForegroundColor Yellow +podman-compose -f compose/demo.yml up -d + +Write-Host "`n[4/4] Waiting for services to be ready..." -ForegroundColor Yellow +Start-Sleep -Seconds 10 + +# Check if backend is responding +$maxRetries = 30 +$retry = 0 +while ($retry -lt $maxRetries) { + try { + $response = Invoke-WebRequest -Uri "http://localhost:3001/health" -UseBasicParsing -TimeoutSec 2 + if ($response.StatusCode -eq 200) { + Write-Host "`n=== Demo Reset Complete ===" -ForegroundColor Green + Write-Host "`nDemo is ready at:" + Write-Host " Frontend: http://localhost:4322" -ForegroundColor Cyan + Write-Host " Backend: http://localhost:3001" -ForegroundColor Cyan + Write-Host "`nDemo accounts (password: demo123):" + Write-Host " - contributor (standard member)" + Write-Host " - moderator (can moderate)" + Write-Host " - observer (read-only)" + exit 0 + } + } catch { + $retry++ + Write-Host "." -NoNewline + Start-Sleep -Seconds 1 + } +} + +Write-Host "`nWarning: Backend health check timed out. Check logs with:" -ForegroundColor Yellow +Write-Host " podman-compose -f compose/demo.yml logs backend" diff --git a/scripts/demo-reset.sh b/scripts/demo-reset.sh new file mode 100644 index 0000000..b0a7c03 --- /dev/null +++ b/scripts/demo-reset.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# Demo Reset Script (Linux/macOS) +# Resets the demo instance to a clean state with fresh seed data + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR/.." + +echo "=== Likwid Demo Reset ===" + +if [ "$1" != "--force" ] && [ "$1" != "-f" ]; then + read -p "This will DELETE all demo data and reset to initial state. Continue? (y/N) " confirm + if [ "$confirm" != "y" ] && [ "$confirm" != "Y" ]; then + echo "Aborted." + exit 0 + fi +fi + +echo -e "\n[1/4] Stopping demo containers..." +podman-compose -f compose/demo.yml down || docker-compose -f compose/demo.yml down + +echo -e "\n[2/4] Removing demo database volume..." 
+podman volume rm likwid_demo_data -f 2>/dev/null || docker volume rm likwid_demo_data -f 2>/dev/null || true + +echo -e "\n[3/4] Starting fresh demo instance..." +podman-compose -f compose/demo.yml up -d || docker-compose -f compose/demo.yml up -d + +echo -e "\n[4/4] Waiting for services to be ready..." +sleep 5 + +max_retries=30 +retry=0 +while [ $retry -lt $max_retries ]; do + if curl -s http://localhost:3001/health > /dev/null 2>&1; then + echo -e "\n=== Demo Reset Complete ===" + echo -e "\nDemo is ready at:" + echo " Frontend: http://localhost:4322" + echo " Backend: http://localhost:3001" + echo -e "\nDemo accounts (password: demo123):" + echo " - contributor (standard member)" + echo " - moderator (can moderate)" + echo " - observer (read-only)" + exit 0 + fi + retry=$((retry + 1)) + echo -n "." + sleep 1 +done + +echo -e "\nWarning: Backend health check timed out. Check logs with:" +echo " podman-compose -f compose/demo.yml logs backend" diff --git a/scripts/dev-start.ps1 b/scripts/dev-start.ps1 new file mode 100644 index 0000000..235924f --- /dev/null +++ b/scripts/dev-start.ps1 @@ -0,0 +1,107 @@ +<# +.SYNOPSIS + Starts the Likwid development environment. +.DESCRIPTION + Starts PostgreSQL (via Podman), backend (Rust/Axum), and frontend (Astro). + Use dev-stop.ps1 to stop all services. +#> +[CmdletBinding()] +param() + +$ErrorActionPreference = 'Stop' + +# Paths +$root = Split-Path -Parent $PSScriptRoot +$stateDir = Join-Path $PSScriptRoot '.dev' +$stateFile = Join-Path $stateDir 'state.json' +$backendLog = Join-Path $stateDir 'backend.log' +$frontendLog = Join-Path $stateDir 'frontend.log' + +New-Item -ItemType Directory -Force -Path $stateDir | Out-Null + +# Environment +if (-not $env:POSTGRES_USER) { $env:POSTGRES_USER = 'likwid' } +if (-not $env:POSTGRES_PASSWORD) { $env:POSTGRES_PASSWORD = 'likwid' } +if (-not $env:POSTGRES_DB) { $env:POSTGRES_DB = 'likwid' } +$env:DATABASE_URL = "postgres://$($env:POSTGRES_USER):$($env:POSTGRES_PASSWORD)@127.0.0.1:5432/$($env:POSTGRES_DB)" + +# Check if already running +if (Test-Path $stateFile) { + $state = Get-Content -Raw $stateFile | ConvertFrom-Json -ErrorAction SilentlyContinue + if ($state -and $state.backendPid) { + $p = Get-Process -Id $state.backendPid -ErrorAction SilentlyContinue + if ($p) { + Write-Host "Already running (backend PID $($state.backendPid)). Run dev-stop.ps1 first." + exit 0 + } + } +} + +# Start Podman machine if needed +try { + $machines = podman machine list --format json 2>$null | ConvertFrom-Json + $running = $machines | Where-Object { $_.Running -eq $true } + if (-not $running) { + Write-Host "Starting Podman machine..." + podman machine start ($machines | Select-Object -First 1).Name 2>$null + } +} catch { + Write-Host "Podman machine check failed (may already be running)" +} + +# Start PostgreSQL container +Write-Host "Starting PostgreSQL..." +$composeFile = Join-Path $root 'compose/dev.yml' +podman-compose -f $composeFile up -d 2>$null + +# Wait for PostgreSQL to be ready +$maxWait = 30 +for ($i = 0; $i -lt $maxWait; $i++) { + $pg = netstat -ano 2>$null | Select-String ':5432.*LISTENING' + if ($pg) { break } + Start-Sleep -Seconds 1 +} + +# Start backend +Write-Host "Starting backend..." +$backend = Start-Process -FilePath 'cmd.exe' ` + -ArgumentList '/c', 'cargo', 'run', '2>&1' ` + -WorkingDirectory (Join-Path $root 'backend') ` + -PassThru -WindowStyle Hidden ` + -RedirectStandardOutput $backendLog + +# Start frontend +Write-Host "Starting frontend..." 
+$frontend = Start-Process -FilePath 'cmd.exe' ` + -ArgumentList '/c', 'npm', 'run', 'dev', '2>&1' ` + -WorkingDirectory (Join-Path $root 'frontend') ` + -PassThru -WindowStyle Hidden ` + -RedirectStandardOutput $frontendLog + +# Save state +@{ + backendPid = $backend.Id + frontendPid = $frontend.Id + startedAt = (Get-Date).ToString('o') +} | ConvertTo-Json | Set-Content -Encoding UTF8 $stateFile + +# Brief wait for startup +Start-Sleep -Seconds 3 + +# Status report +Write-Host "" +Write-Host "=== Likwid Dev Environment ===" +if (-not $backend.HasExited) { + Write-Host "Backend: Running (PID $($backend.Id)) - http://127.0.0.1:3000" +} else { + Write-Host "Backend: FAILED - check $backendLog" +} +if (-not $frontend.HasExited) { + Write-Host "Frontend: Running (PID $($frontend.Id)) - http://localhost:4321" +} else { + Write-Host "Frontend: FAILED - check $frontendLog" +} +Write-Host "PostgreSQL: Running on port 5432" +Write-Host "" +Write-Host "Logs: $stateDir" +Write-Host "Stop: .\scripts\dev-stop.ps1" diff --git a/scripts/dev-start.sh b/scripts/dev-start.sh new file mode 100644 index 0000000..b8a7a3f --- /dev/null +++ b/scripts/dev-start.sh @@ -0,0 +1,83 @@ +#!/bin/bash +# Likwid Development Environment Starter +# Starts PostgreSQL (Podman), backend (Rust), and frontend (Astro) + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ROOT_DIR="$(dirname "$SCRIPT_DIR")" +STATE_DIR="$SCRIPT_DIR/.dev" +STATE_FILE="$STATE_DIR/state.json" +BACKEND_LOG="$STATE_DIR/backend.log" +FRONTEND_LOG="$STATE_DIR/frontend.log" + +mkdir -p "$STATE_DIR" + +# Environment defaults +export POSTGRES_USER="${POSTGRES_USER:-likwid}" +export POSTGRES_PASSWORD="${POSTGRES_PASSWORD:-likwid}" +export POSTGRES_DB="${POSTGRES_DB:-likwid}" +export DATABASE_URL="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@127.0.0.1:5432/${POSTGRES_DB}" + +# Check if already running +if [ -f "$STATE_FILE" ]; then + BACKEND_PID=$(jq -r '.backendPid // empty' "$STATE_FILE" 2>/dev/null) + if [ -n "$BACKEND_PID" ] && kill -0 "$BACKEND_PID" 2>/dev/null; then + echo "Already running (backend PID $BACKEND_PID). Run dev-stop.sh first." + exit 0 + fi +fi + +# Start PostgreSQL +echo "Starting PostgreSQL..." +podman-compose -f "$ROOT_DIR/compose/dev.yml" up -d 2>/dev/null || true + +# Wait for PostgreSQL +echo "Waiting for PostgreSQL..." +for i in {1..30}; do + if nc -z 127.0.0.1 5432 2>/dev/null; then + break + fi + sleep 1 +done + +# Start backend +echo "Starting backend..." +cd "$ROOT_DIR/backend" +cargo run > "$BACKEND_LOG" 2>&1 & +BACKEND_PID=$! + +# Start frontend +echo "Starting frontend..." +cd "$ROOT_DIR/frontend" +npm run dev > "$FRONTEND_LOG" 2>&1 & +FRONTEND_PID=$! 
+ +# Save state +cat > "$STATE_FILE" << EOF +{ + "backendPid": $BACKEND_PID, + "frontendPid": $FRONTEND_PID, + "startedAt": "$(date -Iseconds)" +} +EOF + +sleep 3 + +# Status report +echo "" +echo "=== Likwid Dev Environment ===" +if kill -0 "$BACKEND_PID" 2>/dev/null; then + echo "Backend: Running (PID $BACKEND_PID) - http://127.0.0.1:3000" +else + echo "Backend: FAILED - check $BACKEND_LOG" +fi +if kill -0 "$FRONTEND_PID" 2>/dev/null; then + echo "Frontend: Running (PID $FRONTEND_PID) - http://localhost:4321" +else + echo "Frontend: FAILED - check $FRONTEND_LOG" +fi +echo "PostgreSQL: Running on port 5432" +echo "" +echo "Logs: $STATE_DIR" +echo "Stop: ./scripts/dev-stop.sh" diff --git a/scripts/dev-stop.ps1 b/scripts/dev-stop.ps1 new file mode 100644 index 0000000..ab79528 --- /dev/null +++ b/scripts/dev-stop.ps1 @@ -0,0 +1,71 @@ +<# +.SYNOPSIS + Stops the Likwid development environment. +.DESCRIPTION + Gracefully stops backend, frontend, and PostgreSQL container. +#> +[CmdletBinding()] +param() + +$ErrorActionPreference = 'Continue' + +$root = Split-Path -Parent $PSScriptRoot +$stateDir = Join-Path $PSScriptRoot '.dev' +$stateFile = Join-Path $stateDir 'state.json' + +function Stop-ProcessSafely([int]$ProcessId, [string]$Name) { + if (-not $ProcessId) { return $false } + + $proc = Get-Process -Id $ProcessId -ErrorAction SilentlyContinue + if (-not $proc) { return $false } + + Write-Host "Stopping $Name (PID $ProcessId)..." + Stop-Process -Id $ProcessId -ErrorAction SilentlyContinue + Start-Sleep -Milliseconds 500 + + # Force kill if still running + $proc = Get-Process -Id $ProcessId -ErrorAction SilentlyContinue + if ($proc) { + Stop-Process -Id $ProcessId -Force -ErrorAction SilentlyContinue + } + return $true +} + +function Stop-ProcessOnPort([int]$Port) { + $connections = netstat -ano 2>$null | Select-String ":$Port.*LISTENING" + foreach ($conn in $connections) { + $parts = ($conn.Line -replace '\s+', ' ').Trim().Split(' ') + $procId = [int]$parts[-1] + if ($procId -gt 0) { + Stop-ProcessSafely -ProcessId $procId -Name "process on port $Port" | Out-Null + } + } +} + +Write-Host "=== Stopping Likwid Dev Environment ===" + +# Stop from saved state +if (Test-Path $stateFile) { + $state = Get-Content -Raw $stateFile | ConvertFrom-Json -ErrorAction SilentlyContinue + if ($state) { + Stop-ProcessSafely -ProcessId $state.frontendPid -Name 'Frontend' | Out-Null + Stop-ProcessSafely -ProcessId $state.backendPid -Name 'Backend' | Out-Null + } +} + +# Cleanup any orphaned processes on the ports +Stop-ProcessOnPort -Port 4321 +Stop-ProcessOnPort -Port 3000 + +# Stop PostgreSQL container +Write-Host "Stopping PostgreSQL container..." +$composeFile = Join-Path $root 'compose/dev.yml' +podman-compose -f $composeFile down 2>$null + +# Clean up state file +if (Test-Path $stateFile) { + Remove-Item -Force $stateFile +} + +Write-Host "" +Write-Host "All services stopped." diff --git a/scripts/dev-stop.sh b/scripts/dev-stop.sh new file mode 100644 index 0000000..71a9e24 --- /dev/null +++ b/scripts/dev-stop.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# Likwid Development Environment Stopper +# Stops backend, frontend, and optionally PostgreSQL + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ROOT_DIR="$(dirname "$SCRIPT_DIR")" +STATE_DIR="$SCRIPT_DIR/.dev" +STATE_FILE="$STATE_DIR/state.json" + +STOP_DB="${1:-false}" + +echo "Stopping Likwid development environment..." 
+ +# Read state file +if [ -f "$STATE_FILE" ]; then + BACKEND_PID=$(jq -r '.backendPid // empty' "$STATE_FILE" 2>/dev/null) + FRONTEND_PID=$(jq -r '.frontendPid // empty' "$STATE_FILE" 2>/dev/null) + + # Stop backend + if [ -n "$BACKEND_PID" ]; then + if kill -0 "$BACKEND_PID" 2>/dev/null; then + echo "Stopping backend (PID $BACKEND_PID)..." + kill "$BACKEND_PID" 2>/dev/null || true + sleep 1 + kill -9 "$BACKEND_PID" 2>/dev/null || true + fi + fi + + # Stop frontend + if [ -n "$FRONTEND_PID" ]; then + if kill -0 "$FRONTEND_PID" 2>/dev/null; then + echo "Stopping frontend (PID $FRONTEND_PID)..." + kill "$FRONTEND_PID" 2>/dev/null || true + sleep 1 + kill -9 "$FRONTEND_PID" 2>/dev/null || true + fi + fi + + rm -f "$STATE_FILE" +fi + +# Also kill any stray processes +pkill -f "likwid" 2>/dev/null || true +pkill -f "astro dev" 2>/dev/null || true + +# Stop PostgreSQL if requested +if [ "$STOP_DB" = "--all" ] || [ "$STOP_DB" = "-a" ]; then + echo "Stopping PostgreSQL..." + podman-compose -f "$ROOT_DIR/compose/dev.yml" down 2>/dev/null || true +fi + +echo "Done." diff --git a/scripts/dev-test.ps1 b/scripts/dev-test.ps1 new file mode 100644 index 0000000..8dad427 --- /dev/null +++ b/scripts/dev-test.ps1 @@ -0,0 +1,61 @@ +<# +.SYNOPSIS + Runs tests for the Likwid project. +.DESCRIPTION + Runs backend (Rust) and frontend tests. + Use -Backend, -Frontend, or -All flags. +#> +[CmdletBinding()] +param( + [switch]$Backend, + [switch]$Frontend, + [switch]$All +) + +$ErrorActionPreference = 'Stop' +$root = Split-Path -Parent $PSScriptRoot + +# Default to all if no flags +if (-not $Backend -and -not $Frontend) { $All = $true } + +$results = @() + +# Backend tests +if ($Backend -or $All) { + Write-Host "=== Backend Tests ===" -ForegroundColor Cyan + Push-Location (Join-Path $root 'backend') + try { + $env:DATABASE_URL = "postgres://likwid:likwid@127.0.0.1:5432/likwid" + cargo test --no-fail-fast 2>&1 | Tee-Object -Variable backendOutput + $results += @{ Component = 'Backend'; Success = ($LASTEXITCODE -eq 0) } + } finally { + Pop-Location + } +} + +# Frontend tests +if ($Frontend -or $All) { + Write-Host "" + Write-Host "=== Frontend Tests ===" -ForegroundColor Cyan + Push-Location (Join-Path $root 'frontend') + try { + npm run check 2>&1 | Tee-Object -Variable frontendOutput + $results += @{ Component = 'Frontend'; Success = ($LASTEXITCODE -eq 0) } + } finally { + Pop-Location + } +} + +# Summary +Write-Host "" +Write-Host "=== Test Summary ===" -ForegroundColor Cyan +foreach ($r in $results) { + $status = if ($r.Success) { "PASS" } else { "FAIL" } + $color = if ($r.Success) { "Green" } else { "Red" } + Write-Host "$($r.Component): $status" -ForegroundColor $color +} + +$failed = $results | Where-Object { -not $_.Success } +if ($failed) { + exit 1 +} diff --git a/scripts/dev.ps1 b/scripts/dev.ps1 new file mode 100644 index 0000000..b484339 --- /dev/null +++ b/scripts/dev.ps1 @@ -0,0 +1 @@ +podman-compose -f compose/dev.yml up diff --git a/scripts/dev.sh b/scripts/dev.sh new file mode 100644 index 0000000..90466bd --- /dev/null +++ b/scripts/dev.sh @@ -0,0 +1,2 @@ +#!/bin/sh +podman-compose -f compose/dev.yml up diff --git a/scripts/post-reboot-setup.ps1 b/scripts/post-reboot-setup.ps1 new file mode 100644 index 0000000..64fcbca --- /dev/null +++ b/scripts/post-reboot-setup.ps1 @@ -0,0 +1,56 @@ +# Likwid - Post-Reboot WSL2 & Podman Setup Script +# Run this script after rebooting to complete the environment setup + +Write-Host "=== Likwid Post-Reboot Setup ===" -ForegroundColor Cyan + +# Step 1: Verify WSL2 
is working +Write-Host "`n[1/4] Checking WSL2 status..." -ForegroundColor Yellow +$wslStatus = wsl --status 2>&1 +if ($LASTEXITCODE -ne 0) { + Write-Host "WSL2 is not ready. Please ensure virtualization is enabled in BIOS." -ForegroundColor Red + Write-Host "Run 'wsl --install --no-distribution' as administrator if needed." -ForegroundColor Red + exit 1 +} +Write-Host "WSL2 is ready!" -ForegroundColor Green + +# Step 2: Install openSUSE Tumbleweed +Write-Host "`n[2/4] Installing openSUSE Tumbleweed..." -ForegroundColor Yellow +$distros = wsl --list --quiet 2>&1 +if ($distros -match "openSUSE-Tumbleweed") { + Write-Host "openSUSE Tumbleweed is already installed." -ForegroundColor Green +} else { + Write-Host "Installing openSUSE Tumbleweed (this may take a while)..." -ForegroundColor Yellow + wsl --install -d openSUSE-Tumbleweed + if ($LASTEXITCODE -ne 0) { + Write-Host "Failed to install openSUSE Tumbleweed." -ForegroundColor Red + exit 1 + } + Write-Host "openSUSE Tumbleweed installed!" -ForegroundColor Green +} + +# Step 3: Set openSUSE as default +Write-Host "`n[3/4] Setting openSUSE Tumbleweed as default WSL distribution..." -ForegroundColor Yellow +wsl --set-default openSUSE-Tumbleweed + +# Step 4: Configure Podman in WSL2 +Write-Host "`n[4/4] Configuring Podman in openSUSE Tumbleweed..." -ForegroundColor Yellow +wsl -d openSUSE-Tumbleweed -e bash -c " + echo 'Installing Podman and podman-compose...' + sudo zypper refresh + sudo zypper install -y podman podman-compose + + echo 'Enabling rootless Podman socket...' + systemctl --user enable --now podman.socket + + echo 'Verifying installation...' + podman --version + podman-compose --version +" + +Write-Host "`n=== Setup Complete ===" -ForegroundColor Cyan +Write-Host "Next steps:" -ForegroundColor Yellow +Write-Host "1. Open Podman Desktop and configure it to use WSL2 backend" +Write-Host "2. Start Podman machine in Podman Desktop" +Write-Host "3. Run '.\scripts\dev.ps1' to start development services" +Write-Host "4. Run 'cargo run' in backend/ to start the backend" +Write-Host "5. Run 'npm run dev' in frontend/ to start the frontend" diff --git a/scripts/prepare-production.ps1 b/scripts/prepare-production.ps1 new file mode 100644 index 0000000..fca59dd --- /dev/null +++ b/scripts/prepare-production.ps1 @@ -0,0 +1,37 @@ +# Prepare Production Build Script (Windows PowerShell) +# Creates a production-ready backend without demo seed data + +$ErrorActionPreference = "Stop" + +Write-Host "=== Preparing Production Build ===" -ForegroundColor Cyan + +$projectRoot = "$PSScriptRoot\.." +$backendDir = "$projectRoot\backend" +$migrationsDir = "$backendDir\migrations" +$demoMigration = "$migrationsDir\20260127150000_demo_seed_data.sql" + +# Check if demo migration exists +if (Test-Path $demoMigration) { + Write-Host "`nDemo seed migration found. For production:" -ForegroundColor Yellow + Write-Host " 1. This file should be EXCLUDED from production deployments" + Write-Host " 2. It contains test users and sample data" + Write-Host "" + + $action = Read-Host "Remove demo migration for production build? (y/N)" + if ($action -eq "y" -or $action -eq "Y") { + # Backup first + $backupPath = "$demoMigration.backup" + Copy-Item $demoMigration $backupPath + Remove-Item $demoMigration + Write-Host "Demo migration removed (backup at $backupPath)" -ForegroundColor Green + } else { + Write-Host "Keeping demo migration. Remember to remove for production!" 
-ForegroundColor Yellow + } +} else { + Write-Host "Demo migration not found - already production ready" -ForegroundColor Green +} + +Write-Host "`n=== Production Preparation Complete ===" -ForegroundColor Green +Write-Host "`nNext steps:" +Write-Host " 1. Configure compose/.env.production" +Write-Host " 2. Run: podman-compose -f compose/production.yml up -d"