From 6386e6b43786ad8aab93c7a98513aa8963d29195 Mon Sep 17 00:00:00 2001 From: Lakee Sivaraya Date: Tue, 13 Jan 2026 11:46:10 -0800 Subject: [PATCH 001/101] updates --- PLAN.md | 250 + apps/sim/app/api/table/[tableId]/route.ts | 208 + .../api/table/[tableId]/rows/[rowId]/route.ts | 331 + .../sim/app/api/table/[tableId]/rows/route.ts | 780 ++ apps/sim/app/api/table/route.ts | 297 + .../tables/components/create-table-modal.tsx | 278 + .../[workspaceId]/tables/components/index.ts | 2 + .../tables/components/table-card.tsx | 143 + .../workspace/[workspaceId]/tables/layout.tsx | 10 + .../workspace/[workspaceId]/tables/page.tsx | 26 + .../workspace/[workspaceId]/tables/tables.tsx | 142 + .../workflow-block/workflow-block.tsx | 63 +- .../w/components/sidebar/sidebar.tsx | 8 +- apps/sim/blocks/blocks/table.ts | 544 + apps/sim/blocks/registry.ts | 2 + apps/sim/components/icons.tsx | 21 + apps/sim/hooks/queries/use-tables.ts | 152 + apps/sim/lib/table/constants.ts | 28 + apps/sim/lib/table/index.ts | 3 + apps/sim/lib/table/query-builder.ts | 144 + apps/sim/lib/table/validation.ts | 199 + apps/sim/tools/error-extractors.ts | 50 + apps/sim/tools/registry.ts | 24 +- apps/sim/tools/table/batch-insert-rows.ts | 93 + apps/sim/tools/table/create.ts | 73 + apps/sim/tools/table/delete-row.ts | 64 + apps/sim/tools/table/delete-rows-by-filter.ts | 78 + apps/sim/tools/table/get-row.ts | 61 + apps/sim/tools/table/index.ts | 11 + apps/sim/tools/table/insert-row.ts | 77 + apps/sim/tools/table/list.ts | 47 + apps/sim/tools/table/query-rows.ts | 103 + apps/sim/tools/table/types.ts | 186 + apps/sim/tools/table/update-row.ts | 83 + apps/sim/tools/table/update-rows-by-filter.ts | 97 + .../migrations/0140_awesome_killer_shrike.sql | 36 + .../db/migrations/meta/0140_snapshot.json | 10046 ++++++++++++++++ packages/db/migrations/meta/_journal.json | 7 + packages/db/schema.ts | 63 + 39 files changed, 14821 insertions(+), 9 deletions(-) create mode 100644 PLAN.md create mode 100644 apps/sim/app/api/table/[tableId]/route.ts create mode 100644 apps/sim/app/api/table/[tableId]/rows/[rowId]/route.ts create mode 100644 apps/sim/app/api/table/[tableId]/rows/route.ts create mode 100644 apps/sim/app/api/table/route.ts create mode 100644 apps/sim/app/workspace/[workspaceId]/tables/components/create-table-modal.tsx create mode 100644 apps/sim/app/workspace/[workspaceId]/tables/components/index.ts create mode 100644 apps/sim/app/workspace/[workspaceId]/tables/components/table-card.tsx create mode 100644 apps/sim/app/workspace/[workspaceId]/tables/layout.tsx create mode 100644 apps/sim/app/workspace/[workspaceId]/tables/page.tsx create mode 100644 apps/sim/app/workspace/[workspaceId]/tables/tables.tsx create mode 100644 apps/sim/blocks/blocks/table.ts create mode 100644 apps/sim/hooks/queries/use-tables.ts create mode 100644 apps/sim/lib/table/constants.ts create mode 100644 apps/sim/lib/table/index.ts create mode 100644 apps/sim/lib/table/query-builder.ts create mode 100644 apps/sim/lib/table/validation.ts create mode 100644 apps/sim/tools/table/batch-insert-rows.ts create mode 100644 apps/sim/tools/table/create.ts create mode 100644 apps/sim/tools/table/delete-row.ts create mode 100644 apps/sim/tools/table/delete-rows-by-filter.ts create mode 100644 apps/sim/tools/table/get-row.ts create mode 100644 apps/sim/tools/table/index.ts create mode 100644 apps/sim/tools/table/insert-row.ts create mode 100644 apps/sim/tools/table/list.ts create mode 100644 apps/sim/tools/table/query-rows.ts create mode 100644 apps/sim/tools/table/types.ts create 
mode 100644 apps/sim/tools/table/update-row.ts create mode 100644 apps/sim/tools/table/update-rows-by-filter.ts create mode 100644 packages/db/migrations/0140_awesome_killer_shrike.sql create mode 100644 packages/db/migrations/meta/0140_snapshot.json diff --git a/PLAN.md b/PLAN.md new file mode 100644 index 0000000000..2c04fbb5f1 --- /dev/null +++ b/PLAN.md @@ -0,0 +1,250 @@ +# Table Block Implementation Plan + +> Create a new "table" block type that enables users to define schemas and perform CRUD operations on lightweight, workspace/workflow-scoped tables stored in the existing PostgreSQL database using JSONB with application-level schema enforcement. + +## Table of Contents + +- [Architecture Overview](#architecture-overview) +- [Data Model](#data-model) +- [Implementation Files](#implementation-files) +- [Key Design Decisions](#key-design-decisions) +- [Limits and Limitations](#limits-and-limitations) +- [Implementation Checklist](#implementation-checklist) + +## Architecture Overview + +```mermaid +flowchart TB + subgraph UI [Block UI Layer] + TableBlock[Table Block] + SchemaEditor[Schema Editor SubBlock] + end + + subgraph Tools [Tool Layer] + CreateTable[table_create] + Insert[table_insert] + Select[table_select] + Update[table_update] + Delete[table_delete] + DropTable[table_drop] + end + + subgraph API [API Routes] + TableAPI["/api/tables"] + RowAPI["/api/tables/tableId/rows"] + end + + subgraph DB [PostgreSQL] + SimTable[sim_table] + SimTableRow[sim_table_row] + end + + TableBlock --> Tools + Tools --> API + API --> DB +``` + +## Data Model + +Two new tables in the existing PostgreSQL database: + +### `sim_table` - Table Definitions + +| Column | Type | Description | +|--------|------|-------------| +| id | text | Primary key | +| workspace_id | text | FK to workspace | +| workflow_id | text | FK to workflow (nullable for workspace-scope) | +| name | text | Table name (unique per scope) | +| schema | jsonb | Column definitions with types/constraints | +| created_by | text | FK to user | +| created_at | timestamp | Creation time | +| updated_at | timestamp | Last update time | + +### `sim_table_row` - Row Data + +| Column | Type | Description | +|--------|------|-------------| +| id | text | Primary key | +| table_id | text | FK to sim_table | +| data | jsonb | Row data (validated against schema) | +| created_at | timestamp | Creation time | +| updated_at | timestamp | Last update time | + +### Schema Format + +**Example schema definition:** + +```json +{ + "columns": [ + { "name": "id", "type": "string", "primaryKey": true }, + { "name": "email", "type": "string", "required": true, "unique": true }, + { "name": "age", "type": "number" }, + { "name": "active", "type": "boolean", "default": true } + ] +} +``` + +**Supported Types:** `string`, `number`, `boolean`, `date`, `json` + +## Implementation Files + +### 1. Database Schema + +- `packages/db/schema.ts` - Add `simTable` and `simTableRow` table definitions +- Generate migration for the new tables + +### 2. Tools (`apps/sim/tools/table/`) + +| File | Purpose | +|------|---------| +| `types.ts` | Type definitions for params/responses | +| `create.ts` | Create table with schema | +| `insert.ts` | Insert row(s) with schema validation | +| `select.ts` | Query rows with filtering | +| `update.ts` | Update rows with schema validation | +| `delete.ts` | Delete rows | +| `drop.ts` | Drop table | +| `index.ts` | Barrel export | + +### 3. 
Block Definition + +- `apps/sim/blocks/blocks/table.ts` - Block config with: + - Operation dropdown (create, insert, select, update, delete, drop) + - Scope selector (workspace/workflow) + - Table selector (for existing tables) + - Schema editor (for create operation) + - Data/query inputs (operation-specific) + +### 4. API Routes + +- `apps/sim/app/api/tables/route.ts` - Create table, list tables +- `apps/sim/app/api/tables/[tableId]/route.ts` - Get/drop table +- `apps/sim/app/api/tables/[tableId]/rows/route.ts` - CRUD on rows + +### 5. Schema Validation Library + +- `apps/sim/lib/tables/schema.ts` - Schema validation utilities +- `apps/sim/lib/tables/types.ts` - Shared types + +## Key Design Decisions + +1. **Schema Enforcement**: Application-layer validation before database writes. JSONB stores data, but every insert/update validates against the table's schema. + +2. **Concurrency**: PostgreSQL handles concurrent reads/writes natively. Row-level locking for updates. + +3. **Indexing**: GIN index on `data` column for efficient JSONB queries. Additional indexes on `table_id` for fast row lookups. + +4. **Scope Resolution**: Tables with `workflow_id = NULL` are workspace-scoped. Tables with `workflow_id` set are workflow-scoped. + +5. **Table Selector**: New SubBlock type `table-selector` that fetches available tables based on current workspace/workflow context. + +## Limits and Limitations + +### Table Limits + +| Limit | Free Plan | Pro Plan | Enterprise | +|-------|-----------|----------|------------| +| Tables per workspace | 10 | 50 | Unlimited | +| Tables per workflow | 5 | 20 | Unlimited | +| Columns per table | 50 | 100 | 200 | + +### Row Limits + +| Limit | Free Plan | Pro Plan | Enterprise | +|-------|-----------|----------|------------| +| Rows per table | 10,000 | 100,000 | 1,000,000 | +| Batch insert size | 100 | 500 | 1,000 | +| Batch update/delete size | 100 | 500 | 1,000 | + +### Size Limits + +| Limit | Value | Rationale | +|-------|-------|-----------| +| Column name length | 64 chars | PostgreSQL identifier limit | +| Table name length | 64 chars | PostgreSQL identifier limit | +| String field max length | 65,535 chars | ~64KB per text field | +| JSON field max size | 1 MB | PostgreSQL JSONB practical limit | +| Single row max size | 2 MB | Reasonable row size limit | +| Total table data size | Based on plan | Tied to workspace storage quota | + +### Query Limits + +| Limit | Value | Notes | +|-------|-------|-------| +| Default page size | 100 rows | Can be overridden up to max | +| Max page size | 1,000 rows | Prevents memory issues | +| Max filter conditions | 20 | AND/OR conditions combined | +| Query timeout | 30 seconds | Prevents long-running queries | +| Max concurrent queries per table | 50 | Rate limiting per table | + +### Schema Constraints + +| Constraint | Limit | +|------------|-------| +| Primary key columns | 1 (single column only) | +| Unique constraints | 5 per table | +| Required (NOT NULL) columns | Unlimited | +| Default values | Supported for all types | +| Foreign keys | Not supported (v1) | +| Computed columns | Not supported (v1) | +| Indexes | Auto-created for primary key and unique columns | + +### Data Type Specifications + +| Type | Storage | Min | Max | Notes | +|------|---------|-----|-----|-------| +| `string` | text | 0 chars | 65,535 chars | UTF-8 encoded | +| `number` | double precision | -1.7e308 | 1.7e308 | IEEE 754 double | +| `boolean` | boolean | - | - | true/false | +| `date` | timestamp | 4713 BC | 294276 AD | ISO 8601 format | 
+| `json` | jsonb | - | 1 MB | Nested objects/arrays | + +### Operational Limitations + +1. **No Transactions Across Tables**: Each operation is atomic to a single table. Cross-table transactions are not supported. + +2. **No JOINs**: Cannot join data between tables. Use workflow logic to combine data from multiple tables. + +3. **No Triggers/Hooks**: No automatic actions on insert/update/delete. Use workflow blocks for reactive logic. + +4. **No Full-Text Search**: Basic filtering only. For full-text search, use the Knowledge Base feature. + +5. **No Schema Migrations**: Schema changes require dropping and recreating the table (with data loss). Future versions may support additive migrations. + +6. **Query Complexity**: Only basic operators supported: + - Comparison: `=`, `!=`, `>`, `<`, `>=`, `<=` + - String: `LIKE`, `ILIKE`, `STARTS_WITH`, `ENDS_WITH`, `CONTAINS` + - Logical: `AND`, `OR`, `NOT` + - Null checks: `IS NULL`, `IS NOT NULL` + - Array: `IN`, `NOT IN` + +### Performance Characteristics + +| Operation | Expected Latency | Notes | +|-----------|------------------|-------| +| Insert (single row) | < 50ms | With schema validation | +| Insert (batch 100) | < 200ms | Parallel validation | +| Select (indexed) | < 20ms | Primary key or unique column | +| Select (filtered, 1K rows) | < 100ms | With GIN index | +| Update (single row) | < 50ms | By primary key | +| Delete (single row) | < 30ms | By primary key | + +### Storage Accounting + +- Table storage counts toward workspace storage quota +- Calculated as: `sum(row_data_size) + schema_overhead` +- Schema overhead: ~1KB per table +- Row overhead: ~100 bytes per row (metadata, timestamps) + +## Implementation Checklist + +- [ ] Add `simTable` and `simTableRow` to `packages/db/schema.ts` and generate migration +- [ ] Create `apps/sim/lib/tables/` with schema validation and types +- [ ] Create `apps/sim/tools/table/` with all 6 tool implementations +- [ ] Register tools in `apps/sim/tools/registry.ts` +- [ ] Create API routes for tables and rows CRUD operations +- [ ] Create `apps/sim/blocks/blocks/table.ts` block definition +- [ ] Register block in `apps/sim/blocks/registry.ts` +- [ ] Add `TableIcon` to `apps/sim/components/icons.tsx` diff --git a/apps/sim/app/api/table/[tableId]/route.ts b/apps/sim/app/api/table/[tableId]/route.ts new file mode 100644 index 0000000000..58eed19a17 --- /dev/null +++ b/apps/sim/app/api/table/[tableId]/route.ts @@ -0,0 +1,208 @@ +import { db } from '@sim/db' +import { permissions, userTableDefinitions, userTableRows, workspace } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { and, eq, isNull } from 'drizzle-orm' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkHybridAuth } from '@/lib/auth/hybrid' +import { generateRequestId } from '@/lib/core/utils/request' + +const logger = createLogger('TableDetailAPI') + +const GetTableSchema = z.object({ + workspaceId: z.string().min(1), +}) + +/** + * Check if user has write access to workspace + */ +async function checkWorkspaceAccess(workspaceId: string, userId: string) { + const [workspaceData] = await db + .select({ + id: workspace.id, + ownerId: workspace.ownerId, + }) + .from(workspace) + .where(eq(workspace.id, workspaceId)) + .limit(1) + + if (!workspaceData) { + return { hasAccess: false, canWrite: false } + } + + if (workspaceData.ownerId === userId) { + return { hasAccess: true, canWrite: true } + } + + const [permission] = await db + .select({ + permissionType: 
permissions.permissionType, + }) + .from(permissions) + .where( + and( + eq(permissions.userId, userId), + eq(permissions.entityType, 'workspace'), + eq(permissions.entityId, workspaceId) + ) + ) + .limit(1) + + if (!permission) { + return { hasAccess: false, canWrite: false } + } + + const canWrite = permission.permissionType === 'admin' || permission.permissionType === 'write' + + return { + hasAccess: true, + canWrite, + } +} + +/** + * GET /api/table/[tableId]?workspaceId=xxx + * Get table details + */ +export async function GET( + request: NextRequest, + { params }: { params: Promise<{ tableId: string }> } +) { + const requestId = generateRequestId() + const { tableId } = await params + + try { + const authResult = await checkHybridAuth(request) + if (!authResult.success || !authResult.userId) { + return NextResponse.json({ error: 'Authentication required' }, { status: 401 }) + } + + const { searchParams } = new URL(request.url) + const validated = GetTableSchema.parse({ + workspaceId: searchParams.get('workspaceId'), + }) + + // Check workspace access + const { hasAccess } = await checkWorkspaceAccess(validated.workspaceId, authResult.userId) + + if (!hasAccess) { + return NextResponse.json({ error: 'Access denied' }, { status: 403 }) + } + + // Get table + const [table] = await db + .select() + .from(userTableDefinitions) + .where( + and( + eq(userTableDefinitions.id, tableId), + eq(userTableDefinitions.workspaceId, validated.workspaceId), + isNull(userTableDefinitions.deletedAt) + ) + ) + .limit(1) + + if (!table) { + return NextResponse.json({ error: 'Table not found' }, { status: 404 }) + } + + logger.info(`[${requestId}] Retrieved table ${tableId}`) + + return NextResponse.json({ + table: { + id: table.id, + name: table.name, + description: table.description, + schema: table.schema, + rowCount: table.rowCount, + maxRows: table.maxRows, + createdAt: table.createdAt.toISOString(), + updatedAt: table.updatedAt.toISOString(), + }, + }) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Validation error', details: error.errors }, + { status: 400 } + ) + } + + logger.error(`[${requestId}] Error getting table:`, error) + return NextResponse.json({ error: 'Failed to get table' }, { status: 500 }) + } +} + +/** + * DELETE /api/table/[tableId]?workspaceId=xxx + * Delete a table (soft delete) + */ +export async function DELETE( + request: NextRequest, + { params }: { params: Promise<{ tableId: string }> } +) { + const requestId = generateRequestId() + const { tableId } = await params + + try { + const authResult = await checkHybridAuth(request) + if (!authResult.success || !authResult.userId) { + return NextResponse.json({ error: 'Authentication required' }, { status: 401 }) + } + + const { searchParams } = new URL(request.url) + const validated = GetTableSchema.parse({ + workspaceId: searchParams.get('workspaceId'), + }) + + // Check workspace access + const { hasAccess, canWrite } = await checkWorkspaceAccess( + validated.workspaceId, + authResult.userId + ) + + if (!hasAccess || !canWrite) { + return NextResponse.json({ error: 'Access denied' }, { status: 403 }) + } + + // Soft delete table + const [deletedTable] = await db + .update(userTableDefinitions) + .set({ + deletedAt: new Date(), + updatedAt: new Date(), + }) + .where( + and( + eq(userTableDefinitions.id, tableId), + eq(userTableDefinitions.workspaceId, validated.workspaceId), + isNull(userTableDefinitions.deletedAt) + ) + ) + .returning() + + if (!deletedTable) { + return 
NextResponse.json({ error: 'Table not found' }, { status: 404 }) + } + + // Delete all rows + await db.delete(userTableRows).where(eq(userTableRows.tableId, tableId)) + + logger.info(`[${requestId}] Deleted table ${tableId}`) + + return NextResponse.json({ + message: 'Table deleted successfully', + success: true, + }) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Validation error', details: error.errors }, + { status: 400 } + ) + } + + logger.error(`[${requestId}] Error deleting table:`, error) + return NextResponse.json({ error: 'Failed to delete table' }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/table/[tableId]/rows/[rowId]/route.ts b/apps/sim/app/api/table/[tableId]/rows/[rowId]/route.ts new file mode 100644 index 0000000000..18a4c9aabb --- /dev/null +++ b/apps/sim/app/api/table/[tableId]/rows/[rowId]/route.ts @@ -0,0 +1,331 @@ +import { db } from '@sim/db' +import { permissions, userTableDefinitions, userTableRows, workspace } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { and, eq, isNull, sql } from 'drizzle-orm' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkHybridAuth } from '@/lib/auth/hybrid' +import { generateRequestId } from '@/lib/core/utils/request' +import type { TableSchema } from '@/lib/table' +import { validateRowAgainstSchema, validateRowSize } from '@/lib/table' + +const logger = createLogger('TableRowAPI') + +const GetRowSchema = z.object({ + workspaceId: z.string().min(1), +}) + +const UpdateRowSchema = z.object({ + workspaceId: z.string().min(1), + data: z.record(z.any()), +}) + +const DeleteRowSchema = z.object({ + workspaceId: z.string().min(1), +}) + +/** + * Check if user has write access to workspace + */ +async function checkWorkspaceAccess(workspaceId: string, userId: string) { + const [workspaceData] = await db + .select({ + id: workspace.id, + ownerId: workspace.ownerId, + }) + .from(workspace) + .where(eq(workspace.id, workspaceId)) + .limit(1) + + if (!workspaceData) { + return { hasAccess: false, canWrite: false } + } + + if (workspaceData.ownerId === userId) { + return { hasAccess: true, canWrite: true } + } + + const [permission] = await db + .select({ + permissionType: permissions.permissionType, + }) + .from(permissions) + .where( + and( + eq(permissions.userId, userId), + eq(permissions.entityType, 'workspace'), + eq(permissions.entityId, workspaceId) + ) + ) + .limit(1) + + if (!permission) { + return { hasAccess: false, canWrite: false } + } + + const canWrite = permission.permissionType === 'admin' || permission.permissionType === 'write' + + return { + hasAccess: true, + canWrite, + } +} + +/** + * GET /api/table/[tableId]/rows/[rowId]?workspaceId=xxx + * Get a single row by ID + */ +export async function GET( + request: NextRequest, + { params }: { params: Promise<{ tableId: string; rowId: string }> } +) { + const requestId = generateRequestId() + const { tableId, rowId } = await params + + try { + const authResult = await checkHybridAuth(request) + if (!authResult.success || !authResult.userId) { + return NextResponse.json({ error: 'Authentication required' }, { status: 401 }) + } + + const { searchParams } = new URL(request.url) + const validated = GetRowSchema.parse({ + workspaceId: searchParams.get('workspaceId'), + }) + + // Check workspace access + const { hasAccess } = await checkWorkspaceAccess(validated.workspaceId, authResult.userId) + + if (!hasAccess) { + return NextResponse.json({ error: 
'Access denied' }, { status: 403 }) + } + + // Get row + const [row] = await db + .select({ + id: userTableRows.id, + data: userTableRows.data, + createdAt: userTableRows.createdAt, + updatedAt: userTableRows.updatedAt, + }) + .from(userTableRows) + .where( + and( + eq(userTableRows.id, rowId), + eq(userTableRows.tableId, tableId), + eq(userTableRows.workspaceId, validated.workspaceId) + ) + ) + .limit(1) + + if (!row) { + return NextResponse.json({ error: 'Row not found' }, { status: 404 }) + } + + logger.info(`[${requestId}] Retrieved row ${rowId} from table ${tableId}`) + + return NextResponse.json({ + row: { + id: row.id, + data: row.data, + createdAt: row.createdAt.toISOString(), + updatedAt: row.updatedAt.toISOString(), + }, + }) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Validation error', details: error.errors }, + { status: 400 } + ) + } + + logger.error(`[${requestId}] Error getting row:`, error) + return NextResponse.json({ error: 'Failed to get row' }, { status: 500 }) + } +} + +/** + * PATCH /api/table/[tableId]/rows/[rowId] + * Update an existing row + */ +export async function PATCH( + request: NextRequest, + { params }: { params: Promise<{ tableId: string; rowId: string }> } +) { + const requestId = generateRequestId() + const { tableId, rowId } = await params + + try { + const authResult = await checkHybridAuth(request) + if (!authResult.success || !authResult.userId) { + return NextResponse.json({ error: 'Authentication required' }, { status: 401 }) + } + + const body = await request.json() + const validated = UpdateRowSchema.parse(body) + + // Check workspace access + const { hasAccess, canWrite } = await checkWorkspaceAccess( + validated.workspaceId, + authResult.userId + ) + + if (!hasAccess || !canWrite) { + return NextResponse.json({ error: 'Access denied' }, { status: 403 }) + } + + // Get table definition + const [table] = await db + .select() + .from(userTableDefinitions) + .where( + and( + eq(userTableDefinitions.id, tableId), + eq(userTableDefinitions.workspaceId, validated.workspaceId), + isNull(userTableDefinitions.deletedAt) + ) + ) + .limit(1) + + if (!table) { + return NextResponse.json({ error: 'Table not found' }, { status: 404 }) + } + + // Validate row size + const sizeValidation = validateRowSize(validated.data) + if (!sizeValidation.valid) { + return NextResponse.json( + { error: 'Invalid row data', details: sizeValidation.errors }, + { status: 400 } + ) + } + + // Validate row against schema + const rowValidation = validateRowAgainstSchema(validated.data, table.schema as TableSchema) + if (!rowValidation.valid) { + return NextResponse.json( + { error: 'Row data does not match schema', details: rowValidation.errors }, + { status: 400 } + ) + } + + // Update row + const now = new Date() + + const [updatedRow] = await db + .update(userTableRows) + .set({ + data: validated.data, + updatedAt: now, + }) + .where( + and( + eq(userTableRows.id, rowId), + eq(userTableRows.tableId, tableId), + eq(userTableRows.workspaceId, validated.workspaceId) + ) + ) + .returning() + + if (!updatedRow) { + return NextResponse.json({ error: 'Row not found' }, { status: 404 }) + } + + logger.info(`[${requestId}] Updated row ${rowId} in table ${tableId}`) + + return NextResponse.json({ + row: { + id: updatedRow.id, + data: updatedRow.data, + createdAt: updatedRow.createdAt.toISOString(), + updatedAt: updatedRow.updatedAt.toISOString(), + }, + message: 'Row updated successfully', + }) + } catch (error) { + if (error 
instanceof z.ZodError) { + return NextResponse.json( + { error: 'Validation error', details: error.errors }, + { status: 400 } + ) + } + + logger.error(`[${requestId}] Error updating row:`, error) + return NextResponse.json({ error: 'Failed to update row' }, { status: 500 }) + } +} + +/** + * DELETE /api/table/[tableId]/rows/[rowId] + * Delete a row + */ +export async function DELETE( + request: NextRequest, + { params }: { params: Promise<{ tableId: string; rowId: string }> } +) { + const requestId = generateRequestId() + const { tableId, rowId } = await params + + try { + const authResult = await checkHybridAuth(request) + if (!authResult.success || !authResult.userId) { + return NextResponse.json({ error: 'Authentication required' }, { status: 401 }) + } + + const body = await request.json() + const validated = DeleteRowSchema.parse(body) + + // Check workspace access + const { hasAccess, canWrite } = await checkWorkspaceAccess( + validated.workspaceId, + authResult.userId + ) + + if (!hasAccess || !canWrite) { + return NextResponse.json({ error: 'Access denied' }, { status: 403 }) + } + + // Delete row + const [deletedRow] = await db + .delete(userTableRows) + .where( + and( + eq(userTableRows.id, rowId), + eq(userTableRows.tableId, tableId), + eq(userTableRows.workspaceId, validated.workspaceId) + ) + ) + .returning() + + if (!deletedRow) { + return NextResponse.json({ error: 'Row not found' }, { status: 404 }) + } + + // Update row count + await db + .update(userTableDefinitions) + .set({ + rowCount: sql`${userTableDefinitions.rowCount} - 1`, + updatedAt: new Date(), + }) + .where(eq(userTableDefinitions.id, tableId)) + + logger.info(`[${requestId}] Deleted row ${rowId} from table ${tableId}`) + + return NextResponse.json({ + message: 'Row deleted successfully', + deletedCount: 1, + }) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Validation error', details: error.errors }, + { status: 400 } + ) + } + + logger.error(`[${requestId}] Error deleting row:`, error) + return NextResponse.json({ error: 'Failed to delete row' }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/table/[tableId]/rows/route.ts b/apps/sim/app/api/table/[tableId]/rows/route.ts new file mode 100644 index 0000000000..4db02322f9 --- /dev/null +++ b/apps/sim/app/api/table/[tableId]/rows/route.ts @@ -0,0 +1,780 @@ +import { db } from '@sim/db' +import { permissions, userTableDefinitions, userTableRows, workspace } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { and, eq, isNull, sql } from 'drizzle-orm' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkHybridAuth } from '@/lib/auth/hybrid' +import { generateRequestId } from '@/lib/core/utils/request' +import type { QueryFilter, TableSchema } from '@/lib/table' +import { TABLE_LIMITS, validateRowAgainstSchema, validateRowSize } from '@/lib/table' +import { buildFilterClause, buildSortClause } from '@/lib/table/query-builder' + +const logger = createLogger('TableRowsAPI') + +const InsertRowSchema = z.object({ + workspaceId: z.string().min(1), + data: z.record(z.any()), +}) + +const BatchInsertRowsSchema = z.object({ + workspaceId: z.string().min(1), + rows: z.array(z.record(z.any())).min(1).max(1000), // Max 1000 rows per batch +}) + +const QueryRowsSchema = z.object({ + workspaceId: z.string().min(1), + filter: z.record(z.any()).optional(), + sort: z.record(z.enum(['asc', 'desc'])).optional(), + limit: 
z.coerce.number().int().min(1).max(TABLE_LIMITS.MAX_QUERY_LIMIT).optional().default(100), + offset: z.coerce.number().int().min(0).optional().default(0), +}) + +const UpdateRowsByFilterSchema = z.object({ + workspaceId: z.string().min(1), + filter: z.record(z.any()), // Required - must specify what to update + data: z.record(z.any()), // New data to set + limit: z.coerce.number().int().min(1).max(1000).optional(), // Safety limit for bulk updates +}) + +const DeleteRowsByFilterSchema = z.object({ + workspaceId: z.string().min(1), + filter: z.record(z.any()), // Required - must specify what to delete + limit: z.coerce.number().int().min(1).max(1000).optional(), // Safety limit for bulk deletes +}) + +/** + * Check if user has write access to workspace + */ +async function checkWorkspaceAccess(workspaceId: string, userId: string) { + const [workspaceData] = await db + .select({ + id: workspace.id, + ownerId: workspace.ownerId, + }) + .from(workspace) + .where(eq(workspace.id, workspaceId)) + .limit(1) + + if (!workspaceData) { + return { hasAccess: false, canWrite: false } + } + + if (workspaceData.ownerId === userId) { + return { hasAccess: true, canWrite: true } + } + + const [permission] = await db + .select({ + permissionType: permissions.permissionType, + }) + .from(permissions) + .where( + and( + eq(permissions.userId, userId), + eq(permissions.entityType, 'workspace'), + eq(permissions.entityId, workspaceId) + ) + ) + .limit(1) + + if (!permission) { + return { hasAccess: false, canWrite: false } + } + + const canWrite = permission.permissionType === 'admin' || permission.permissionType === 'write' + + return { + hasAccess: true, + canWrite, + } +} + +/** + * Handle batch insert of multiple rows + */ +async function handleBatchInsert(requestId: string, tableId: string, body: any, userId: string) { + const validated = BatchInsertRowsSchema.parse(body) + + // Check workspace access + const { hasAccess, canWrite } = await checkWorkspaceAccess(validated.workspaceId, userId) + + if (!hasAccess || !canWrite) { + return NextResponse.json({ error: 'Access denied' }, { status: 403 }) + } + + // Get table definition + const [table] = await db + .select() + .from(userTableDefinitions) + .where( + and( + eq(userTableDefinitions.id, tableId), + eq(userTableDefinitions.workspaceId, validated.workspaceId), + isNull(userTableDefinitions.deletedAt) + ) + ) + .limit(1) + + if (!table) { + return NextResponse.json({ error: 'Table not found' }, { status: 404 }) + } + + // Check row count limit + const remainingCapacity = table.maxRows - table.rowCount + if (remainingCapacity < validated.rows.length) { + return NextResponse.json( + { + error: `Insufficient capacity. 
Can only insert ${remainingCapacity} more rows (table has ${table.rowCount}/${table.maxRows} rows)`, + }, + { status: 400 } + ) + } + + // Validate all rows + const errors: { row: number; errors: string[] }[] = [] + + for (let i = 0; i < validated.rows.length; i++) { + const rowData = validated.rows[i] + + // Validate row size + const sizeValidation = validateRowSize(rowData) + if (!sizeValidation.valid) { + errors.push({ row: i, errors: sizeValidation.errors }) + continue + } + + // Validate row against schema + const rowValidation = validateRowAgainstSchema(rowData, table.schema as TableSchema) + if (!rowValidation.valid) { + errors.push({ row: i, errors: rowValidation.errors }) + } + } + + if (errors.length > 0) { + return NextResponse.json( + { + error: 'Validation failed for some rows', + details: errors, + }, + { status: 400 } + ) + } + + // Insert all rows + const now = new Date() + const rowsToInsert = validated.rows.map((data) => ({ + id: `row_${crypto.randomUUID().replace(/-/g, '')}`, + tableId, + workspaceId: validated.workspaceId, + data, + createdAt: now, + updatedAt: now, + createdBy: userId, + })) + + const insertedRows = await db.insert(userTableRows).values(rowsToInsert).returning() + + // Update row count + await db + .update(userTableDefinitions) + .set({ + rowCount: sql`${userTableDefinitions.rowCount} + ${validated.rows.length}`, + updatedAt: now, + }) + .where(eq(userTableDefinitions.id, tableId)) + + logger.info(`[${requestId}] Batch inserted ${insertedRows.length} rows into table ${tableId}`) + + return NextResponse.json({ + rows: insertedRows.map((r) => ({ + id: r.id, + data: r.data, + createdAt: r.createdAt.toISOString(), + updatedAt: r.updatedAt.toISOString(), + })), + insertedCount: insertedRows.length, + message: `Successfully inserted ${insertedRows.length} rows`, + }) +} + +/** + * POST /api/table/[tableId]/rows + * Insert a new row into the table + * Supports both single-row insert and batch insert via a `rows` array in the JSON body + */ +export async function POST( + request: NextRequest, + { params }: { params: Promise<{ tableId: string }> } +) { + const requestId = generateRequestId() + const { tableId } = await params + + try { + const authResult = await checkHybridAuth(request) + if (!authResult.success || !authResult.userId) { + return NextResponse.json({ error: 'Authentication required' }, { status: 401 }) + } + + const body = await request.json() + + // Check if this is a batch insert + if (body.rows && Array.isArray(body.rows)) { + return handleBatchInsert(requestId, tableId, body, authResult.userId) + } + + // Single row insert + const validated = InsertRowSchema.parse(body) + + // Check workspace access + const { hasAccess, canWrite } = await checkWorkspaceAccess( + validated.workspaceId, + authResult.userId + ) + + if (!hasAccess || !canWrite) { + return NextResponse.json({ error: 'Access denied' }, { status: 403 }) + } + + // Get table definition + const [table] = await db + .select() + .from(userTableDefinitions) + .where( + and( + eq(userTableDefinitions.id, tableId), + eq(userTableDefinitions.workspaceId, validated.workspaceId), + isNull(userTableDefinitions.deletedAt) + ) + ) + .limit(1) + + if (!table) { + return NextResponse.json({ error: 'Table not found' }, { status: 404 }) + } + + // Validate row size + const sizeValidation = validateRowSize(validated.data) + if (!sizeValidation.valid) { + return NextResponse.json( + { error: 'Invalid row data', details: sizeValidation.errors }, + { status: 400 } + ) + } + + // Validate row against schema + const rowValidation = 
validateRowAgainstSchema(validated.data, table.schema as TableSchema) + if (!rowValidation.valid) { + return NextResponse.json( + { error: 'Row data does not match schema', details: rowValidation.errors }, + { status: 400 } + ) + } + + // Check row count limit + if (table.rowCount >= table.maxRows) { + return NextResponse.json( + { error: `Table row limit reached (${table.maxRows} rows max)` }, + { status: 400 } + ) + } + + // Insert row + const rowId = `row_${crypto.randomUUID().replace(/-/g, '')}` + const now = new Date() + + const [row] = await db + .insert(userTableRows) + .values({ + id: rowId, + tableId, + workspaceId: validated.workspaceId, + data: validated.data, + createdAt: now, + updatedAt: now, + createdBy: authResult.userId, + }) + .returning() + + // Update row count + await db + .update(userTableDefinitions) + .set({ + rowCount: sql`${userTableDefinitions.rowCount} + 1`, + updatedAt: now, + }) + .where(eq(userTableDefinitions.id, tableId)) + + logger.info(`[${requestId}] Inserted row ${rowId} into table ${tableId}`) + + return NextResponse.json({ + row: { + id: row.id, + data: row.data, + createdAt: row.createdAt.toISOString(), + updatedAt: row.updatedAt.toISOString(), + }, + message: 'Row inserted successfully', + }) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Validation error', details: error.errors }, + { status: 400 } + ) + } + + logger.error(`[${requestId}] Error inserting row:`, error) + return NextResponse.json({ error: 'Failed to insert row' }, { status: 500 }) + } +} + +/** + * GET /api/table/[tableId]/rows?workspaceId=xxx&filter=...&sort=...&limit=100&offset=0 + * Query rows from the table with filtering, sorting, and pagination + */ +export async function GET( + request: NextRequest, + { params }: { params: Promise<{ tableId: string }> } +) { + const requestId = generateRequestId() + const { tableId } = await params + + try { + const authResult = await checkHybridAuth(request) + if (!authResult.success || !authResult.userId) { + return NextResponse.json({ error: 'Authentication required' }, { status: 401 }) + } + + const { searchParams } = new URL(request.url) + const workspaceId = searchParams.get('workspaceId') + const filterParam = searchParams.get('filter') + const sortParam = searchParams.get('sort') + const limit = searchParams.get('limit') + const offset = searchParams.get('offset') + + let filter + let sort + + try { + if (filterParam) { + filter = JSON.parse(filterParam) + } + if (sortParam) { + sort = JSON.parse(sortParam) + } + } catch { + return NextResponse.json({ error: 'Invalid filter or sort JSON' }, { status: 400 }) + } + + const validated = QueryRowsSchema.parse({ + workspaceId, + filter, + sort, + limit, + offset, + }) + + // Check workspace access + const { hasAccess } = await checkWorkspaceAccess(validated.workspaceId, authResult.userId) + + if (!hasAccess) { + return NextResponse.json({ error: 'Access denied' }, { status: 403 }) + } + + // Verify table exists + const [table] = await db + .select({ id: userTableDefinitions.id }) + .from(userTableDefinitions) + .where( + and( + eq(userTableDefinitions.id, tableId), + eq(userTableDefinitions.workspaceId, validated.workspaceId), + isNull(userTableDefinitions.deletedAt) + ) + ) + .limit(1) + + if (!table) { + return NextResponse.json({ error: 'Table not found' }, { status: 404 }) + } + + // Build base where conditions + const baseConditions = [ + eq(userTableRows.tableId, tableId), + eq(userTableRows.workspaceId, validated.workspaceId), + ] + + 
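+    // Illustrative example (assumed shapes — the authoritative QueryFilter and sort
+    // types live in '@/lib/table' and are not shown in this patch). A request like:
+    //   GET /api/table/tbl_abc/rows?workspaceId=ws_1&limit=50&offset=0
+    //     &filter={"column":"status","operator":"=","value":"active"}
+    //     &sort={"createdAt":"desc"}
+    // arrives here with `filter`/`sort` already JSON-parsed and validated; the
+    // clauses built below are ANDed onto the tenancy conditions on tableId and
+    // workspaceId so a filter can never reach outside the caller's table.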
// Add filter conditions if provided + if (validated.filter) { + const filterClause = buildFilterClause(validated.filter as QueryFilter, 'user_table_rows') + if (filterClause) { + baseConditions.push(filterClause) + } + } + + // Build query with combined conditions + let query = db + .select({ + id: userTableRows.id, + data: userTableRows.data, + createdAt: userTableRows.createdAt, + updatedAt: userTableRows.updatedAt, + }) + .from(userTableRows) + .where(and(...baseConditions)) + + // Apply sorting + if (validated.sort) { + const sortClause = buildSortClause(validated.sort, 'user_table_rows') + if (sortClause) { + query = query.orderBy(sortClause) as any + } + } else { + query = query.orderBy(userTableRows.createdAt) as any + } + + // Get total count with same filters (without pagination) + const countQuery = db + .select({ count: sql`count(*)` }) + .from(userTableRows) + .where(and(...baseConditions)) + + const [{ count: totalCount }] = await countQuery + + // Apply pagination + const rows = await query.limit(validated.limit).offset(validated.offset) + + logger.info( + `[${requestId}] Queried ${rows.length} rows from table ${tableId} (total: ${totalCount})` + ) + + return NextResponse.json({ + rows: rows.map((r) => ({ + id: r.id, + data: r.data, + createdAt: r.createdAt.toISOString(), + updatedAt: r.updatedAt.toISOString(), + })), + rowCount: rows.length, + totalCount: Number(totalCount), + limit: validated.limit, + offset: validated.offset, + }) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Validation error', details: error.errors }, + { status: 400 } + ) + } + + logger.error(`[${requestId}] Error querying rows:`, error) + return NextResponse.json({ error: 'Failed to query rows' }, { status: 500 }) + } +} + +/** + * PUT /api/table/[tableId]/rows + * Update multiple rows by filter criteria + * Example: Update all rows where name contains "test" + */ +export async function PUT( + request: NextRequest, + { params }: { params: Promise<{ tableId: string }> } +) { + const requestId = generateRequestId() + const { tableId } = await params + + try { + const authResult = await checkHybridAuth(request) + if (!authResult.success || !authResult.userId) { + return NextResponse.json({ error: 'Authentication required' }, { status: 401 }) + } + + const body = await request.json() + const validated = UpdateRowsByFilterSchema.parse(body) + + // Check workspace access + const { hasAccess, canWrite } = await checkWorkspaceAccess( + validated.workspaceId, + authResult.userId + ) + + if (!hasAccess || !canWrite) { + return NextResponse.json({ error: 'Access denied' }, { status: 403 }) + } + + // Get table definition + const [table] = await db + .select() + .from(userTableDefinitions) + .where( + and( + eq(userTableDefinitions.id, tableId), + eq(userTableDefinitions.workspaceId, validated.workspaceId), + isNull(userTableDefinitions.deletedAt) + ) + ) + .limit(1) + + if (!table) { + return NextResponse.json({ error: 'Table not found' }, { status: 404 }) + } + + // Validate new data size + const sizeValidation = validateRowSize(validated.data) + if (!sizeValidation.valid) { + return NextResponse.json( + { error: 'Invalid row data', details: sizeValidation.errors }, + { status: 400 } + ) + } + + // Build base where conditions + const baseConditions = [ + eq(userTableRows.tableId, tableId), + eq(userTableRows.workspaceId, validated.workspaceId), + ] + + // Add filter conditions + const filterClause = buildFilterClause(validated.filter as QueryFilter, 
'user_table_rows') + if (filterClause) { + baseConditions.push(filterClause) + } + + // First, get the rows that match the filter to validate against schema + let matchingRowsQuery = db + .select({ + id: userTableRows.id, + data: userTableRows.data, + }) + .from(userTableRows) + .where(and(...baseConditions)) + + if (validated.limit) { + matchingRowsQuery = matchingRowsQuery.limit(validated.limit) as any + } + + const matchingRows = await matchingRowsQuery + + if (matchingRows.length === 0) { + return NextResponse.json( + { + message: 'No rows matched the filter criteria', + updatedCount: 0, + }, + { status: 200 } + ) + } + + // Log warning for large operations but allow them + if (matchingRows.length > 1000) { + logger.warn(`[${requestId}] Updating ${matchingRows.length} rows. This may take some time.`) + } + + // Validate that merged data matches schema for each row + for (const row of matchingRows) { + const mergedData = { ...row.data, ...validated.data } + const rowValidation = validateRowAgainstSchema(mergedData, table.schema as TableSchema) + if (!rowValidation.valid) { + return NextResponse.json( + { + error: 'Updated data does not match schema', + details: rowValidation.errors, + affectedRowId: row.id, + }, + { status: 400 } + ) + } + } + + // Update rows by merging existing data with new data in batches + const now = new Date() + const BATCH_SIZE = 100 // Smaller batch for updates since each is a separate query + let totalUpdated = 0 + + for (let i = 0; i < matchingRows.length; i += BATCH_SIZE) { + const batch = matchingRows.slice(i, i + BATCH_SIZE) + const updatePromises = batch.map((row) => + db + .update(userTableRows) + .set({ + data: { ...row.data, ...validated.data }, + updatedAt: now, + }) + .where(eq(userTableRows.id, row.id)) + ) + await Promise.all(updatePromises) + totalUpdated += batch.length + logger.info( + `[${requestId}] Updated batch ${Math.floor(i / BATCH_SIZE) + 1} (${totalUpdated}/${matchingRows.length} rows)` + ) + } + + logger.info(`[${requestId}] Updated ${matchingRows.length} rows in table ${tableId}`) + + return NextResponse.json({ + message: 'Rows updated successfully', + updatedCount: matchingRows.length, + updatedRowIds: matchingRows.map((r) => r.id), + }) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Validation error', details: error.errors }, + { status: 400 } + ) + } + + logger.error(`[${requestId}] Error updating rows by filter:`, error) + + const errorMessage = error instanceof Error ? 
error.message : String(error) + const detailedError = `Failed to update rows: ${errorMessage}` + + return NextResponse.json({ error: detailedError }, { status: 500 }) + } +} + +/** + * DELETE /api/table/[tableId]/rows + * Delete multiple rows by filter criteria + * Example: Delete all rows where seen is false + */ +export async function DELETE( + request: NextRequest, + { params }: { params: Promise<{ tableId: string }> } +) { + const requestId = generateRequestId() + const { tableId } = await params + + try { + const authResult = await checkHybridAuth(request) + if (!authResult.success || !authResult.userId) { + return NextResponse.json({ error: 'Authentication required' }, { status: 401 }) + } + + const body = await request.json() + const validated = DeleteRowsByFilterSchema.parse(body) + + // Check workspace access + const { hasAccess, canWrite } = await checkWorkspaceAccess( + validated.workspaceId, + authResult.userId + ) + + if (!hasAccess || !canWrite) { + return NextResponse.json({ error: 'Access denied' }, { status: 403 }) + } + + // Verify table exists + const [table] = await db + .select({ id: userTableDefinitions.id }) + .from(userTableDefinitions) + .where( + and( + eq(userTableDefinitions.id, tableId), + eq(userTableDefinitions.workspaceId, validated.workspaceId), + isNull(userTableDefinitions.deletedAt) + ) + ) + .limit(1) + + if (!table) { + return NextResponse.json({ error: 'Table not found' }, { status: 404 }) + } + + // Build base where conditions + const baseConditions = [ + eq(userTableRows.tableId, tableId), + eq(userTableRows.workspaceId, validated.workspaceId), + ] + + // Add filter conditions + const filterClause = buildFilterClause(validated.filter as QueryFilter, 'user_table_rows') + if (filterClause) { + baseConditions.push(filterClause) + } + + // Get matching rows first (for reporting and limit enforcement) + let matchingRowsQuery = db + .select({ id: userTableRows.id }) + .from(userTableRows) + .where(and(...baseConditions)) + + if (validated.limit) { + matchingRowsQuery = matchingRowsQuery.limit(validated.limit) as any + } + + const matchingRows = await matchingRowsQuery + + if (matchingRows.length === 0) { + return NextResponse.json( + { + message: 'No rows matched the filter criteria', + deletedCount: 0, + }, + { status: 200 } + ) + } + + // Log warning for large operations but allow them + if (matchingRows.length > 1000) { + logger.warn(`[${requestId}] Deleting ${matchingRows.length} rows. 
This may take some time.`) + } + + // Delete the matching rows in batches to avoid stack overflow + const rowIds = matchingRows.map((r) => r.id) + const BATCH_SIZE = 1000 + let totalDeleted = 0 + + for (let i = 0; i < rowIds.length; i += BATCH_SIZE) { + const batch = rowIds.slice(i, i + BATCH_SIZE) + await db.delete(userTableRows).where( + and( + eq(userTableRows.tableId, tableId), + eq(userTableRows.workspaceId, validated.workspaceId), + sql`${userTableRows.id} = ANY(ARRAY[${sql.join( + batch.map((id) => sql`${id}`), + sql`, ` + )}])` + ) + ) + totalDeleted += batch.length + logger.info( + `[${requestId}] Deleted batch ${Math.floor(i / BATCH_SIZE) + 1} (${totalDeleted}/${rowIds.length} rows)` + ) + } + + // Update row count + await db + .update(userTableDefinitions) + .set({ + rowCount: sql`${userTableDefinitions.rowCount} - ${matchingRows.length}`, + updatedAt: new Date(), + }) + .where(eq(userTableDefinitions.id, tableId)) + + logger.info(`[${requestId}] Deleted ${matchingRows.length} rows from table ${tableId}`) + + return NextResponse.json({ + message: 'Rows deleted successfully', + deletedCount: matchingRows.length, + deletedRowIds: rowIds, + }) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Validation error', details: error.errors }, + { status: 400 } + ) + } + + logger.error(`[${requestId}] Error deleting rows by filter:`, error) + + const errorMessage = error instanceof Error ? error.message : String(error) + const detailedError = `Failed to delete rows: ${errorMessage}` + + return NextResponse.json({ error: detailedError }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/table/route.ts b/apps/sim/app/api/table/route.ts new file mode 100644 index 0000000000..dd7c26c831 --- /dev/null +++ b/apps/sim/app/api/table/route.ts @@ -0,0 +1,297 @@ +import { db } from '@sim/db' +import { permissions, userTableDefinitions, workspace } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { and, eq, isNull, sql } from 'drizzle-orm' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkHybridAuth } from '@/lib/auth/hybrid' +import { generateRequestId } from '@/lib/core/utils/request' +import { TABLE_LIMITS, validateTableName, validateTableSchema } from '@/lib/table' +import type { TableSchema } from '@/lib/table/validation' + +const logger = createLogger('TableAPI') + +const ColumnSchema = z.object({ + name: z + .string() + .min(1) + .max(TABLE_LIMITS.MAX_COLUMN_NAME_LENGTH) + .regex(/^[a-z_][a-z0-9_]*$/i, 'Invalid column name'), + type: z.enum(['string', 'number', 'boolean', 'date', 'json']), + required: z.boolean().optional().default(false), +}) + +const CreateTableSchema = z.object({ + name: z + .string() + .min(1) + .max(TABLE_LIMITS.MAX_TABLE_NAME_LENGTH) + .regex(/^[a-z_][a-z0-9_]*$/i, 'Invalid table name'), + description: z.string().max(TABLE_LIMITS.MAX_DESCRIPTION_LENGTH).optional(), + schema: z.object({ + columns: z.array(ColumnSchema).min(1).max(TABLE_LIMITS.MAX_COLUMNS_PER_TABLE), + }), + workspaceId: z.string().min(1), +}) + +const ListTablesSchema = z.object({ + workspaceId: z.string().min(1), +}) + +/** + * Check if user has write access to workspace + */ +async function checkWorkspaceAccess(workspaceId: string, userId: string) { + const [workspaceData] = await db + .select({ + id: workspace.id, + ownerId: workspace.ownerId, + }) + .from(workspace) + .where(eq(workspace.id, workspaceId)) + .limit(1) + + if (!workspaceData) { + return { hasAccess: false, 
canWrite: false } + } + + // Owner has full access + if (workspaceData.ownerId === userId) { + return { hasAccess: true, canWrite: true } + } + + // Check permissions + const [permission] = await db + .select({ + permissionType: permissions.permissionType, + }) + .from(permissions) + .where( + and( + eq(permissions.userId, userId), + eq(permissions.entityType, 'workspace'), + eq(permissions.entityId, workspaceId) + ) + ) + .limit(1) + + if (!permission) { + return { hasAccess: false, canWrite: false } + } + + const canWrite = permission.permissionType === 'admin' || permission.permissionType === 'write' + + return { + hasAccess: true, + canWrite, + } +} + +/** + * POST /api/table + * Create a new user-defined table + */ +export async function POST(request: NextRequest) { + const requestId = generateRequestId() + + try { + const authResult = await checkHybridAuth(request) + if (!authResult.success || !authResult.userId) { + return NextResponse.json({ error: 'Authentication required' }, { status: 401 }) + } + + const body = await request.json() + const params = CreateTableSchema.parse(body) + + // Validate table name + const nameValidation = validateTableName(params.name) + if (!nameValidation.valid) { + return NextResponse.json( + { error: 'Invalid table name', details: nameValidation.errors }, + { status: 400 } + ) + } + + // Validate schema + const schemaValidation = validateTableSchema(params.schema as TableSchema) + if (!schemaValidation.valid) { + return NextResponse.json( + { error: 'Invalid table schema', details: schemaValidation.errors }, + { status: 400 } + ) + } + + // Check workspace access + const { hasAccess, canWrite } = await checkWorkspaceAccess( + params.workspaceId, + authResult.userId + ) + + if (!hasAccess || !canWrite) { + return NextResponse.json({ error: 'Access denied' }, { status: 403 }) + } + + // Check workspace table limit + const [tableCount] = await db + .select({ count: sql`count(*)` }) + .from(userTableDefinitions) + .where( + and( + eq(userTableDefinitions.workspaceId, params.workspaceId), + isNull(userTableDefinitions.deletedAt) + ) + ) + + if (Number(tableCount.count) >= TABLE_LIMITS.MAX_TABLES_PER_WORKSPACE) { + return NextResponse.json( + { + error: `Workspace table limit reached (${TABLE_LIMITS.MAX_TABLES_PER_WORKSPACE} tables max)`, + }, + { status: 400 } + ) + } + + // Check for duplicate table name + const [existing] = await db + .select({ id: userTableDefinitions.id }) + .from(userTableDefinitions) + .where( + and( + eq(userTableDefinitions.workspaceId, params.workspaceId), + eq(userTableDefinitions.name, params.name), + isNull(userTableDefinitions.deletedAt) + ) + ) + .limit(1) + + if (existing) { + return NextResponse.json( + { error: `Table "${params.name}" already exists in this workspace` }, + { status: 400 } + ) + } + + // Create table + const tableId = `tbl_${crypto.randomUUID().replace(/-/g, '')}` + const now = new Date() + + const [table] = await db + .insert(userTableDefinitions) + .values({ + id: tableId, + workspaceId: params.workspaceId, + name: params.name, + description: params.description, + schema: params.schema, + maxRows: TABLE_LIMITS.MAX_ROWS_PER_TABLE, + rowCount: 0, + createdBy: authResult.userId, + createdAt: now, + updatedAt: now, + }) + .returning() + + logger.info(`[${requestId}] Created table ${tableId} in workspace ${params.workspaceId}`) + + return NextResponse.json({ + table: { + id: table.id, + name: table.name, + description: table.description, + schema: table.schema, + rowCount: table.rowCount, + maxRows: 
table.maxRows, + createdAt: table.createdAt.toISOString(), + updatedAt: table.updatedAt.toISOString(), + }, + message: 'Table created successfully', + }) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Validation error', details: error.errors }, + { status: 400 } + ) + } + + logger.error(`[${requestId}] Error creating table:`, error) + return NextResponse.json({ error: 'Failed to create table' }, { status: 500 }) + } +} + +/** + * GET /api/table?workspaceId=xxx + * List all tables in a workspace + */ +export async function GET(request: NextRequest) { + const requestId = generateRequestId() + + try { + const authResult = await checkHybridAuth(request) + if (!authResult.success || !authResult.userId) { + return NextResponse.json({ error: 'Authentication required' }, { status: 401 }) + } + + const { searchParams } = new URL(request.url) + const workspaceId = searchParams.get('workspaceId') + + const validation = ListTablesSchema.safeParse({ workspaceId }) + if (!validation.success) { + return NextResponse.json( + { error: 'Validation error', details: validation.error.errors }, + { status: 400 } + ) + } + + const params = validation.data + + // Check workspace access + const { hasAccess } = await checkWorkspaceAccess(params.workspaceId, authResult.userId) + + if (!hasAccess) { + return NextResponse.json({ error: 'Access denied' }, { status: 403 }) + } + + // Get tables + const tables = await db + .select({ + id: userTableDefinitions.id, + name: userTableDefinitions.name, + description: userTableDefinitions.description, + schema: userTableDefinitions.schema, + rowCount: userTableDefinitions.rowCount, + maxRows: userTableDefinitions.maxRows, + createdAt: userTableDefinitions.createdAt, + updatedAt: userTableDefinitions.updatedAt, + }) + .from(userTableDefinitions) + .where( + and( + eq(userTableDefinitions.workspaceId, params.workspaceId), + isNull(userTableDefinitions.deletedAt) + ) + ) + .orderBy(userTableDefinitions.createdAt) + + logger.info(`[${requestId}] Listed ${tables.length} tables in workspace ${params.workspaceId}`) + + return NextResponse.json({ + tables: tables.map((t) => ({ + ...t, + createdAt: t.createdAt.toISOString(), + updatedAt: t.updatedAt.toISOString(), + })), + totalCount: tables.length, + }) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Validation error', details: error.errors }, + { status: 400 } + ) + } + + logger.error(`[${requestId}] Error listing tables:`, error) + return NextResponse.json({ error: 'Failed to list tables' }, { status: 500 }) + } +} diff --git a/apps/sim/app/workspace/[workspaceId]/tables/components/create-table-modal.tsx b/apps/sim/app/workspace/[workspaceId]/tables/components/create-table-modal.tsx new file mode 100644 index 0000000000..1d3cefd4c2 --- /dev/null +++ b/apps/sim/app/workspace/[workspaceId]/tables/components/create-table-modal.tsx @@ -0,0 +1,278 @@ +'use client' + +import { useState } from 'react' +import { createLogger } from '@sim/logger' +import { Plus, Trash2 } from 'lucide-react' +import { useParams } from 'next/navigation' +import { + Button, + Checkbox, + Combobox, + Input, + Label, + Modal, + ModalBody, + ModalContent, + ModalFooter, + ModalHeader, + Textarea, +} from '@/components/emcn' +import { useCreateTable } from '@/hooks/queries/use-tables' + +const logger = createLogger('CreateTableModal') + +interface ColumnDefinition { + name: string + type: 'string' | 'number' | 'boolean' | 'date' | 'json' + required: boolean +} + 
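+// Illustrative payload (a sketch, not confirmed by this patch): what this modal
+// submits through useCreateTable, which is assumed to POST to /api/table and to
+// attach the workspaceId taken from the route params:
+//   {
+//     name: 'customer_orders',
+//     description: 'Orders synced from the CRM',
+//     schema: {
+//       columns: [
+//         { name: 'email', type: 'string', required: true },
+//         { name: 'total', type: 'number', required: false },
+//       ],
+//     },
+//   }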
+interface CreateTableModalProps {
+  isOpen: boolean
+  onClose: () => void
+}
+
+const COLUMN_TYPES = [
+  { value: 'string', label: 'String' },
+  { value: 'number', label: 'Number' },
+  { value: 'boolean', label: 'Boolean' },
+  { value: 'date', label: 'Date' },
+  { value: 'json', label: 'JSON' },
+]
+
+export function CreateTableModal({ isOpen, onClose }: CreateTableModalProps) {
+  const params = useParams()
+  const workspaceId = params.workspaceId as string
+
+  const [tableName, setTableName] = useState('')
+  const [description, setDescription] = useState('')
+  const [columns, setColumns] = useState<ColumnDefinition[]>([
+    { name: '', type: 'string', required: false },
+  ])
+  const [error, setError] = useState<string | null>(null)
+
+  const createTable = useCreateTable(workspaceId)
+
+  const handleAddColumn = () => {
+    setColumns([...columns, { name: '', type: 'string', required: false }])
+  }
+
+  const handleRemoveColumn = (index: number) => {
+    if (columns.length > 1) {
+      setColumns(columns.filter((_, i) => i !== index))
+    }
+  }
+
+  const handleColumnChange = (
+    index: number,
+    field: keyof ColumnDefinition,
+    value: string | boolean
+  ) => {
+    const newColumns = [...columns]
+    newColumns[index] = { ...newColumns[index], [field]: value }
+    setColumns(newColumns)
+  }
+
+  const handleSubmit = async (e: React.FormEvent) => {
+    e.preventDefault()
+    setError(null)
+
+    if (!tableName.trim()) {
+      setError('Table name is required')
+      return
+    }
+
+    // Validate column names
+    const validColumns = columns.filter((col) => col.name.trim())
+    if (validColumns.length === 0) {
+      setError('At least one column is required')
+      return
+    }
+
+    // Check for duplicate column names
+    const columnNames = validColumns.map((col) => col.name.toLowerCase())
+    const uniqueNames = new Set(columnNames)
+    if (uniqueNames.size !== columnNames.length) {
+      setError('Duplicate column names found')
+      return
+    }
+
+    try {
+      await createTable.mutateAsync({
+        name: tableName,
+        description: description || undefined,
+        schema: {
+          columns: validColumns,
+        },
+      })
+
+      // Reset form
+      setTableName('')
+      setDescription('')
+      setColumns([{ name: '', type: 'string', required: false }])
+      setError(null)
+      onClose()
+    } catch (err) {
+      logger.error('Failed to create table:', err)
+      setError(err instanceof Error ? err.message : 'Failed to create table')
+    }
+  }
+
+  const handleClose = () => {
+    // Reset form on close
+    setTableName('')
+    setDescription('')
+    setColumns([{ name: '', type: 'string', required: false }])
+    setError(null)
+    onClose()
+  }
+
+  return (
+    <Modal isOpen={isOpen} onClose={handleClose}>
+      <ModalContent>
+        <ModalHeader>Create New Table</ModalHeader>
+        <form onSubmit={handleSubmit}>
+          <ModalBody>
+            {error && (
+              <div className='text-destructive text-sm' role='alert'>
+                {error}
+              </div>
+            )}
+
+            {/* Table Name */}
+            <div>
+              <Label htmlFor='table-name'>Table Name</Label>
+              <Input
+                id='table-name'
+                value={tableName}
+                onChange={(e: React.ChangeEvent<HTMLInputElement>) => setTableName(e.target.value)}
+                placeholder='customers, orders, products'
+                className='h-[36px]'
+                required
+              />
+              <p className='text-muted-foreground text-xs'>
+                Use lowercase with underscores (e.g., customer_orders)
+              </p>
+            </div>
+
+            {/* Description */}
+            <div>
+              <Label htmlFor='table-description'>Description</Label>